ChaseHan commited on
Commit
833dac3
·
verified ·
1 Parent(s): 4174457

Upload 15 files

Browse files
Files changed (14) hide show
  1. Project.md +126 -0
  2. cache_utils.py +147 -0
  3. concept_handler.js +191 -0
  4. concept_handler.py +121 -0
  5. concept_interaction.js +142 -0
  6. config.py +15 -0
  7. few_shots_examples.py +212 -0
  8. llm_chain.py +199 -0
  9. llm_utils.py +195 -0
  10. prompts.py +241 -0
  11. requirements.txt +7 -0
  12. run.bat +27 -0
  13. run.sh +55 -0
  14. visualization.py +212 -0
Project.md ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 教育LLM助手项目文档
2
+
3
+ ## 项目概述
4
+
5
+ 这是一个基于Gradio构建的教育类LLM应用,主要功能是将学习问题分解为相关概念并提供详细解释。
6
+
7
+ ## 系统架构
8
+
9
+ ### 主要组件
10
+
11
+ 1. **前端界面** (app.py)
12
+ - 使用Gradio构建的Web界面
13
+ - 包含用户配置、问题输入、概念图谱展示和概念解释等模块
14
+
15
+ 2. **LLM链** (llm_chain.py)
16
+ - 处理核心的AI交互逻辑
17
+ - 包含两个主要功能:概念分解和概念解释
18
+
19
+ 3. **可视化模块** (visualization.py)
20
+ - 负责生成概念知识图谱
21
+
22
+ ### 数据流程
23
+
24
+ ```mermaid
25
+ graph LR
26
+ A[用户输入] --> B[问题分析]
27
+ B --> C[概念分解]
28
+ C --> D[知识图谱生成]
29
+ C --> E[概念卡片生成]
30
+ E --> F[概念选择]
31
+ F --> G[概念解释生成]
32
+ ```
33
+
34
+ ## Prompt系统
35
+
36
+ ### 1. 概念分解 Prompt (prompts.py)
37
+
38
+ 输入参数:
39
+ - question: 用户问题
40
+ - grade: 年级水平
41
+ - subject: 学科
42
+ - learning_needs: 学习需求
43
+
44
+ Prompt模板:
45
+ ```python
46
+ def generate_decomposition_prompt(question, grade, subject, learning_needs):
47
+ return f"""
48
+ 作为一名{subject}教育专家,请分析以下{grade}级学生的问题:
49
+
50
+ 问题:{question}
51
+
52
+ 学习需求:{learning_needs}
53
+
54
+ 请将这个问题分解为相关的核心概念,并说明它们之间的关系。
55
+ """
56
+ ```
57
+
58
+ ### 2. 概念解释 Prompt (prompts.py)
59
+
60
+ 输入参数:
61
+ - concept_name: 概念名称
62
+ - concept_description: 概念描述
63
+ - grade: 年级水平
64
+ - subject: 学科
65
+ - learning_needs: 学习需求
66
+
67
+ Prompt模板:
68
+ ```python
69
+ def generate_explanation_prompt(concept_name, concept_description, grade, subject, learning_needs):
70
+ return f"""
71
+ 请为{grade}级{subject}学生解释以下概念:
72
+
73
+ 概念:{concept_name}
74
+ 描述:{concept_description}
75
+
76
+ 学习需求:{learning_needs}
77
+
78
+ 请提供:
79
+ 1. 详细解释
80
+ 2. 具体例子
81
+ 3. 相关学习资源
82
+ 4. 练习题
83
+ """
84
+ ```
85
+
86
+ ## 处理流程
87
+
88
+ 1. **用户输入阶段**
89
+ - 用户设置学习配置(年级、学科、学习需求)
90
+ - 输入学习问题
91
+
92
+ 2. **概念分解阶段**
93
+ - 系统使用decomposition prompt分析问题
94
+ - 生成相关概念及其关系
95
+ - 创建概念知识图谱
96
+ - 生成概念卡片
97
+
98
+ 3. **概念解释阶段**
99
+ - 用户选择特定概念
100
+ - 系统使用explanation prompt生成详细解释
101
+ - 提供例子、资源和练习题
102
+
103
+ 4. **缓存机制**
104
+ - 使用AppState类管理状态
105
+ - 缓存已生成的概念解释
106
+ - 存储用户配置和当前概念数据
107
+
108
+ ## 输出格式
109
+
110
+ 1. **概念分解输出**
111
+ - 概念列表(名称、描述、难度)
112
+ - 概念间关系
113
+ - 可视化知识图谱
114
+
115
+ 2. **概念解释输出**
116
+ - 详细解释文本
117
+ - 示例(包含难度分级)
118
+ - 学习资源链接
119
+ - 练习题(带答案)
120
+
121
+ ## 注意事项
122
+
123
+ 1. 所有输出都经过格式化,以适应不同年级水平
124
+ 2. 系统支持多语言(通过用户界面配置)
125
+ 3. 实现了错误处理和优雅的降级机制
126
+ 4. 使用缓存来提高响应速度
cache_utils.py ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Utility module for caching LLM responses
3
+ """
4
+ import os
5
+ import json
6
+ import hashlib
7
+ from typing import Dict, Any, Optional
8
+ from config import CACHE_ENABLED, DEBUG_MODE
9
+
10
+ # Cache directory
11
+ CACHE_DIR = "cache"
12
+ os.makedirs(CACHE_DIR, exist_ok=True)
13
+
14
def generate_cache_key(prompt_type: str, params: Dict[str, Any]) -> str:
    """
    Generate a deterministic cache key for a prompt type and its parameters.

    Args:
        prompt_type: Prompt type, such as 'decompose' or 'explain'
        params: Prompt parameters

    Returns:
        Hex digest string suitable for use as a cache file name
    """
    # Canonical JSON (sorted keys) so logically-equal dicts hash identically.
    canonical_params = json.dumps(params, sort_keys=True, ensure_ascii=False)
    return hashlib.md5(f"{prompt_type}:{canonical_params}".encode('utf-8')).hexdigest()
31
+
32
def save_to_cache(cache_key: str, data: Dict[str, Any]) -> None:
    """
    Save data to cache file

    Args:
        cache_key: Cache key
        data: Data to be cached
    """
    # NOTE(review): this definition is shadowed by a second `save_to_cache`
    # defined at the bottom of this module (which hashes its key and writes a
    # plain string), so this JSON/dict variant is dead code as written.
    # Uses the cache key directly as the file name (no hashing here).
    cache_path = os.path.join(CACHE_DIR, f"{cache_key}.json")

    with open(cache_path, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=2)
44
+
45
def load_from_cache(cache_key: str) -> Optional[Dict[str, Any]]:
    """
    Load data from cache.

    Args:
        cache_key: Cache key

    Returns:
        Cached data, or None if it doesn't exist or cannot be read/parsed
    """
    cache_path = os.path.join(CACHE_DIR, f"{cache_key}.json")

    # EAFP: a missing, unreadable, or corrupted cache file all mean "no hit".
    # (OSError is the same exception as IOError, and FileNotFoundError is a
    # subclass, so this matches the original exists-check + try/except.)
    try:
        with open(cache_path, 'r', encoding='utf-8') as cache_file:
            return json.load(cache_file)
    except (OSError, json.JSONDecodeError):
        return None
66
+
67
def cached_llm_call(prompt_type: str, params: Dict[str, Any], call_function) -> Dict[str, Any]:
    """
    Cache wrapper for LLM calls.

    Args:
        prompt_type: Prompt type, such as 'decompose' or 'explain'
        params: Prompt parameters (also used to derive the cache key)
        call_function: Actual function that calls the LLM; invoked as
            call_function(params)

    Returns:
        LLM response, or the previously cached response when one exists
    """
    # Generate cache key
    cache_key = generate_cache_key(prompt_type, params)

    # Try to load from cache.
    # BUG FIX: the original used `if cached_result:`, which treats a cached
    # empty dict as a miss and re-issues the LLM call; test for None instead.
    cached_result = load_from_cache(cache_key)
    if cached_result is not None:
        print(f"[Cache] Using cached response: {prompt_type}")
        return cached_result

    # Cache miss: call the LLM and persist the fresh result.
    result = call_function(params)
    save_to_cache(cache_key, result)

    return result
95
+
96
def get_from_cache(cache_key: str) -> Optional[str]:
    """
    Get raw cached text directly from the cache.

    Args:
        cache_key: Cache key (can be an arbitrary string; hashed before lookup)

    Returns:
        Cached text, or None when caching is disabled or no entry exists
    """
    if not CACHE_ENABLED:
        return None

    # Arbitrary key strings are hashed so they map to a safe file name.
    hashed_key = hashlib.md5(cache_key.encode('utf-8')).hexdigest()
    cache_path = os.path.join(CACHE_DIR, f"{hashed_key}.json")

    # EAFP: OSError covers both "file missing" and "cannot be read",
    # matching the original exists-check + except IOError.
    try:
        with open(cache_path, 'r', encoding='utf-8') as cache_file:
            if DEBUG_MODE:
                print(f"Loading from cache: {cache_key[:30]}...")
            return cache_file.read()
    except OSError:
        return None
126
+
127
def save_to_cache(cache_key: str, data) -> None:
    """
    Save data to a cache file.

    NOTE: this definition shadows the earlier `save_to_cache` in this module,
    so it is the one all callers actually get.

    Args:
        cache_key: Cache key (can be an arbitrary string; hashed before use)
        data: String or JSON-serializable object to cache
    """
    if not CACHE_ENABLED:
        return

    # Hash the cache key so arbitrary strings map to a safe file name
    hash_obj = hashlib.md5(cache_key.encode('utf-8'))
    hashed_key = hash_obj.hexdigest()

    cache_path = os.path.join(CACHE_DIR, f"{hashed_key}.json")

    with open(cache_path, 'w', encoding='utf-8') as f:
        if isinstance(data, str):
            f.write(data)
        else:
            # BUG FIX: cached_llm_call passes a dict here; f.write(dict)
            # raised TypeError. Serialize non-string payloads as JSON.
            json.dump(data, f, ensure_ascii=False, indent=2)
    if DEBUG_MODE:
        print(f"Data cached: {cache_key[:30]}...")
concept_handler.js ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Handle a concept-card click event.
function handleConceptClick(conceptId) {
    // Forward the clicked concept ID to Gradio via its hidden input
    if (typeof gradioApp !== 'undefined') {
        const app = gradioApp();
        const hiddenInput = app.querySelector('#concept-selection');
        if (hiddenInput) {
            hiddenInput.value = conceptId;
            // Fire a change event so Gradio notices the new value
            const event = new Event('change');
            hiddenInput.dispatchEvent(event);
        }
    }
}
15
+
16
// Attach click listeners to every concept card currently in the DOM.
function addConceptCardListeners() {
    const conceptCards = document.querySelectorAll('.concept-card');
    conceptCards.forEach(card => {
        card.addEventListener('click', function() {
            const conceptId = this.getAttribute('data-concept-id');
            if (conceptId) {
                handleConceptClick(conceptId);
                // Visual feedback for the selected card
                highlightSelectedCard(this);
            }
        });
    });
}
30
+
31
// Move the highlight to the card that was just selected.
function highlightSelectedCard(selectedCard) {
    // Clear the highlight from every card, then mark the chosen one
    for (const card of document.querySelectorAll('.concept-card')) {
        card.classList.remove('selected-card');
    }
    selectedCard.classList.add('selected-card');
}
40
+
41
// Toggle a hover class on knowledge-graph SVG nodes as the pointer moves.
function addGraphNodeHoverEffects() {
    for (const node of document.querySelectorAll('#concept-graph svg .node')) {
        node.addEventListener('mouseenter', () => node.classList.add('node-hover'));
        node.addEventListener('mouseleave', () => node.classList.remove('node-hover'));
    }
}
53
+
54
// Apply responsive sizing and card styling to the knowledge-graph image.
function enhanceImageDisplay() {
    const container = document.querySelector('#concept-graph');
    if (!container) return;

    const img = container.querySelector('img');
    if (!img) return;

    // Apply the styles only after the image has finished loading
    img.onload = function() {
        this.style.maxWidth = '100%';
        this.style.height = 'auto';
        this.style.borderRadius = '8px';
        this.style.boxShadow = '0 4px 8px rgba(0,0,0,0.1)';
    };
}
70
+
71
// Initialize all interactions once the DOM is ready.
document.addEventListener('DOMContentLoaded', function() {
    // Knowledge-graph interactions
    const conceptGraph = document.querySelector('#concept-graph');
    if (conceptGraph) {
        const img = conceptGraph.querySelector('img');
        if (img) {
            enhanceImageDisplay();
        }

        const nodes = conceptGraph.querySelectorAll('g.node');
        nodes.forEach(node => {
            node.style.cursor = 'pointer';
            node.addEventListener('click', function() {
                const conceptId = this.getAttribute('data-concept-id');
                // 'main' appears to denote the root node; it is not selectable
                if (conceptId && conceptId !== 'main') {
                    handleConceptClick(conceptId);
                }
            });
        });
        // Hover effects for graph nodes
        addGraphNodeHoverEffects();
    }

    // Concept-card interactions
    addConceptCardListeners();

    // Inject the custom CSS
    addCustomStyles();
});
101
+
102
// Inject the custom stylesheet used by the concept UI.
// NOTE: the CSS text below is injected verbatim (its comments included).
function addCustomStyles() {
    const style = document.createElement('style');
    style.textContent = `
    /* 通用样式设置,支持中文 */
    body {
        font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", "Microsoft YaHei", "WenQuanYi Micro Hei", sans-serif;
    }

    /* 概念卡片样式 */
    .concept-card {
        transition: all 0.3s ease;
        border: 1px solid #ddd;
        border-radius: 8px;
        padding: 12px;
        margin-bottom: 10px;
        cursor: pointer;
    }
    .concept-card:hover {
        box-shadow: 0 2px 12px rgba(0,0,0,0.15);
        transform: translateY(-2px);
    }
    .selected-card {
        border-color: #2196F3;
        background-color: rgba(33, 150, 243, 0.05);
        box-shadow: 0 0 0 2px rgba(33, 150, 243, 0.3);
    }
    .concept-title {
        font-weight: bold;
        margin-bottom: 5px;
    }
    .concept-desc {
        font-size: 0.9em;
        color: #555;
    }

    /* 图表节点样式 */
    .node-hover {
        opacity: 0.8;
        transform: scale(1.05);
    }

    /* 知识图谱容器样式 */
    #concept-graph img {
        max-width: 100%;
        height: auto;
        border-radius: 8px;
        box-shadow: 0 4px 8px rgba(0,0,0,0.1);
    }
    `;
    document.head.appendChild(style);
}
154
+
155
// Re-attach listeners when Gradio swaps DOM elements.
// This is necessary because Gradio may dynamically replace rendered nodes.
const observer = new MutationObserver(function(mutations) {
    mutations.forEach(function(mutation) {
        if (mutation.addedNodes && mutation.addedNodes.length > 0) {
            // Re-bind click handlers when new concept cards appear
            for (let i = 0; i < mutation.addedNodes.length; i++) {
                const node = mutation.addedNodes[i];
                if (node.classList && node.classList.contains('concept-card')) {
                    addConceptCardListeners();
                    break;
                }
            }

            // Re-apply graph enhancements when the knowledge graph re-renders
            const conceptGraph = document.querySelector('#concept-graph');
            if (conceptGraph) {
                addGraphNodeHoverEffects();
                enhanceImageDisplay();
            }
        }
    });
});

// Start observing DOM changes once the page has loaded
window.addEventListener('load', function() {
    const targetNode = document.getElementById('concept-cards');
    if (targetNode) {
        observer.observe(targetNode, { childList: true, subtree: true });
    }

    // Also observe the knowledge-graph container
    const graphContainer = document.getElementById('concept-graph');
    if (graphContainer) {
        observer.observe(graphContainer, { childList: true, subtree: true });
    }
});
concept_handler.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Concept Handler Module - Contains mock data for fallback when API calls fail
3
+ """
4
+
5
# Mock concept decomposition result
# Static stand-in for the LLM decomposition response, used as a fallback when
# API calls fail (per the module docstring). Shape mirrors the real response:
# main_concept (str), sub_concepts (list of {id, name, description}),
# relationships (list of {source, target, type}).
MOCK_DECOMPOSITION_RESULT = {
    "main_concept": "Equation Solving",
    "sub_concepts": [
        {
            "id": "concept_1",
            "name": "Equality Properties",
            "description": "Performing the same add, subtract, multiply, or divide operations on both sides of an equation maintains the equality"
        },
        {
            "id": "concept_2",
            "name": "Transposition",
            "description": "Moving terms from one side of an equation to the other while changing their signs"
        },
        {
            "id": "concept_3",
            "name": "Combining Like Terms",
            "description": "Combining similar terms in the equation"
        },
        {
            "id": "concept_4",
            "name": "Solution Verification",
            "description": "Substituting the solution back into the original equation to verify that the equality holds"
        },
        {
            "id": "concept_5",
            "name": "Fractional Equations",
            "description": "Equations containing fractions, usually requiring finding a common denominator"
        },
        {
            "id": "concept_6",
            "name": "Algebraic Expression",
            "description": "Expression formed by numbers and letters through a finite number of arithmetic operations"
        }
    ],
    # Edges reference sub-concept ids; type is "prerequisite" or "related"
    "relationships": [
        {
            "source": "concept_1",
            "target": "concept_2",
            "type": "prerequisite"
        },
        {
            "source": "concept_2",
            "target": "concept_3",
            "type": "related"
        },
        {
            "source": "concept_3",
            "target": "concept_4",
            "type": "prerequisite"
        },
        {
            "source": "concept_1",
            "target": "concept_5",
            "type": "related"
        },
        {
            "source": "concept_6",
            "target": "concept_3",
            "type": "prerequisite"
        }
    ]
}
68
+
69
# Mock concept explanation result
# Static stand-in for the LLM explanation response, used as a fallback when
# API calls fail. Keys: explanation (str), examples, resources,
# practice_questions (lists of dicts).
MOCK_EXPLANATION_RESULT = {
    "explanation": "Equality properties are fundamental principles in mathematics, stating that when the same mathematical operations (addition, subtraction, multiplication, division) are performed on both sides of an equation, the equality relationship is maintained. This is the basic principle for solving equations. For example, in the equation x+3=5, subtracting 3 from both sides gives x=2, with the equality still holding.",
    "examples": [
        {
            "problem": "Solve the equation: 2x + 3 = 7",
            "solution": "2x + 3 = 7\nSubtract 3: 2x + 3 - 3 = 7 - 3\nSimplify: 2x = 4\nDivide both sides by 2: 2x ÷ 2 = 4 ÷ 2\nResult: x = 2",
            "difficulty": "Easy"
        },
        {
            "problem": "Solve the equation: 3x - 4 = 2x + 5",
            "solution": "3x - 4 = 2x + 5\nTranspose: 3x - 2x = 5 + 4\nCombine like terms: x = 9\nVerify: 3(9) - 4 = 2(9) + 5\n 27 - 4 = 18 + 5\n 23 = 23 ✓",
            "difficulty": "Medium"
        }
    ],
    # Placeholder links (example.com), not real learning resources
    "resources": [
        {
            "type": "Video",
            "title": "Equality Properties and Equation Basics",
            "description": "Detailed explanation of equality properties and their applications in equation solving",
            "link": "https://example.com/equality-properties"
        },
        {
            "type": "Article",
            "title": "Understanding Equality Properties",
            "description": "Understanding the concept of equality properties through illustrations and examples",
            "link": "https://example.com/understanding-equality"
        },
        {
            "type": "Interactive Tool",
            "title": "Equation Balance Trainer",
            "description": "Interactive tool to help understand the concept of balancing both sides of an equation",
            "link": "https://example.com/balance-equations"
        }
    ],
    "practice_questions": [
        {
            "question": "Solve the equation: 5x - 2 = 13",
            "answer": "x = 3",
            "difficulty": "Easy"
        },
        {
            "question": "Solve the equation: 4(x - 1) = 2(x + 5)",
            "answer": "x = 7",
            "difficulty": "Medium"
        },
        {
            "question": "Solve the equation: 2x/3 + 1 = 5/6",
            "answer": "x = -1/4",
            "difficulty": "Hard"
        }
    ]
}
concept_interaction.js ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// concept_interaction.js - handles concept-card interaction

// Initialize interactions once the page has loaded
document.addEventListener('DOMContentLoaded', function() {
    console.log('概念交互模块已加载');
    setupConceptCardInteractions();
    setupImageEnhancements();
});
9
+
10
// Wire up concept-card clicks via event delegation on document.body,
// so cards added after the initial render are handled too.
function setupConceptCardInteractions() {
    document.body.addEventListener('click', function(event) {
        const card = event.target.closest('.concept-card');
        if (!card) return;

        const conceptId = card.getAttribute('data-concept-id');
        if (!conceptId) {
            console.error('卡片没有设置 data-concept-id 属性');
            return;
        }

        console.log(`点击了概念卡片: ${conceptId}`);

        // Show the loading spinner in the detail panel
        showLoadingInDetailPanel();

        // Update the selected-card styling
        updateSelectedCard(card);

        // Notify Gradio of the selection
        triggerConceptSelection(conceptId);
    });
}
35
+
36
// Show a loading animation in the concept-detail panel.
function showLoadingInDetailPanel() {
    const detailContainer = document.querySelector('.concept-detail-box');
    if (detailContainer) {
        detailContainer.innerHTML = `
            <div class="loading">
                <div class="loading-spinner"></div>
                <div class="loading-text">正在生成概念详细解释...</div>
            </div>
        `;
    }
}
48
+
49
// Move the 'selected-card' highlight to the card that was just clicked.
function updateSelectedCard(selectedCard) {
    for (const card of document.querySelectorAll('.concept-card')) {
        card.classList.remove('selected-card');
    }
    selectedCard.classList.add('selected-card');
}
56
+
57
// Push the selected concept ID into Gradio's hidden input.
function triggerConceptSelection(conceptId) {
    const conceptSelection = document.getElementById('concept-selection');
    if (conceptSelection) {
        conceptSelection.value = conceptId;
        // A bubbling 'input' event triggers Gradio more reliably than 'change'
        conceptSelection.dispatchEvent(new Event('input', { bubbles: true }));
        console.log(`已触发概念选择事件: ${conceptId}`);
    } else {
        console.error('找不到概念选择输入框 (concept-selection)');
    }
}
69
+
70
// Watch the knowledge-graph container and restyle its image on re-render.
function setupImageEnhancements() {
    const graphContainer = document.getElementById('concept-graph');
    if (!graphContainer) return;

    // Re-apply image styling whenever child nodes change
    const watcher = new MutationObserver(function(records) {
        for (const record of records) {
            if (record.type !== 'childList') continue;
            const img = graphContainer.querySelector('img');
            if (img) {
                enhanceImage(img);
            }
        }
    });

    watcher.observe(graphContainer, { childList: true, subtree: true });
}
89
+
90
// Style the knowledge-graph image and give it a hover zoom effect.
function enhanceImage(img) {
    Object.assign(img.style, {
        maxWidth: '100%',
        height: 'auto',
        borderRadius: '8px',
        boxShadow: '0 4px 8px rgba(0,0,0,0.1)',
        transition: 'transform 0.3s ease, box-shadow 0.3s ease'
    });

    // Hover effect: slight zoom with a deeper shadow
    img.onmouseover = function() {
        this.style.transform = 'scale(1.01)';
        this.style.boxShadow = '0 6px 12px rgba(0,0,0,0.15)';
    };
    img.onmouseout = function() {
        this.style.transform = 'scale(1)';
        this.style.boxShadow = '0 4px 8px rgba(0,0,0,0.1)';
    };
}
108
+
109
// Diagnostics: audit every concept card's data-concept-id attribute.
function checkAllConceptCards() {
    const cards = document.querySelectorAll('.concept-card');
    console.log(`发现 ${cards.length} 个概念卡片`);

    const idMap = {};
    cards.forEach((card, index) => {
        const id = card.getAttribute('data-concept-id');
        console.log(`卡片 #${index + 1}: ID=${id}`);
        if (id) {
            idMap[id] = (idMap[id] || 0) + 1;
        }
    });

    // Report any duplicate IDs
    const duplicates = Object.entries(idMap).filter(([id, count]) => count > 1);
    if (duplicates.length > 0) {
        console.error('发现重复的概念ID:', duplicates);
    }

    return {
        totalCards: cards.length,
        cardsWithIds: Object.keys(idMap).length,
        duplicateIds: duplicates.map(([id]) => id)
    };
}

// Expose a global diagnostics entry point
window.diagnoseConcepts = checkAllConceptCards;

// Log uncaught JavaScript errors for debugging
window.addEventListener('error', function(e) {
    console.error('🔴 JavaScript 错误:', e.message, 'at', e.filename, 'line', e.lineno);
});
config.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
Configuration settings for the Educational LLM Application
"""
# FIX: the docstring originally sat *after* `import os`, making it a no-op
# string expression rather than the module docstring; it now comes first.
import os

# OpenAI API settings
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")  # Or replace with your actual API key

OPENAI_MODEL = "gpt-4o"  # Model name
OPENAI_TIMEOUT = 30  # Timeout in seconds
OPENAI_MAX_RETRIES = 3  # Maximum number of retries

# Application settings
DEBUG_MODE = True  # Enable debug output
USE_FALLBACK_DATA = True  # Set to True to enable fallback to mock data when API calls fail
CACHE_ENABLED = False  # Disable caching to always get fresh responses
few_shots_examples.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
## Stores all the few-shot examples

# Few-shot examples for concept decomposition
# Example 1: "Algebra". Same shape as the concept-decomposition output:
# main_concept, Explanation, sub_concepts (id/name/description/difficulty),
# relationships (source/target/type/explanation).
CONCEPT_DECOMPOSITION_EXAMPLES_1= {
    "main_concept": "Algebra",
    "Explanation": "Algebra is a branch of mathematics that uses symbols and letters to represent numbers and their relationships. It helps solve equations and understand patterns.",
    "sub_concepts": [
        {
            "id": "1.1",
            "name": "Functions",
            "description": "Functions show how one quantity depends on another and are used to model relationships.",
            "difficulty": "basic"
        },
        {
            "id": "1.2",
            "name": "The Domain and Range of a Function",
            "description": "Domain is the set of input values, and range is the set of output values for a function.",
            "difficulty": "basic"
        },
        {
            "id": "1.3",
            "name": "Intervals and Interval Notation",
            "description": "A way to write ranges of values between two endpoints.",
            "difficulty": "basic"
        },
        {
            "id": "1.4",
            "name": "Even and Odd Functions",
            "description": "Even functions are symmetrical about the y-axis; odd functions are symmetrical about the origin.",
            "difficulty": "intermediate"
        },
        {
            "id": "1.5",
            "name": "Function Graph Transformations: Vertical and Horizontal Shifts",
            "description": "Moves the graph of a function up, down, left, or right.",
            "difficulty": "intermediate"
        },
        {
            "id": "1.6",
            "name": "Function Graph Transformations: Stretching, Reflecting, and Compressing",
            "description": "Changes the shape of a graph by stretching, flipping, or squeezing it.",
            "difficulty": "intermediate"
        },
        {
            "id": "1.7",
            "name": "Function Graphs: Combined Transformations",
            "description": "Applies multiple changes to a function’s graph to show complex transformations.",
            "difficulty": "advanced"
        },
        {
            "id": "1.8",
            "name": "Arithmetic Operations on Functions",
            "description": "Combines functions by adding, subtracting, multiplying, or dividing them.",
            "difficulty": "intermediate"
        },
        {
            "id": "1.9",
            "name": "Composition of Functions",
            "description": "Applies one function to the result of another to build new functions.",
            "difficulty": "advanced"
        },
        {
            "id": "1.10",
            "name": "One-to-One Functions",
            "description": "A function where each input has a unique output, important for finding inverses.",
            "difficulty": "advanced"
        },
        {
            "id": "1.11",
            "name": "Linear, Quadratic, and Cubic Function Models",
            "description": "Basic function types that model different patterns of change.",
            "difficulty": "basic"
        },
        {
            "id": "1.12",
            "name": "Exponential Models",
            "description": "Functions that model rapid growth or decay, used in real-life applications.",
            "difficulty": "intermediate"
        }
    ],
    "relationships": [
        {
            "source": "1.1",
            "target": "1.2",
            "type": "prerequisite",
            "explanation": "Understanding functions is necessary to define their domain and range."
        },
        {
            "source": "1.2",
            "target": "1.3",
            "type": "related",
            "explanation": "Interval notation is commonly used when expressing domain and range."
        },
        {
            "source": "1.1",
            "target": "1.4",
            "type": "prerequisite",
            "explanation": "You need to understand basic function behavior before classifying them as even or odd."
        },
        {
            "source": "1.1",
            "target": "1.5",
            "type": "prerequisite",
            "explanation": "Understanding functions is key to learning how their graphs shift."
        },
        {
            "source": "1.5",
            "target": "1.6",
            "type": "prerequisite",
            "explanation": "Once shifts are understood, more complex transformations like stretching or reflecting can be learned."
        },
        {
            "source": "1.6",
            "target": "1.7",
            "type": "prerequisite",
            "explanation": "Understanding individual transformations helps in learning combined transformations."
        },
        {
            "source": "1.1",
            "target": "1.8",
            "type": "prerequisite",
            "explanation": "Understanding functions is necessary before performing operations on them."
        },
        {
            "source": "1.8",
            "target": "1.9",
            "type": "prerequisite",
            "explanation": "You need to be comfortable with function operations before learning function composition."
        },
        {
            "source": "1.9",
            "target": "1.10",
            "type": "prerequisite",
            "explanation": "Understanding composition is helpful when studying one-to-one functions and their inverses."
        },
        {
            "source": "1.1",
            "target": "1.11",
            "type": "related",
            "explanation": "Function models are real-world applications of the basic function concept."
        },
        {
            "source": "1.11",
            "target": "1.12",
            "type": "prerequisite",
            "explanation": "Linear, quadratic, and cubic models are usually taught before exponential models."
        }
    ]
}
150
+
151
# Example 2: "Normal Distribution". Same structure as example 1.
CONCEPT_DECOMPOSITION_EXAMPLES_2 = {
    "main_concept": "Normal Distribution",
    "Explanation": "A normal distribution is a symmetric, bell-shaped curve where the mean represents the center and the variance measures how spread out the data is. Most data falls close to the mean, especially within a few standard deviations.",
    "sub_concepts": [
        {
            "id": "1.1",
            "name": "Mean of a Normal Distribution",
            "description": "The average value of the data and the center point of the bell curve.",
            "difficulty": "basic"
        },
        {
            "id": "1.2",
            "name": "Variance of a Normal Distribution",
            "description": "A measure of how much the data spreads out from the mean.",
            "difficulty": "intermediate"
        },
        {
            "id": "1.3",
            "name": "Standard Deviation",
            "description": "The square root of variance; it shows the average distance of data from the mean.",
            "difficulty": "intermediate"
        },
        {
            "id": "1.4",
            "name": "Properties of the Normal Distribution",
            "description": "The distribution is symmetric with the mean, median, and mode all equal, forming a bell curve.",
            "difficulty": "basic"
        },
        {
            "id": "1.5",
            "name": "Empirical Rule (68-95-99.7 Rule)",
            "description": "Describes how data is spread in a normal distribution using standard deviations.",
            "difficulty": "intermediate"
        }
    ],
    "relationships": [
        {
            "source": "1.1",
            "target": "1.4",
            "type": "prerequisite",
            "explanation": "Understanding the mean is necessary to grasp the symmetry and central tendency of the normal distribution."
        },
        {
            "source": "1.2",
            "target": "1.3",
            "type": "prerequisite",
            "explanation": "Standard deviation is derived from the variance, so variance must be understood first."
        },
        {
            "source": "1.3",
            "target": "1.5",
            "type": "prerequisite",
            "explanation": "The empirical rule is based on standard deviations, so understanding standard deviation is essential."
        },
        {
            "source": "1.4",
            "target": "1.5",
            "type": "related",
            "explanation": "The empirical rule is one of the defining properties of a normal distribution."
        }
    ]
}
llm_chain.py ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ LLM Chain implementation using Langchain for educational concept analysis
3
+ """
4
+
5
+ from typing import Dict, Any, List
6
+ from langchain.chat_models import ChatOpenAI
7
+ from langchain.prompts import ChatPromptTemplate
8
+ from langchain.output_parsers import PydanticOutputParser
9
+ from langchain.chains import LLMChain
10
+ from pydantic import BaseModel, Field
11
+ from config import OPENAI_API_KEY, OPENAI_MODEL
12
+
13
+ # Define Pydantic models for structured output
14
class Concept(BaseModel):
    """A single node in the concept knowledge graph produced by decomposition."""
    id: str = Field(description="Unique identifier for the concept")
    name: str = Field(description="Name of the concept")
    description: str = Field(description="Brief description of the concept")
    # Free-form string rather than an Enum so the parser tolerates LLM variance
    difficulty: str = Field(description="Difficulty level: basic, intermediate, or advanced")
20
+
21
class Relationship(BaseModel):
    """A directed edge between two concepts in the knowledge graph."""
    source: str = Field(description="Source concept ID")
    target: str = Field(description="Target concept ID")
    # Field name "type" shadows the builtin but is required by the JSON schema
    type: str = Field(description="Type of relationship: prerequisite or related")
    explanation: str = Field(description="Explanation of why this relationship exists")
27
+
28
class ConceptMap(BaseModel):
    """Top-level result of question decomposition: concepts plus their edges."""
    main_concept: str = Field(description="Main concept being analyzed")
    sub_concepts: List[Concept] = Field(description="List of sub-concepts")
    relationships: List[Relationship] = Field(description="List of relationships between concepts")
33
+
34
class Example(BaseModel):
    """A worked example (also reused for practice questions in ConceptExplanation)."""
    problem: str = Field(description="Example problem")
    solution: str = Field(description="Step-by-step solution")
    difficulty: str = Field(description="Difficulty level: Easy, Medium, or Hard")
39
+
40
class Resource(BaseModel):
    """A suggested learning resource attached to a concept explanation."""
    type: str = Field(description="Type of resource (Video/Article/Interactive/Book)")
    title: str = Field(description="Resource title")
    description: str = Field(description="Resource description")
    # Declared str (not Optional) even though described as optional; the LLM
    # is expected to always emit some value here
    link: str = Field(description="Optional resource link")
46
+
47
class ConceptExplanation(BaseModel):
    """Full tutoring payload for one concept: prose, examples, resources, drills."""
    explanation: str = Field(description="Detailed concept explanation")
    examples: List[Example] = Field(description="List of example problems and solutions")
    resources: List[Resource] = Field(description="List of learning resources")
    # Reuses the Example model: "problem" holds the question, "solution" the answer
    practice_questions: List[Example] = Field(description="List of practice questions")
53
+
54
class EducationalLLMChain:
    """
    Chain for processing educational concepts using LLM

    Wraps two Langchain LLMChains: one that decomposes a question into a
    ConceptMap, and one that produces a ConceptExplanation for a single
    concept. Model output is parsed with PydanticOutputParser.
    """
    def __init__(self):
        """Initialize the LLM and parsers"""
        # Low temperature for more deterministic, schema-conforming output
        self.llm = ChatOpenAI(
            model=OPENAI_MODEL,
            temperature=0.1,
            openai_api_key=OPENAI_API_KEY
        )

        # Initialize output parsers
        self.concept_parser = PydanticOutputParser(pydantic_object=ConceptMap)
        self.explanation_parser = PydanticOutputParser(pydantic_object=ConceptExplanation)

        # Create decomposition chain
        self.decomposition_chain = self._create_decomposition_chain()

        # Create explanation chain
        self.explanation_chain = self._create_explanation_chain()

    def _create_decomposition_chain(self) -> LLMChain:
        """
        Create chain for concept decomposition

        Returns:
            LLMChain for decomposing concepts
        """
        template = """You are an expert educational AI tutor.

Analyze this question for a {grade} level student studying {subject}.

Question: {question}

Student Background:
- Grade Level: {grade}
- Subject: {subject}
- Learning Needs: {learning_needs}

Break down the concepts needed to understand this question into a knowledge graph.
Consider the student's grade level and background knowledge.

{format_instructions}
"""

        # format_instructions is bound once as a partial variable; the partial's
        # value (which contains literal JSON braces) is substituted at format
        # time and is not itself parsed as a template.
        prompt = ChatPromptTemplate.from_template(
            template=template,
            partial_variables={
                "format_instructions": self.concept_parser.get_format_instructions()
            }
        )

        return LLMChain(llm=self.llm, prompt=prompt)

    def _create_explanation_chain(self) -> LLMChain:
        """
        Create chain for concept explanation

        Returns:
            LLMChain for explaining concepts
        """
        template = """You are an expert educational tutor.

Explain this concept for a {grade} level student studying {subject}:

Concept: {concept_name}
Description: {concept_description}

Student Background:
- Grade Level: {grade}
- Subject: {subject}
- Learning Needs: {learning_needs}

Provide a detailed explanation, examples, resources, and practice questions.

{format_instructions}
"""

        prompt = ChatPromptTemplate.from_template(
            template=template,
            partial_variables={
                "format_instructions": self.explanation_parser.get_format_instructions()
            }
        )

        return LLMChain(llm=self.llm, prompt=prompt)

    async def decompose_concepts(
        self,
        question: str,
        grade: str,
        subject: str,
        learning_needs: str
    ) -> ConceptMap:
        """
        Decompose a question into concepts

        Args:
            question: User's question
            grade: Educational grade level
            subject: Subject area
            learning_needs: Learning needs/goals

        Returns:
            Structured concept map

        Raises:
            OutputParserException: if the model reply does not match ConceptMap
        """
        # arun accepts a single dict positional for multi-input chains;
        # it is forwarded to Chain.__call__ as the inputs mapping.
        response = await self.decomposition_chain.arun({
            "question": question,
            "grade": grade,
            "subject": subject,
            "learning_needs": learning_needs
        })

        return self.concept_parser.parse(response)

    async def explain_concept(
        self,
        concept_name: str,
        concept_description: str,
        grade: str,
        subject: str,
        learning_needs: str
    ) -> ConceptExplanation:
        """
        Generate detailed concept explanation

        Args:
            concept_name: Name of concept to explain
            concept_description: Brief concept description
            grade: Educational grade level
            subject: Subject area
            learning_needs: Learning needs/goals

        Returns:
            Structured concept explanation

        Raises:
            OutputParserException: if the model reply does not match ConceptExplanation
        """
        response = await self.explanation_chain.arun({
            "concept_name": concept_name,
            "concept_description": concept_description,
            "grade": grade,
            "subject": subject,
            "learning_needs": learning_needs
        })

        return self.explanation_parser.parse(response)
llm_utils.py ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Utility functions for LLM-related operations
3
+ """
4
+ import json
5
+ import os
6
+ import time
7
+ from typing import Dict, List, Any, Optional
8
+
9
+ # Import from OpenAI newer SDK
10
+ from openai import OpenAI
11
+
12
+ # Import local modules
13
+ from cache_utils import cached_llm_call, get_from_cache, save_to_cache
14
+ from config import OPENAI_API_KEY, OPENAI_MODEL, OPENAI_TIMEOUT, OPENAI_MAX_RETRIES, USE_FALLBACK_DATA, DEBUG_MODE
15
+
16
def call_llm(system_prompt: str, user_prompt: str, mock_data: Optional[Dict] = None) -> Dict[str, Any]:
    """
    Call LLM with caching, improved error handling and response validation

    Args:
        system_prompt: System role prompt
        user_prompt: User input prompt
        mock_data: Mock data returned as a fallback when the call fails and
            USE_FALLBACK_DATA is enabled

    Returns:
        Parsed JSON response from LLM

    Raises:
        ValueError: If response format is invalid (and no fallback is available)
        Exception: For other API call failures (and no fallback is available)
    """
    cache_key = f"{system_prompt}_{user_prompt}"
    cached_response = get_from_cache(cache_key)

    if cached_response:
        try:
            if DEBUG_MODE:
                print("Using cached response")
            return json.loads(cached_response)
        except json.JSONDecodeError:
            # A corrupt cache entry must not break the call; fall through and
            # fetch a fresh response instead of raising here.
            if DEBUG_MODE:
                print("Ignoring corrupt cache entry")

    try:
        # Apply the configured timeout and retry policy. These settings were
        # imported from config but previously never passed to the client.
        client = OpenAI(
            api_key=OPENAI_API_KEY,
            timeout=OPENAI_TIMEOUT,
            max_retries=OPENAI_MAX_RETRIES,
        )

        # Make API call with temperature=0.1 for more consistent outputs;
        # response_format asks the model to emit a JSON object.
        response = client.chat.completions.create(
            model=OPENAI_MODEL,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt}
            ],
            temperature=0.1,
            response_format={"type": "json_object"}
        )

        content = response.choices[0].message.content

        # Validate JSON response before caching it
        try:
            json_response = json.loads(content)
            validate_response_format(json_response)
            save_to_cache(cache_key, content)
            return json_response

        except json.JSONDecodeError:
            raise ValueError("Invalid JSON response from LLM")

    except Exception as e:
        # NOTE: the ValueError raised above also lands here, so malformed LLM
        # output falls back to mock_data when fallback is enabled.
        if DEBUG_MODE:
            print(f"LLM API call failed: {str(e)}")
        if USE_FALLBACK_DATA and mock_data:
            return mock_data
        raise
72
+
73
def validate_response_format(response: Dict[str, Any]) -> None:
    """
    Validate the format of LLM response

    Supports the two response shapes produced by this app:
    - concept decomposition (identified by a "main_concept" key)
    - concept explanation (identified by an "explanation" key)

    Args:
        response: Parsed JSON response

    Raises:
        ValueError: If required fields are missing or the shape is unknown
    """
    # Field lists mirror the JSON schemas requested in prompts.py.
    # Fixed: the explanation entry previously required "key_points" and
    # "practice", which the explanation prompt never asks for, so every valid
    # explanation response was rejected and the app fell back to mock data.
    required_fields = {
        "decomposition": ["main_concept", "sub_concepts", "relationships"],
        "explanation": ["explanation", "examples", "resources", "practice_questions"]
    }

    # Determine response type and validate fields
    if "main_concept" in response:
        fields = required_fields["decomposition"]
    elif "explanation" in response:
        fields = required_fields["explanation"]
    else:
        raise ValueError("Unknown response format")

    for field in fields:
        if field not in response:
            raise ValueError(f"Missing required field: {field}")
99
+
100
def _do_decompose_concepts(params: Dict[str, Any]) -> Dict[str, Any]:
    """
    Run the actual concept-decomposition LLM call (cache-miss path).

    Args:
        params: Parameter dictionary with "user_profile" and "question" keys

    Returns:
        Decomposed concept data as returned by call_llm
    """
    # Imported lazily to avoid a circular import at module load time
    from prompts import generate_decomposition_prompt
    from concept_handler import MOCK_DECOMPOSITION_RESULT

    profile = params.get("user_profile", {})

    system_prompt, user_prompt = generate_decomposition_prompt(
        params.get("question", ""),
        profile.get("grade", "Not specified"),
        profile.get("subject", "Not specified"),
        profile.get("needs", "Not specified"),
    )

    return call_llm(system_prompt, user_prompt, MOCK_DECOMPOSITION_RESULT)
125
+
126
def decompose_concepts(user_profile: Dict[str, str], question: str) -> Dict[str, Any]:
    """
    Break a user question into multiple concepts via the LLM, with caching.

    Args:
        user_profile: User profile information (grade/subject/needs)
        question: User question

    Returns:
        Dictionary containing main concept, sub-concepts, and relationships
    """
    # cached_llm_call keys on the params dict and only invokes the worker on
    # a cache miss.
    return cached_llm_call(
        "decompose",
        {"user_profile": user_profile, "question": question},
        _do_decompose_concepts,
    )
143
+
144
def _do_get_concept_explanation(params: Dict[str, Any]) -> Dict[str, Any]:
    """
    Run the actual concept-explanation LLM call (cache-miss path).

    Args:
        params: Parameter dictionary with "user_profile", "concept_name" and
            "concept_description" keys ("concept_id" participates only in the
            cache key)

    Returns:
        Concept explanation data as returned by call_llm
    """
    # Imported lazily to avoid a circular import at module load time
    from prompts import generate_explanation_prompt
    from concept_handler import MOCK_EXPLANATION_RESULT

    profile = params.get("user_profile", {})

    system_prompt, user_prompt = generate_explanation_prompt(
        params.get("concept_name", ""),
        params.get("concept_description", ""),
        "",  # Original question (not needed here)
        profile.get("grade", "Not specified"),
        profile.get("subject", "Not specified"),
        profile.get("needs", "Not specified"),
    )

    return call_llm(system_prompt, user_prompt, MOCK_EXPLANATION_RESULT)
173
+
174
def get_concept_explanation(user_profile: Dict[str, str], concept_id: str,
                          concept_name: str, concept_description: str) -> Dict[str, Any]:
    """
    Get a detailed explanation and learning resources for one concept, cached.

    Args:
        user_profile: User profile information (grade/subject/needs)
        concept_id: Concept ID (used for cache keying)
        concept_name: Concept name
        concept_description: Brief concept description

    Returns:
        Dictionary containing explanation, examples, and resources
    """
    return cached_llm_call(
        "explain",
        {
            "user_profile": user_profile,
            "concept_id": concept_id,
            "concept_name": concept_name,
            "concept_description": concept_description,
        },
        _do_get_concept_explanation,
    )
prompts.py ADDED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Prompt Template Module - Stores functions for generating LLM prompts
3
+ """
4
+
5
+ from typing import Tuple
6
+
7
+ import few_shots_examples
8
+
9
+
10
def generate_decomposition_prompt(question: str, grade: str, subject: str, learning_needs: str) -> Tuple[str, str]:
    """
    Generate prompts for concept decomposition based on user background

    Args:
        question: User's question
        grade: Educational grade level
        subject: Subject area
        learning_needs: Specific learning needs/goals

    Returns:
        Tuple of (system_prompt, user_prompt)
    """
    # System prompt: fixed instructions plus the required JSON output schema.
    # NOTE(review): the schema shows a capitalized "Explanation" key, while
    # validate_response_format only checks main_concept/sub_concepts/
    # relationships -- confirm consumers before renaming the key.
    system_prompt = """You are an expert educational AI tutor. Your task is to break down complex questions into fundamental concepts that are appropriate for the student's grade level and background knowledge.

Please analyze the question and create a knowledge graph that shows:
1. The main concept being asked about
2. Essential sub-concepts needed to understand the main concept
3. The relationships between these concepts (prerequisite or related)

Consider the student's grade level and subject background when determining:
- Which concepts to include
- How detailed the explanations should be
- The appropriate terminology to use
- The complexity level of relationships

Your response must be in the following JSON format:
{
    "main_concept": "Core concept name",
    "Explanation": "Brief, grade-appropriate explanation of the main concept",
    "sub_concepts": [
        {
            "id": "unique_id",
            "name": "Concept name",
            "description": "Brief, grade-appropriate description",
            "difficulty": "basic|intermediate|advanced"
        }
    ],
    "relationships": [
        {
            "source": "concept_id",
            "target": "concept_id",
            "type": "prerequisite|related",
            "explanation": "Why this relationship exists"
        }
    ]
}"""

    # User prompt: task framing, two few-shot examples (rendered via str() of
    # the example dicts), then the actual question and student background.
    user_prompt = f"""
Break down the concepts needed to understand this question, considering:
1. The student's current grade level and subject knowledge
2. Any specific learning needs mentioned
3. The logical progression of concepts needed
4. Appropriate difficulty levels for each concept

Please ensure all explanations and terminology are appropriate for a {grade} level student.


Here is an example:
Input: "What is Algebra"
OUTPUT: {str(few_shots_examples.CONCEPT_DECOMPOSITION_EXAMPLES_1)}

Input: "What is the relationship between the mean and variance of a normal distribution."
OUTPUT: {str(few_shots_examples.CONCEPT_DECOMPOSITION_EXAMPLES_2)},

Please analyze this question for a {grade} level student studying {subject}.

Question: {question}

Student Background:
- Grade Level: {grade}
- Subject: {subject}
- Learning Needs: {learning_needs if learning_needs else 'Not specified'}

"""

    return system_prompt, user_prompt
87
+
88
+
89
def generate_explanation_prompt(concept_name, concept_description, question, grade, subject, learning_needs):
    """
    Generate prompt for concept explanation

    Args:
        concept_name: Concept name
        concept_description: Concept description
        question: Original question
        grade: Grade level
        subject: Subject
        learning_needs: Learning needs

    Returns:
        Tuple of system prompt and user prompt
    """
    # The JSON schema part stays a plain (non-f) string so its literal braces
    # need no escaping.
    system_prompt = """You are an AI assistant in the educational field, specializing in explaining academic concepts and providing constructive learning resources.
Your task is to deeply explain the specified concept, providing examples, resources, and practice questions.
Your answer must be in valid JSON format, including the following fields:
{
    "explanation": "Detailed concept explanation",
    "examples": [
        {
            "problem": "Example problem",
            "solution": "Step-by-step solution",
            "difficulty": "Difficulty level (Easy/Medium/Hard)"
        },
        ...
    ],
    "resources": [
        {
            "type": "Resource type (Video/Article/Interactive/Book)",
            "title": "Resource title",
            "description": "Resource description",
            "link": "Link (optional)"
        },
        ...
    ],
    "practice_questions": [
        {
            "question": "Practice question",
            "answer": "Answer",
            "difficulty": "Difficulty level (Easy/Medium/Hard)"
        },
        ...
    ]
}
"""
    # Fixed: the closing guidance used {grade}/{subject} inside a plain string,
    # so the placeholders were sent to the model literally. Append this part as
    # an f-string so the student's actual grade and subject are filled in.
    system_prompt += f"""The explanation should be clear, comprehensive, and appropriate for a {grade} level {subject} student's language and difficulty level.
Examples should include 1-3 items, ranging from simple to complex, with step-by-step solutions.
Resources should be diverse, including videos, articles, interactive tools, etc.
Practice questions should include 2-4 items of varying difficulty to help students solidify the concept."""

    user_prompt = f"""Please explain the following concept in detail, considering the student's grade, subject background, and learning needs:

Concept: {concept_name}
Description: {concept_description}
Original Question: {question}
Grade: {grade}
Subject: {subject}
Learning Needs: {learning_needs if learning_needs else "Not specified"}

Please provide a detailed explanation, relevant examples, learning resources, and practice questions, and return JSON in the specified format."""

    return system_prompt, user_prompt
152
+
153
+
154
# Prompt template for decomposing concepts.
# NOTE(review): this template contains literal JSON braces, so filling it with
# str.format() would raise a KeyError on "{"; callers presumably substitute
# {grade}/{subject}/{needs}/{question} some other way (e.g. .replace) -- confirm.
CONCEPT_DECOMPOSITION_PROMPT = """
Based on the user's question and profile information, please break down the question into multiple essential basic concepts, and output the relationships between these concepts in JSON format.

User Profile:
- Grade: {grade}
- Subject: {subject}
- Needs: {needs}

User Question: {question}

Please follow the format below for your response:
```json
{
    "main_concept": "Main concept name",
    "sub_concepts": [
        {
            "id": "concept_1",
            "name": "Concept 1 name",
            "description": "Brief description of concept 1"
        },
        {
            "id": "concept_2",
            "name": "Concept 2 name",
            "description": "Brief description of concept 2"
        }
        // More concepts...
    ],
    "relationships": [
        {
            "source": "concept_1",
            "target": "concept_2",
            "type": "prerequisite" // Can be "prerequisite" or "related"
        }
        // More relationships...
    ]
}
```

Please ensure the generated JSON is correctly formatted, each concept has a unique ID, and the relationships clearly express the dependencies between concepts.
"""
195
+
196
# Prompt template for generating concept explanations and learning resources.
# NOTE(review): same hazard as CONCEPT_DECOMPOSITION_PROMPT -- literal JSON
# braces make str.format() unusable on this string; confirm how callers fill
# {grade}/{subject}/{needs}/{concept_name}/{concept_description}.
CONCEPT_EXPLANATION_PROMPT = """
Please generate detailed explanations, examples, and learning resource suggestions for the following concept based on the user's profile information.

User Profile:
- Grade: {grade}
- Subject: {subject}
- Needs: {needs}

Selected Concept: {concept_name}
Concept Description: {concept_description}

Please provide your answer in the following format:
```json
{
    "explanation": "Detailed explanation of the concept, appropriate for the user's grade level...",
    "examples": [
        {
            "problem": "Example 1...",
            "solution": "Solution process and answer...",
            "difficulty": "Easy/Medium/Hard"
        },
        // More examples...
    ],
    "resources": [
        {
            "type": "Video/Book/Website",
            "title": "Resource title",
            "description": "Resource description",
            "link": "Link (optional)"
        },
        // More resources...
    ],
    "practice_questions": [
        {
            "question": "Practice question 1...",
            "answer": "Answer...",
            "difficulty": "Easy/Medium/Hard"
        },
        // More practice questions...
    ]
}
```

Please ensure the content is appropriate for the user's grade level, the explanations are easy to understand, and provide valuable learning resources and examples.
"""
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ gradio==5.23.1
2
+ langchain==0.0.352
3
+ matplotlib==3.7.3
4
+ networkx==3.1
5
+ openai==1.68.2
6
+ pydantic==2.10.6
7
+ langchain-core==0.1.1
run.bat ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
@echo off
REM Batch script to start the Educational LLM Application

REM Check Python environment
where python >nul 2>nul
if %ERRORLEVEL% neq 0 (
    echo Error: Python is required but not found. Please install Python.
    exit /b 1
)

REM Create the virtual environment on first run, then activate it.
REM "if errorlevel 1" is used inside the parenthesized block because
REM %ERRORLEVEL% is expanded at parse time there and would be stale.
if not exist venv (
    echo Creating Python virtual environment...
    python -m venv venv
    if errorlevel 1 (
        echo Error: Unable to create virtual environment.
        exit /b 1
    )
    call venv\Scripts\activate
    echo Installing dependencies...
    pip install -r requirements.txt
    if errorlevel 1 (
        echo Error: Unable to install dependencies.
        exit /b 1
    )
) else (
    call venv\Scripts\activate
)

REM Run application
echo Starting Educational LLM Application...
python app.py

REM Deactivate virtual environment (deactivate.bat is provided by the venv;
REM calling it by path does not depend on PATH being updated)
call venv\Scripts\deactivate.bat
run.sh ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Script to start the Educational LLM Application

# Color definitions
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Welcome message
echo -e "${GREEN}=== Educational LLM Application Startup Script ===${NC}"
echo -e "${YELLOW}Setting up environment...${NC}"

# Require python3 on PATH
if ! command -v python3 &> /dev/null; then
    echo "Error: Python3 not found. Please install Python3 first."
    exit 1
fi

# Create the virtual environment on first run
if [ ! -d "venv" ]; then
    echo -e "${YELLOW}Creating virtual environment...${NC}"
    if ! python3 -m venv venv; then
        echo "Error: Unable to create virtual environment. Please check your Python installation."
        exit 1
    fi
fi

# Activate virtual environment
echo -e "${YELLOW}Activating virtual environment...${NC}"
if ! source venv/bin/activate; then
    echo "Error: Unable to activate virtual environment."
    exit 1
fi

# Install the pinned dependencies
echo -e "${YELLOW}Installing dependencies...${NC}"
if ! pip install -r requirements.txt; then
    echo "Error: Unable to install dependencies. Please check your network connection and requirements.txt file."
    exit 1
fi

# Ensure cache directory exists
echo -e "${YELLOW}Creating cache directory...${NC}"
mkdir -p cache

# Start application
echo -e "${GREEN}Starting Educational LLM Application...${NC}"
echo -e "${YELLOW}The application will open in your browser${NC}"
python app.py

# Exit virtual environment
deactivate
visualization.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Visualization Module - Generate concept knowledge graphs
3
+ """
4
+
5
+ import matplotlib.pyplot as plt
6
+ import networkx as nx
7
+ import matplotlib
8
+ import io
9
+ import base64
10
+ import os
11
+ from typing import Dict, Any, List
12
+
13
# Ensure using Agg backend (no need for GUI)
matplotlib.use('Agg')

# Set up Chinese font support
# Try to find suitable Chinese fonts
font_found = False
chinese_fonts = ['SimHei', 'Microsoft YaHei', 'WenQuanYi Micro Hei', 'AR PL UMing CN', 'STSong', 'NSimSun', 'FangSong', 'KaiTi']
for font in chinese_fonts:
    try:
        # Fixed: fallback_to_default=False makes findfont raise when the font
        # is genuinely missing. The default behaviour silently returns a
        # fallback font path, which made this loop always "find" the first
        # candidate even on systems without it.
        matplotlib.font_manager.findfont(font, fallback_to_default=False)
        matplotlib.rcParams['font.sans-serif'] = [font, 'DejaVu Sans', 'Arial Unicode MS', 'sans-serif']
        print(f"Using Chinese font: {font}")
        font_found = True
        break
    except Exception:
        # Font not installed (or lookup failed) -- try the next candidate.
        continue

if not font_found:
    print("Warning: No suitable Chinese font found, using default font")
    matplotlib.rcParams['font.sans-serif'] = ['DejaVu Sans', 'Arial Unicode MS', 'sans-serif']

matplotlib.rcParams['axes.unicode_minus'] = False
matplotlib.rcParams['font.size'] = 10
36
+
37
def create_network_graph(concepts_data: Dict[str, Any]) -> str:
    """
    Create an enhanced network visualization of concept relationships

    Only sub-concepts are drawn as nodes; the main concept is shown in the
    figure title instead of as a central node. Edges are styled by type:
    solid red arrows for prerequisites, dashed blue arrows for related.

    Args:
        concepts_data: Dictionary containing concept hierarchy and relationships
            (expects "sub_concepts", "relationships" and "main_concept" keys as
            produced by the decomposition prompt)

    Returns:
        Base64 encoded PNG image as data URL
    """
    G = nx.DiGraph()

    # Clear any existing plots (this module relies on pyplot's global state)
    plt.clf()
    plt.close('all')

    # Increase figure size and DPI for better display
    plt.figure(figsize=(14, 10), dpi=150, facecolor='white')

    # Add nodes with difficulty-based colors
    difficulty_colors = {
        'basic': '#90CAF9',  # Light blue
        'intermediate': '#FFB74D',  # Orange
        'advanced': '#EF5350'  # Red
    }

    # Only add subconcepts (skip main concept)
    for concept in concepts_data.get("sub_concepts", []):
        concept_id = concept.get("id")
        concept_name = concept.get("name")
        difficulty = concept.get("difficulty", "basic")

        if concept_id and concept_name:
            G.add_node(
                concept_id,
                name=concept_name,
                type="sub",
                difficulty=difficulty,
                color=difficulty_colors.get(difficulty, '#90CAF9')
            )

    # Add relationships between subconcepts only
    for relation in concepts_data.get("relationships", []):
        source = relation.get("source")
        target = relation.get("target")
        rel_type = relation.get("type")

        # Skip relationships involving main concept
        if (source and target and
            source in G.nodes and target in G.nodes):  # Only add edges between existing subconcepts
            G.add_edge(
                source,
                target,
                type=rel_type
            )

    # Optimize layout parameters and increase node spacing
    pos = nx.spring_layout(
        G,
        k=2.0,  # Increase node spacing
        iterations=100,  # Increase iterations for better layout
        seed=42  # Fixed random seed for consistent layout
    )

    # Draw nodes with difficulty-based colors
    node_colors = [G.nodes[node].get('color', '#90CAF9') for node in G.nodes()]

    # All nodes are now the same size since there's no main concept
    node_sizes = [1500 for _ in G.nodes()]

    # Draw nodes
    nx.draw_networkx_nodes(
        G, pos,
        node_color=node_colors,
        node_size=node_sizes,
        alpha=0.8
    )

    # Draw edges with different styles for different relationship types
    edges_prerequisite = [(u, v) for (u, v, d) in G.edges(data=True) if d.get('type') == 'prerequisite']
    edges_related = [(u, v) for (u, v, d) in G.edges(data=True) if d.get('type') == 'related']

    # Draw edges with curves to avoid overlap (opposite curvature per type so
    # a prerequisite and a related edge between the same pair stay visible)
    nx.draw_networkx_edges(
        G, pos,
        edgelist=edges_prerequisite,
        edge_color='red',
        width=2,
        connectionstyle="arc3,rad=0.2",  # Add curve
        arrowsize=20,
        arrowstyle='->',
        min_source_margin=30,
        min_target_margin=30
    )
    nx.draw_networkx_edges(
        G, pos,
        edgelist=edges_related,
        edge_color='blue',
        style='dashed',
        width=1.5,
        connectionstyle="arc3,rad=-0.2",  # Add reverse curve
        arrowsize=15,
        arrowstyle='->',
        min_source_margin=25,
        min_target_margin=25
    )

    # Optimize label display
    labels = {
        node: G.nodes[node].get('name', node)
        for node in G.nodes()
    }

    # Calculate label position offsets
    label_pos = {
        node: (coord[0], coord[1] + 0.08)  # Offset labels upward
        for node, coord in pos.items()
    }

    # Use larger font size and add text background
    nx.draw_networkx_labels(
        G,
        label_pos,
        labels,
        font_size=12,  # Increase font size
        font_weight='bold',
        bbox={  # Add text background
            'facecolor': 'white',
            'edgecolor': '#E0E0E0',
            'alpha': 0.9,
            'pad': 6,
            'boxstyle': 'round,pad=0.5'
        }
    )

    # Adjust legend position and size
    legend_elements = [
        plt.Line2D([0], [0], color='red', lw=2, label='Prerequisite'),
        plt.Line2D([0], [0], color='blue', linestyle='--', lw=2, label='Related'),
        plt.Line2D([0], [0], marker='o', color='w', label='Basic', markerfacecolor='#90CAF9', markersize=12),
        plt.Line2D([0], [0], marker='o', color='w', label='Intermediate', markerfacecolor='#FFB74D', markersize=12),
        plt.Line2D([0], [0], marker='o', color='w', label='Advanced', markerfacecolor='#EF5350', markersize=12)
    ]
    plt.legend(
        handles=legend_elements,
        loc='upper right',
        bbox_to_anchor=(1.2, 1),
        fontsize=10,
        frameon=True,
        facecolor='white',
        edgecolor='none',
        shadow=True
    )

    # Add title showing the main concept without creating a node for it
    main_concept = concepts_data.get("main_concept", "Concept Map")
    plt.title(f"Concept Map: {main_concept}", pad=20, fontsize=14, fontweight='bold')

    # Increase graph margins
    plt.margins(x=0.2, y=0.2)
    plt.axis('off')
    plt.tight_layout()

    # Add padding when saving the image; render into memory, not to disk
    buf = io.BytesIO()
    plt.savefig(
        buf,
        format='png',
        bbox_inches='tight',
        dpi=150,
        pad_inches=0.5
    )
    plt.close('all')
    buf.seek(0)

    return "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode('utf-8')