"""
Utility functions for LLM-related operations
"""
import json
import os
import time
from typing import Dict, List, Any, Optional

# Import from OpenAI newer SDK
from openai import OpenAI

# Import local modules
from cache_utils import cached_llm_call, get_from_cache, save_to_cache
from config import OPENAI_API_KEY, OPENAI_MODEL, OPENAI_TIMEOUT, OPENAI_MAX_RETRIES, USE_FALLBACK_DATA, DEBUG_MODE


def call_llm(system_prompt: str, user_prompt: str, mock_data: Optional[Dict] = None) -> Dict[str, Any]:
    """
    Call LLM with improved error handling and response validation

    Args:
        system_prompt: System role prompt
        user_prompt: User input prompt
        mock_data: Mock data for fallback

    Returns:
        Parsed JSON response from LLM

    Raises:
        ValueError: If response format is invalid
        Exception: For other API call failures
    """
    cache_key = f"{system_prompt}_{user_prompt}"
    cached_response = get_from_cache(cache_key)
    if cached_response:
        if DEBUG_MODE:
            print("Using cached response")
        return json.loads(cached_response)
    try:
        # Apply the configured timeout and retry budget at the client level
        client = OpenAI(
            api_key=OPENAI_API_KEY,
            timeout=OPENAI_TIMEOUT,
            max_retries=OPENAI_MAX_RETRIES,
        )
        # Make API call with temperature=0.1 for more consistent outputs
        response = client.chat.completions.create(
            model=OPENAI_MODEL,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt}
            ],
            temperature=0.1,
            response_format={"type": "json_object"}
        )
        content = response.choices[0].message.content

        # Validate JSON response
        try:
            json_response = json.loads(content)
            validate_response_format(json_response)
            save_to_cache(cache_key, content)
            return json_response
        except json.JSONDecodeError:
            raise ValueError("Invalid JSON response from LLM")
    except Exception as e:
        if DEBUG_MODE:
            print(f"LLM API call failed: {str(e)}")
        if USE_FALLBACK_DATA and mock_data:
            return mock_data
        raise
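

# Hedged usage sketch (not part of the original module): shows how call_llm
# might be invoked directly. The prompt strings and fallback payload below are
# illustrative assumptions; real callers build their prompts via prompts.py.
def _example_call_llm_usage() -> Dict[str, Any]:
    system_prompt = "You are a tutor. Always reply with a single JSON object."
    user_prompt = "Explain photosynthesis for a middle-school student."
    fallback = {
        "explanation": "Placeholder explanation used only if the API call fails.",
        "key_points": [],
        "examples": [],
        "practice": [],
        "resources": [],
    }
    # mock_data is returned only when the API call fails and USE_FALLBACK_DATA is enabled
    return call_llm(system_prompt, user_prompt, mock_data=fallback)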


def validate_response_format(response: Dict[str, Any]) -> None:
    """
    Validate the format of LLM response

    Args:
        response: Parsed JSON response

    Raises:
        ValueError: If required fields are missing or invalid
    """
    required_fields = {
        "decomposition": ["main_concept", "sub_concepts", "relationships"],
        "explanation": ["explanation", "key_points", "examples", "practice", "resources"]
    }

    # Determine response type and validate fields
    if "main_concept" in response:
        fields = required_fields["decomposition"]
    elif "explanation" in response:
        fields = required_fields["explanation"]
    else:
        raise ValueError("Unknown response format")

    for field in fields:
        if field not in response:
            raise ValueError(f"Missing required field: {field}")


def _do_decompose_concepts(params: Dict[str, Any]) -> Dict[str, Any]:
    """
    Execute concept decomposition (internal function)

    Args:
        params: Parameter dictionary containing user profile and question

    Returns:
        Decomposed concept data
    """
    from prompts import generate_decomposition_prompt

    user_profile = params.get("user_profile", {})
    question = params.get("question", "")

    system_prompt, user_prompt = generate_decomposition_prompt(
        question,
        user_profile.get("grade", "Not specified"),
        user_profile.get("subject", "Not specified"),
        user_profile.get("needs", "Not specified")
    )

    from concept_handler import MOCK_DECOMPOSITION_RESULT
    response = call_llm(system_prompt, user_prompt, MOCK_DECOMPOSITION_RESULT)
    return response


def decompose_concepts(user_profile: Dict[str, str], question: str) -> Dict[str, Any]:
    """
    Use LLM to break down user questions into multiple concepts, with caching

    Args:
        user_profile: User profile information
        question: User question

    Returns:
        Dictionary containing main concept, sub-concepts, and relationships
    """
    params = {
        "user_profile": user_profile,
        "question": question
    }
    return cached_llm_call("decompose", params, _do_decompose_concepts)


def _do_get_concept_explanation(params: Dict[str, Any]) -> Dict[str, Any]:
    """
    Execute concept explanation (internal function)

    Args:
        params: Parameter dictionary containing user profile and concept information

    Returns:
        Concept explanation data
    """
    from prompts import generate_explanation_prompt

    user_profile = params.get("user_profile", {})
    concept_id = params.get("concept_id", "")
    concept_name = params.get("concept_name", "")
    concept_description = params.get("concept_description", "")

    system_prompt, user_prompt = generate_explanation_prompt(
        concept_name,
        concept_description,
        "",  # Original question (not needed here)
        user_profile.get("grade", "Not specified"),
        user_profile.get("subject", "Not specified"),
        user_profile.get("needs", "Not specified")
    )

    from concept_handler import MOCK_EXPLANATION_RESULT
    response = call_llm(system_prompt, user_prompt, MOCK_EXPLANATION_RESULT)
    return response


def get_concept_explanation(user_profile: Dict[str, str], concept_id: str,
                            concept_name: str, concept_description: str) -> Dict[str, Any]:
    """
    Get detailed explanation and learning resources for a specific concept, with caching

    Args:
        user_profile: User profile information
        concept_id: Concept ID
        concept_name: Concept name
        concept_description: Brief concept description

    Returns:
        Dictionary containing explanation, examples, and resources
    """
    params = {
        "user_profile": user_profile,
        "concept_id": concept_id,
        "concept_name": concept_name,
        "concept_description": concept_description
    }
    return cached_llm_call("explain", params, _do_get_concept_explanation)