# NexusLearnAI / llm_utils.py
"""
Utility functions for LLM-related operations
"""
import json
import os
import time
from typing import Dict, List, Any, Optional
# Import from OpenAI newer SDK
from openai import OpenAI
# Import local modules
from cache_utils import cached_llm_call, get_from_cache, save_to_cache
from config import OPENAI_API_KEY, OPENAI_MODEL, OPENAI_TIMEOUT, OPENAI_MAX_RETRIES, USE_FALLBACK_DATA, DEBUG_MODE


def call_llm(system_prompt: str, user_prompt: str, mock_data: Optional[Dict] = None) -> Dict[str, Any]:
    """
    Call the LLM with improved error handling and response validation.

    Args:
        system_prompt: System role prompt
        user_prompt: User input prompt
        mock_data: Mock data returned as a fallback if the API call fails

    Returns:
        Parsed JSON response from the LLM

    Raises:
        ValueError: If the response format is invalid
        Exception: For other API call failures
    """
    cache_key = f"{system_prompt}_{user_prompt}"
    cached_response = get_from_cache(cache_key)
    if cached_response:
        if DEBUG_MODE:
            print("Using cached response")
        return json.loads(cached_response)

    try:
        # Timeout and retry limits come from config so they can be tuned in one place
        client = OpenAI(
            api_key=OPENAI_API_KEY,
            timeout=OPENAI_TIMEOUT,
            max_retries=OPENAI_MAX_RETRIES,
        )
        # Low temperature (0.1) for more consistent outputs; force a JSON object response
        response = client.chat.completions.create(
            model=OPENAI_MODEL,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            temperature=0.1,
            response_format={"type": "json_object"},
        )
        content = response.choices[0].message.content

        # Validate the JSON response before caching it
        try:
            json_response = json.loads(content)
            validate_response_format(json_response)
            save_to_cache(cache_key, content)
            return json_response
        except json.JSONDecodeError:
            raise ValueError("Invalid JSON response from LLM")
    except Exception as e:
        if DEBUG_MODE:
            print(f"LLM API call failed: {str(e)}")
        if USE_FALLBACK_DATA and mock_data:
            return mock_data
        raise
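

# A minimal usage sketch (hypothetical prompts and mock data; assumes the prompts ask
# for JSON matching one of the schemas checked in validate_response_format below):
#
#     result = call_llm(
#         "You are a tutoring assistant. Respond only with a JSON object.",
#         "Explain photosynthesis for a middle-school student.",
#         mock_data={"explanation": "...", "key_points": [], "examples": [],
#                    "practice": [], "resources": []},
#     )
#     print(result["explanation"])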


def validate_response_format(response: Dict[str, Any]) -> None:
    """
    Validate the format of an LLM response.

    Args:
        response: Parsed JSON response

    Raises:
        ValueError: If required fields are missing or invalid
    """
    required_fields = {
        "decomposition": ["main_concept", "sub_concepts", "relationships"],
        "explanation": ["explanation", "key_points", "examples", "practice", "resources"],
    }

    # Determine the response type from its top-level keys, then check required fields
    if "main_concept" in response:
        fields = required_fields["decomposition"]
    elif "explanation" in response:
        fields = required_fields["explanation"]
    else:
        raise ValueError("Unknown response format")

    for field in fields:
        if field not in response:
            raise ValueError(f"Missing required field: {field}")
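

# For reference, the two accepted top-level shapes (values elided; only the presence of
# the keys is checked by validate_response_format):
#
#     {"main_concept": ..., "sub_concepts": ..., "relationships": ...}
#     {"explanation": ..., "key_points": ..., "examples": ..., "practice": ..., "resources": ...}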


def _do_decompose_concepts(params: Dict[str, Any]) -> Dict[str, Any]:
    """
    Execute concept decomposition (internal function)

    Args:
        params: Parameter dictionary containing user profile and question

    Returns:
        Decomposed concept data
    """
    from prompts import generate_decomposition_prompt

    user_profile = params.get("user_profile", {})
    question = params.get("question", "")

    system_prompt, user_prompt = generate_decomposition_prompt(
        question,
        user_profile.get("grade", "Not specified"),
        user_profile.get("subject", "Not specified"),
        user_profile.get("needs", "Not specified"),
    )

    from concept_handler import MOCK_DECOMPOSITION_RESULT
    response = call_llm(system_prompt, user_prompt, MOCK_DECOMPOSITION_RESULT)
    return response


def decompose_concepts(user_profile: Dict[str, str], question: str) -> Dict[str, Any]:
    """
    Use the LLM to break a user question down into multiple concepts, with caching.

    Args:
        user_profile: User profile information
        question: User question

    Returns:
        Dictionary containing the main concept, sub-concepts, and relationships
    """
    params = {
        "user_profile": user_profile,
        "question": question,
    }
    return cached_llm_call("decompose", params, _do_decompose_concepts)
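

# A hedged usage sketch (the profile keys mirror those read in _do_decompose_concepts;
# the values here are hypothetical):
#
#     profile = {"grade": "Grade 8", "subject": "Biology", "needs": "Prefers visual examples"}
#     decomposition = decompose_concepts(profile, "How does photosynthesis work?")
#     print(decomposition["main_concept"])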


def _do_get_concept_explanation(params: Dict[str, Any]) -> Dict[str, Any]:
    """
    Execute concept explanation (internal function)

    Args:
        params: Parameter dictionary containing user profile and concept information

    Returns:
        Concept explanation data
    """
    from prompts import generate_explanation_prompt

    user_profile = params.get("user_profile", {})
    concept_id = params.get("concept_id", "")
    concept_name = params.get("concept_name", "")
    concept_description = params.get("concept_description", "")

    system_prompt, user_prompt = generate_explanation_prompt(
        concept_name,
        concept_description,
        "",  # Original question (not needed here)
        user_profile.get("grade", "Not specified"),
        user_profile.get("subject", "Not specified"),
        user_profile.get("needs", "Not specified"),
    )

    from concept_handler import MOCK_EXPLANATION_RESULT
    response = call_llm(system_prompt, user_prompt, MOCK_EXPLANATION_RESULT)
    return response


def get_concept_explanation(user_profile: Dict[str, str], concept_id: str,
                            concept_name: str, concept_description: str) -> Dict[str, Any]:
    """
    Get detailed explanation and learning resources for a specific concept, with caching

    Args:
        user_profile: User profile information
        concept_id: Concept ID
        concept_name: Concept name
        concept_description: Brief concept description

    Returns:
        Dictionary containing explanation, examples, and resources
    """
    params = {
        "user_profile": user_profile,
        "concept_id": concept_id,
        "concept_name": concept_name,
        "concept_description": concept_description,
    }
    return cached_llm_call("explain", params, _do_get_concept_explanation)
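

# A minimal end-to-end sketch for manual testing. It assumes a valid OPENAI_API_KEY in
# config (or USE_FALLBACK_DATA=True so the mock results are returned); the profile and
# concept values below are hypothetical.
if __name__ == "__main__":
    demo_profile = {"grade": "Grade 8", "subject": "Biology", "needs": "Prefers examples"}

    decomposition = decompose_concepts(demo_profile, "How does photosynthesis work?")
    print(json.dumps(decomposition, indent=2, ensure_ascii=False))

    explanation = get_concept_explanation(
        demo_profile,
        "concept-1",                      # hypothetical concept id
        "Photosynthesis",                 # hypothetical concept name
        "How plants convert light into chemical energy",
    )
    print(json.dumps(explanation, indent=2, ensure_ascii=False))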