# quiz_generator.py
import aiohttp
import asyncio
import json
import logging
import re
from typing import List, Dict, Any
from fallback_questions import FallbackQuestions
logger = logging.getLogger(__name__)
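# A minimal usage sketch (illustrative only; assumes the request object exposes the
# fields referenced below: tema, språk, antall_spørsmål, type, vanskelighetsgrad,
# and that the token is read from a hypothetical HF_API_KEY environment variable):
#
#     generator = QuizGenerator(api_key=os.environ.get("HF_API_KEY", ""))
#     questions = await generator.generate_quiz(request)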
class QuizGenerator:
def __init__(self, api_key: str):
self.api_key = api_key
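        # Track which model and generation path produced the most recent quiz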
self.model_used = "fallback"
self.generation_method = "fallback"
self.fallback = FallbackQuestions()
        # Model configuration
self.models = {
"norwegian": "NbAiLab/nb-gpt-j-6B",
"english": "meta-llama/Llama-2-70b-chat-hf",
"fallback": "google/flan-t5-small"
}
async def generate_quiz(self, request) -> List[Dict[str, Any]]:
"""Hovedmetode for quiz-generering"""
logger.info(f"Starter quiz-generering: {request.tema} ({request.språk})")
# Prøv AI-generering først
if self.api_key:
try:
questions = await self._try_ai_generation(request)
if questions:
logger.info(f"AI-generering suksess: {len(questions)} spørsmål")
return questions
except Exception as e:
logger.warning(f"AI-generering feilet: {e}")
        # Fall back to predefined questions
        logger.info("Using fallback questions")
self.model_used = "fallback"
self.generation_method = "predefined"
return self.fallback.get_questions(
tema=request.tema,
språk=request.språk,
antall=request.antall_spørsmål,
type=request.type,
vanskelighet=request.vanskelighetsgrad
)
async def _try_ai_generation(self, request) -> List[Dict[str, Any]]:
"""Prøv AI-generering med forskjellige modeller"""
# Velg modell basert på språk
if request.språk == "no":
model = self.models["norwegian"]
else:
model = self.models["english"]
logger.info(f"Prøver AI-modell: {model}")
try:
questions = await self._call_huggingface_api(model, request)
if questions:
self.model_used = model
self.generation_method = "ai"
return questions
except Exception as e:
logger.warning(f"Modell {model} feilet: {e}")
# Prøv fallback-modell
try:
logger.info(f"Prøver fallback-modell: {self.models['fallback']}")
questions = await self._call_huggingface_api(self.models["fallback"], request)
if questions:
self.model_used = self.models["fallback"]
self.generation_method = "ai_fallback"
return questions
except Exception as e:
logger.warning(f"Fallback-modell feilet: {e}")
return []
async def _call_huggingface_api(self, model: str, request) -> List[Dict[str, Any]]:
"""Kall Hugging Face Inference API"""
prompt = self._build_prompt(request, model)
async with aiohttp.ClientSession() as session:
async with session.post(
f"https://api-inference.huggingface.co/models/{model}",
headers={
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json"
},
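                # Text-generation parameters: sample up to 1500 new tokens with moderate randomness (temperature 0.7, top_p 0.9)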
json={
"inputs": prompt,
"parameters": {
"max_new_tokens": 1500,
"temperature": 0.7,
"do_sample": True,
"top_p": 0.9
}
},
timeout=aiohttp.ClientTimeout(total=30)
) as response:
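                # Non-200 responses (e.g. 503 while the model is still loading) are raised so the caller can fall back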
if response.status != 200:
error_text = await response.text()
raise Exception(f"HTTP {response.status}: {error_text}")
data = await response.json()
# Parse response
if isinstance(data, list) and data:
generated_text = data[0].get("generated_text", "")
elif isinstance(data, dict):
generated_text = data.get("generated_text", "")
else:
raise Exception("Uventet response-format")
                # Parse quiz questions from the generated text
questions = self._parse_quiz_response(generated_text, request.antall_spørsmål)
if not questions:
raise Exception("Kunne ikke parse quiz-spørsmål fra AI-respons")
return questions
def _build_prompt(self, request, model: str) -> str:
"""Bygg prompt for AI-modell"""
if request.språk == "no":
return f"""Generer {request.antall_spørsmål} quiz-spørsmål på norsk om temaet "{request.tema}".
Format for hvert spørsmål:
SPØRSMÅL: [spørsmålstekst]
A) [alternativ 1]
B) [alternativ 2]
C) [alternativ 3]
D) [alternativ 4]
KORREKT: [A, B, C eller D]