from fastapi import APIRouter, HTTPException
from typing import Dict, Any
import os
from tasks.evaluation_task import EvaluationTask
from huggingface_hub import hf_hub_download
import json
from datetime import datetime
import asyncio

router = APIRouter(tags=["evaluation"])

# Store active evaluation tasks by session_id
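# Note: this is an in-memory, per-process store; pending tasks are lost if the server restarts.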
active_evaluation_tasks = {}

@router.post("/evaluate-benchmark")
async def evaluate_benchmark(data: Dict[str, Any]):
    """
    Start a benchmark evaluation for a given session
    
    Args:
        data: Dictionary containing session_id
        
    Returns:
        Dictionary with status and initial logs
    """
    session_id = data.get("session_id")
    
    if not session_id:
        return {"error": "Session ID missing or invalid"}
    
    # Check if an evaluation is already in progress for this session
    if session_id in active_evaluation_tasks:
        evaluation_task = active_evaluation_tasks[session_id]
        # If the evaluation is already completed, we can start a new one
        if evaluation_task.is_task_completed():
            # Delete the old task
            del active_evaluation_tasks[session_id]
        else:
            # An evaluation is already in progress
            return {
                "status": "already_running",
                "message": "An evaluation is already in progress for this session",
                "logs": evaluation_task.get_logs()
            }
    
    try:
        # Dataset name based on session ID; use the configured organization so this
        # stays consistent with the results endpoint below
        organization = os.getenv("HF_ORGANIZATION", "yourbench")
        dataset_name = f"{organization}/yourbench_{session_id}"
        
        # Create and start a new evaluation task
        evaluation_task = EvaluationTask(session_uid=session_id, dataset_name=dataset_name)
        active_evaluation_tasks[session_id] = evaluation_task
        
        # Start the evaluation asynchronously
        asyncio.create_task(evaluation_task.run())
        
        # Get initial logs
        initial_logs = evaluation_task.get_logs()
        
        return {
            "status": "started",
            "message": f"Evaluation started for benchmark {dataset_name}",
            "logs": initial_logs
        }
    except Exception as e:
        return {
            "status": "error",
            "error": str(e),
            "message": f"Error starting evaluation: {str(e)}"
        }
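
# Illustrative request for the endpoint above (a sketch, not part of the API contract;
# "abc123" is a placeholder session id and the host/port are assumptions about a local run):
#   curl -X POST http://localhost:8000/evaluate-benchmark \
#        -H "Content-Type: application/json" \
#        -d '{"session_id": "abc123"}'
# A first call should return {"status": "started", ...}; a second call while the task is
# still running should return {"status": "already_running", ...} with the current logs.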

@router.get("/evaluation-logs/{session_id}")
async def get_evaluation_logs(session_id: str):
    """
    Retrieve the logs of a running evaluation
    
    Args:
        session_id: Session ID to retrieve logs for
        
    Returns:
        Dictionary with logs and completion status
    """
    if session_id not in active_evaluation_tasks:
        raise HTTPException(status_code=404, detail="Evaluation task not found")
    
    evaluation_task = active_evaluation_tasks[session_id]
    logs = evaluation_task.get_logs()
    is_completed = evaluation_task.is_task_completed()
    
    # Get results if available and evaluation is completed
    results = None
    if is_completed and hasattr(evaluation_task, 'results') and evaluation_task.results:
        results = evaluation_task.results
    
    # Get step information
    progress = evaluation_task.get_progress()
    
    return {
        "logs": logs,
        "is_completed": is_completed,
        "results": results,
        "current_step": progress["current_step"],
        "completed_steps": progress["completed_steps"]
    }
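
# Polling sketch for the logs endpoint above (illustrative; assumes the API is reachable
# at localhost:8000 and "abc123" is a placeholder session id):
#   curl http://localhost:8000/evaluation-logs/abc123
# Clients typically poll this until "is_completed" is true, then read "results".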

@router.get("/evaluation-results/{session_id}")
async def get_evaluation_results(session_id: str):
    """
    Retrieve results of a completed evaluation
    
    Args:
        session_id: Session ID to retrieve results for
        
    Returns:
        Dictionary with evaluation results
    """
    try:
        # Get organization from environment
        organization = os.getenv("HF_ORGANIZATION", "yourbench")
        dataset_name = f"{organization}/yourbench_{session_id}"
        
        # Try to load results from the Hub
        try:
            results_file = hf_hub_download(
                repo_id=dataset_name,
                repo_type="dataset",
                filename="lighteval_results.json"
            )
            
            with open(results_file) as f:
                results_data = json.load(f)
            
            # Check if results are in the new format or old format
            if "results" in results_data and isinstance(results_data["results"], list):
                # New format: { "metadata": ..., "results": [...] }
                results_list = results_data["results"]
                metadata = results_data.get("metadata", {})
            else:
                # Old format: [...] (list directly)
                results_list = results_data
                metadata = {}
            
            # Format results to match the expected format
            formatted_results = {
                "metadata": {
                    "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                    "session_id": metadata.get("session_id", session_id),
                    "total_models_tested": len(results_list),
                    "successful_tests": len([r for r in results_list if r.get("status") == "success"])
                },
                "models_comparison": [
                    {
                        "model_name": result["model"],
                        "provider": result["provider"],
                        "success": result.get("status") == "success",
                        "accuracy": result["accuracy"],
                        "evaluation_time": result["execution_time"],
                        "error": result.get("status") if result.get("status") != "success" else None
                    }
                    for result in results_list
                ]
            }
            
            return {
                "success": True,
                "results": formatted_results
            }
        except Exception as e:
            return {
                "success": False,
                "message": f"Failed to load results from Hub: {str(e)}"
            }
            
    except Exception as e:
        return {
            "success": False,
            "message": f"Error retrieving evaluation results: {str(e)}"
        }
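
# Minimal local-run sketch. This is an assumption about how the router might be mounted
# for manual testing; in the real application the main entry point is expected to
# include this router itself.
if __name__ == "__main__":
    import uvicorn
    from fastapi import FastAPI

    app = FastAPI()
    app.include_router(router)
    uvicorn.run(app, host="127.0.0.1", port=8000)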