AdnanElAssadi committed (verified)
Commit: 6f6e9c8
Parent: 06f87ee

Update app.py

Files changed (1):
  1. app.py +44 -34
app.py CHANGED
@@ -11,41 +11,51 @@ def create_reranking_interface(task_data):
 
     def save_ranking(rankings, sample_id):
         """Save the current set of rankings."""
-        # Check if all documents have rankings
-        all_ranked = all(r is not None and r != "" for r in rankings)
-        if not all_ranked:
-            return "⚠️ Please assign a rank to all documents before submitting", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
-
-        # Convert rankings to integers
-        processed_rankings = [int(r) for r in rankings]
-
-        # Check for duplicate rankings
-        if len(set(processed_rankings)) != len(processed_rankings):
-            return "⚠️ Each document must have a unique rank. Please review your rankings.", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
-
-        # Store this annotation
-        existing_idx = next((i for i, a in enumerate(results["annotations"]) if a["sample_id"] == sample_id), None)
-        if existing_idx is not None:
-            results["annotations"][existing_idx] = {
-                "sample_id": sample_id,
-                "rankings": processed_rankings
-            }
-        else:
-            results["annotations"].append({
-                "sample_id": sample_id,
-                "rankings": processed_rankings
-            })
-
-        completed_samples[sample_id] = True
-        success_msg = f"✅ Rankings for query '{sample_id}' successfully saved!"
-        progress = f"Progress: {sum(completed_samples.values())}/{len(samples)}"
-
-        # Auto-save results after each submission
-        output_path = f"{task_data['task_name']}_human_results.json"
-        with open(output_path, "w") as f:
-            json.dump(results, f, indent=2)
-
-        return success_msg, progress
+        try:
+            # Check if all documents have rankings
+            all_ranked = all(r is not None and r != "" for r in rankings)
+            if not all_ranked:
+                return "⚠️ Please assign a rank to all documents before submitting", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
+
+            # Convert rankings to integers with better error handling
+            try:
+                processed_rankings = [int(r) for r in rankings]
+            except ValueError:
+                return "⚠️ Invalid ranking value. Please use only numbers.", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
+
+            # Check for duplicate rankings
+            if len(set(processed_rankings)) != len(processed_rankings):
+                return "⚠️ Each document must have a unique rank. Please review your rankings.", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
+
+            # Store this annotation
+            existing_idx = next((i for i, a in enumerate(results["annotations"]) if a["sample_id"] == sample_id), None)
+            if existing_idx is not None:
+                results["annotations"][existing_idx] = {
+                    "sample_id": sample_id,
+                    "rankings": processed_rankings
+                }
+            else:
+                results["annotations"].append({
+                    "sample_id": sample_id,
+                    "rankings": processed_rankings
+                })
+
+            completed_samples[sample_id] = True
+            success_msg = f"✅ Rankings for query '{sample_id}' successfully saved!"
+            progress = f"Progress: {sum(completed_samples.values())}/{len(samples)}"
+
+            # Auto-save results after each submission with error handling
+            try:
+                output_path = f"{task_data['task_name']}_human_results.json"
+                with open(output_path, "w") as f:
+                    json.dump(results, f, indent=2)
+            except Exception as e:
+                return f"⚠️ Rankings saved in memory but couldn't write to file: {str(e)}", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
+
+            return success_msg, progress
+        except Exception as e:
+            # Catch-all for any other unexpected errors
+            return f"⚠️ An error occurred: {str(e)}", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
 
     with gr.Blocks(theme=gr.themes.Soft()) as demo:
         gr.Markdown(f"# {task_data['task_name']} - Human Reranking Evaluation")
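
In short, the commit wraps the body of `save_ranking` in a `try/except`, adds a dedicated `ValueError` path for non-numeric ranks, and guards the auto-save file write so a disk error no longer discards the in-memory annotation. The sketch below only illustrates the three validation paths in isolation; `validate_rankings` is a hypothetical helper written for this illustration, since app.py keeps these checks inline inside `save_ranking`:

```python
# Hypothetical, standalone mirror of the checks added to save_ranking in this
# commit (the real code keeps them inline and also updates shared state).
def validate_rankings(rankings):
    """Return (True, parsed_ranks) or (False, warning_message)."""
    # Every document must carry a rank before submission.
    if not all(r is not None and r != "" for r in rankings):
        return False, "⚠️ Please assign a rank to all documents before submitting"
    # Ranks must be numeric; bad input is reported instead of raising ValueError.
    try:
        parsed = [int(r) for r in rankings]
    except ValueError:
        return False, "⚠️ Invalid ranking value. Please use only numbers."
    # Ranks must be unique across documents.
    if len(set(parsed)) != len(parsed):
        return False, "⚠️ Each document must have a unique rank. Please review your rankings."
    return True, parsed


print(validate_rankings(["2", "1", "3"]))  # (True, [2, 1, 3])
print(validate_rankings(["2", "x", "3"]))  # (False, '⚠️ Invalid ranking value. ...')
print(validate_rankings(["1", "1", "2"]))  # (False, '⚠️ Each document must have a unique rank. ...')
```

The outer `try/except` in the real function plays the same role for any remaining failure mode, returning a user-facing warning and the progress string instead of raising inside the Gradio callback.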