AdnanElAssadi committed
Commit 7307de0 · verified · 1 Parent(s): 6f6e9c8

Update app.py

Files changed (1):
  1. app.py +8 -10
app.py CHANGED
@@ -27,7 +27,7 @@ def create_reranking_interface(task_data):
         if len(set(processed_rankings)) != len(processed_rankings):
             return "⚠️ Each document must have a unique rank. Please review your rankings.", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
 
-        # Store this annotation
+        # Store this annotation in memory
         existing_idx = next((i for i, a in enumerate(results["annotations"]) if a["sample_id"] == sample_id), None)
         if existing_idx is not None:
             results["annotations"][existing_idx] = {
@@ -41,21 +41,19 @@ def create_reranking_interface(task_data):
             })
 
         completed_samples[sample_id] = True
-        success_msg = f"✅ Rankings for query '{sample_id}' successfully saved!"
-        progress = f"Progress: {sum(completed_samples.values())}/{len(samples)}"
 
-        # Auto-save results after each submission with error handling
+        # Try to save to file, but continue even if it fails
         try:
             output_path = f"{task_data['task_name']}_human_results.json"
             with open(output_path, "w") as f:
                 json.dump(results, f, indent=2)
-        except Exception as e:
-            return f"⚠️ Rankings saved in memory but couldn't write to file: {str(e)}", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
-
-        return success_msg, progress
+            return f"✅ Rankings saved successfully (in memory and to file)", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
+        except:
+            # If file saving fails, still mark as success since we saved in memory
+            return f"✅ Rankings saved in memory (file save failed)", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
     except Exception as e:
-        # Catch-all for any other unexpected errors
-        return f"⚠️ An error occurred: {str(e)}", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
+        # Return specific error message
+        return f"Error: {str(e)}", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
 
     with gr.Blocks(theme=gr.themes.Soft()) as demo:
         gr.Markdown(f"# {task_data['task_name']} - Human Reranking Evaluation")