AdnanElAssadi committed
Commit ed8703b · verified · 1 Parent(s): bc57cf2

Update app.py

Files changed (1): app.py (+22 −21)
app.py CHANGED
@@ -10,27 +10,24 @@ def create_reranking_interface(task_data):
     completed_samples = {s["id"]: False for s in samples}
 
     def save_ranking(rankings, sample_id):
-        print("DEBUG: Rankings received:", rankings)
-        print("DEBUG: sample_id received:", sample_id, type(sample_id))
+        """Save the current set of rankings."""
         try:
             # Check if all documents have rankings
             all_ranked = all(r is not None and r != "" for r in rankings)
             if not all_ranked:
                 return "⚠️ Please assign a rank to all documents before submitting", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
 
-            # Convert rankings to integers
+            # Convert rankings to integers with better error handling
             try:
                 processed_rankings = [int(r) for r in rankings]
-                print("DEBUG: Processed rankings:", processed_rankings)
-            except ValueError as ve:
-                print("DEBUG: ValueError in ranking conversion:", ve)
+            except ValueError:
                 return "⚠️ Invalid ranking value. Please use only numbers.", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
 
             # Check for duplicate rankings
             if len(set(processed_rankings)) != len(processed_rankings):
                 return "⚠️ Each document must have a unique rank. Please review your rankings.", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
 
-            # Store annotation in memory
+            # Store this annotation in memory
             existing_idx = next((i for i, a in enumerate(results["annotations"]) if a["sample_id"] == sample_id), None)
             if existing_idx is not None:
                 results["annotations"][existing_idx] = {
@@ -43,23 +40,20 @@ def create_reranking_interface(task_data):
                     "rankings": processed_rankings
                 })
 
-            # Convert sample_id if necessary
-            if sample_id not in completed_samples:
-                try:
-                    sample_id = int(sample_id)
-                except:
-                    pass
-            print("DEBUG: Completed samples keys:", completed_samples.keys())
             completed_samples[sample_id] = True
 
-            output_path = f"{task_data['task_name']}_human_results.json"
-            with open(output_path, "w") as f:
-                json.dump(results, f, indent=2)
-            return "✅ Rankings saved successfully", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
+            # Try to save to file, but continue even if it fails
+            try:
+                output_path = f"{task_data['task_name']}_human_results.json"
+                with open(output_path, "w") as f:
+                    json.dump(results, f, indent=2)
+                return f"✅ Rankings saved successfully (in memory and to file)", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
+            except:
+                # If file saving fails, still mark as success since we saved in memory
+                return f"✅ Rankings saved in memory (file save failed)", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
         except Exception as e:
-            print("DEBUG: Exception occurred:", e)
+            # Return specific error message
             return f"Error: {str(e)}", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
-
 
     with gr.Blocks(theme=gr.themes.Soft()) as demo:
         gr.Markdown(f"# {task_data['task_name']} - Human Reranking Evaluation")
@@ -186,9 +180,16 @@ def create_reranking_interface(task_data):
                json.dump(results, f, indent=2)
            return f"✅ Results saved to {output_path} ({len(results['annotations'])} annotations)"
 
+        # Define a wrapper function that collects all the dropdown values into a list
+        def save_ranking_wrapper(*args):
+            # The last argument is the sample_id, all others are rankings
+            rankings = args[:-1]
+            sample_id = args[-1]
+            return save_ranking(rankings, sample_id)
+
         # Connect events
         submit_btn.click(
-            save_ranking,
+            save_ranking_wrapper,
             inputs=ranking_dropdowns + [current_sample_id],
             outputs=[status_box, progress_text]
         )
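Why the wrapper is needed: Gradio calls an event handler with one positional argument per input component, so the dropdown values arrive as separate arguments rather than as a single list. A minimal, self-contained sketch of the same pattern (the component names and choices here are made up, not taken from this app):

import gradio as gr

def save_ranking_wrapper(*args):
    # Gradio passes each input's value as its own positional argument;
    # regroup them: all but the last are rankings, the last is the sample id.
    rankings, sample_id = args[:-1], args[-1]
    return f"sample {sample_id}: rankings {list(rankings)}"

with gr.Blocks() as demo:
    # Stand-ins for the per-document rank dropdowns.
    dropdowns = [gr.Dropdown(choices=["1", "2", "3"], label=f"Doc {i + 1}") for i in range(3)]
    sample_id = gr.Textbox(value="0", visible=False)
    status = gr.Textbox(label="Status")
    gr.Button("Submit").click(save_ranking_wrapper, inputs=dropdowns + [sample_id], outputs=status)

demo.launch()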
 
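For reference, the results file that save_ranking writes has roughly this shape, inferred from the fields the diff touches (results["annotations"], sample_id, rankings) and the output path f"{task_data['task_name']}_human_results.json"; the task name and values below are illustrative only:

import json

# Illustrative only: one annotation per sample, rankings as integers.
results = {"annotations": [{"sample_id": "s1", "rankings": [2, 1, 3]}]}

with open("MyTask_human_results.json", "w") as f:
    json.dump(results, f, indent=2)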