import gradio as gr
import json
import os
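
# Expected input task schema, inferred from the fields this app reads (an
# assumption, not an official format):
#   {
#     "task_name": str,
#     "instructions": str,   # optional; a default prompt is used if missing
#     "samples": [{"id": str, "query": str, "candidates": [str, ...]}, ...]
#   }
# Results are written to "<task_name>_human_results.json" as
#   {"task_name": str, "task_type": "reranking",
#    "annotations": [{"sample_id": str, "rankings": [int, ...]}]}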

def create_reranking_interface(task_data):
    """Create a Gradio interface for reranking evaluation using drag and drop."""
    try:
        samples = task_data["samples"]
        results = {"task_name": task_data["task_name"], "task_type": "reranking", "annotations": []}
        completed_samples = {s["id"]: False for s in samples}
        
        def progress_string():
            # Compute the live progress label from state; reading progress_text.value
            # inside a callback returns the component's initial value in Gradio 3.x,
            # not the current one, so the handlers below use this helper instead.
            return f"Progress: {sum(completed_samples.values())}/{len(samples)}"
        
        # Define helper functions before the UI elements are created
        def generate_sortable_html(candidates, existing_ranks=None):
            """Generate the HTML for the sortable list with up/down buttons."""
            try:
                if existing_ranks and len(existing_ranks) == len(candidates):
                    order = sorted(range(len(candidates)), key=lambda i: existing_ranks[i])
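                    # e.g. existing_ranks == [2, 3, 1] (doc 0 ranked 2nd, doc 2 ranked 1st)
                    # yields display order [2, 0, 1]: indices sorted by their rank.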
                else:
                    order = list(range(len(candidates)))
                
                import html as html_escaper  # hoisted: no need to re-import inside the loop
                
                html = '<div id="sortable-container" class="sortable-container">'
                for rank_minus_1, idx in enumerate(order):
                    if idx < len(candidates):
                        doc = candidates[idx]
                        rank = rank_minus_1 + 1
                        escaped_doc = html_escaper.escape(doc)
                        
                        # Add navigation buttons (up/down arrows)
                        up_disabled = "disabled" if rank == 1 else ""
                        down_disabled = "disabled" if rank == len(candidates) else ""
                        
                        html += f'''\
                        <div class="sortable-item rank-bg-{rank}" data-doc-id="{idx}" data-rank="{rank}">
                            <div class="rank-controls">
                                <button type="button" class="rank-btn up-btn" {up_disabled} onclick="window.moveItemUp({rank})">▲</button>
                                <div class="rank-badge">{rank}</div>
                                <button type="button" class="rank-btn down-btn" {down_disabled} onclick="window.moveItemDown({rank})">▼</button>
                            </div>
                            <div class="doc-content">{escaped_doc}</div>
                        </div>
                        '''
                html += '</div>'
                
                # Also return the computed order for proper initialization
                return html, order
            except Exception as e:
                print(f"Error in generate_sortable_html: {str(e)}")
                return f'<div class="error">Error generating ranking interface: {str(e)}</div>', []
        
        def save_ranking(order_json, sample_id):
            """Save the current ranking to results."""
            try:
                if not order_json or order_json == "[]":
                    return "⚠️ Drag documents to set the ranking before submitting.", progress_text.value
                
                order = json.loads(order_json)
                sample = next((s for s in samples if s["id"] == sample_id), None)
                
                if not sample:
                    return "⚠️ Sample not found.", progress_text.value
                    
                num_candidates = len(sample["candidates"])
                
                if len(order) != num_candidates:
                    return f"⚠️ Ranking order length mismatch. Expected {num_candidates}, got {len(order)}.", progress_text.value
                
                rankings = [0] * num_candidates
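                # Invert the display order into per-document ranks, e.g. order == [2, 0, 1]
                # (doc 2 shown first) yields rankings == [2, 3, 1].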
                try:
                    for rank_minus_1, doc_idx in enumerate(order):
                        if doc_idx < num_candidates:
                            rankings[doc_idx] = rank_minus_1 + 1
                        else:
                            raise ValueError(f"Invalid document index {doc_idx} found in order.")
                except Exception as e:
                    return f"⚠️ Error processing ranking order: {str(e)}", progress_text.value
                    
                if sorted(rankings) != list(range(1, num_candidates + 1)):
                    return "⚠️ Ranking validation failed. Ranks are not 1 to N.", progress_text.value
                    
                annotation = {"sample_id": sample_id, "rankings": rankings}
                
                # Check if this sample was already annotated
                existing_idx = next((i for i, a in enumerate(results["annotations"]) if a["sample_id"] == sample_id), None)
                if existing_idx is not None:
                    results["annotations"][existing_idx] = annotation
                else:
                    results["annotations"].append(annotation)
                    
                completed_samples[sample_id] = True
                
                # Save results with timestamp and better error handling
                try:
                    output_path = f"{task_data['task_name']}_human_results.json"
                    with open(output_path, "w") as f:
                        json.dump(results, f, indent=2)
                        
                    # Check if all samples are complete
                    all_completed = sum(completed_samples.values()) == len(samples)
                    completion_message = "🎉 All samples completed! You can save and submit your results." if all_completed else ""
                    
                    return f"✅ Rankings saved successfully ({len(results['annotations'])}/{len(samples)} completed) {completion_message}", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
                except Exception as file_error:
                    print(f"Error saving file: {str(file_error)}")
                    # Still mark as completed in memory even if file save fails
                    return f"⚠️ Rankings recorded but file save failed: {str(file_error)}", f"Progress: {sum(completed_samples.values())}/{len(samples)}"
                    
            except json.JSONDecodeError:
                return "⚠️ Error decoding ranking order. Please try again.", progress_text.value
            except Exception as e:
                import traceback
                print(traceback.format_exc())
                return f"Error saving ranking: {str(e)}", progress_text.value
        
        def load_sample(sample_id):
            """Load a sample into the interface."""
            try:
                sample = next((s for s in samples if s["id"] == sample_id), None)
                if not sample:
                    return gr.update(), gr.update(), "[]", gr.update(), "Sample not found"
                
                existing_ranking = next((anno["rankings"] for anno in results["annotations"] if anno["sample_id"] == sample_id), None)
                
                # Get both the HTML and the initial order
                new_html, initial_order = generate_sortable_html(sample["candidates"], existing_ranking)
                
                # Convert initial order to JSON string for state
                initial_order_json = json.dumps(initial_order)
                
                status = "Ready to rank" if not completed_samples.get(sample_id, False) else "Already ranked"
                progress = f"Progress: {sum(completed_samples.values())}/{len(samples)}"
                
                return sample["query"], new_html, initial_order_json, progress, status
            except Exception as e:
                import traceback
                print(traceback.format_exc())
                return "Error loading sample", "<div>Error loading sample content</div>", "[]", "Error", f"Error: {str(e)}"
        
        def next_sample_id(current_id):
            try:
                current_idx = next((i for i, s in enumerate(samples) if s["id"] == current_id), -1)
                if current_idx == -1:
                    return samples[0]["id"] if samples else current_id
                next_idx = min(current_idx + 1, len(samples) - 1)
                return samples[next_idx]["id"]
            except Exception as e:
                print(f"Error in next_sample_id: {str(e)}")
                return current_id
        
        def prev_sample_id(current_id):
            try:
                current_idx = next((i for i, s in enumerate(samples) if s["id"] == current_id), -1)
                if current_idx == -1:
                    return samples[0]["id"] if samples else current_id
                prev_idx = max(current_idx - 1, 0)
                return samples[prev_idx]["id"]
            except Exception as e:
                print(f"Error in prev_sample_id: {str(e)}")
                return current_id
        
        def save_results():
            output_path = f"{task_data['task_name']}_human_results.json"
            try:
                # Create backup with timestamp
                from datetime import datetime
                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                backup_path = f"{task_data['task_name']}_results_{timestamp}.json"
                
                # First create a backup
                with open(backup_path, "w") as f:
                    json.dump(results, f, indent=2)
                    
                # Then save to the main file
                with open(output_path, "w") as f:
                    json.dump(results, f, indent=2)
                    
                return f"✅ Results saved to {output_path} ({len(results['annotations'])} annotations)\nBackup created at {backup_path}"
            except Exception as e:
                return f"⚠️ Error saving results file: {str(e)}"
        
        # Pick the initial sample ID, guarding against an empty sample list
        initial_sample_id = samples[0]["id"] if samples else None
        if initial_sample_id is None:
            print("WARNING: No samples found in task data")
            return gr.HTML("No samples found in the task data. Please check your task file and try again.")
        
        with gr.Blocks(theme=gr.themes.Soft()) as demo:
            gr.Markdown(f"# {task_data['task_name']} - Human Reranking Evaluation")
            with gr.Accordion("Instructions", open=True):
                gr.Markdown("""
                ## Task Instructions
                
                {instructions}
                
                ### How to use this interface:
                1. Read the query at the top
                2. Use the ▲/▼ buttons (or drag and drop) to reorder documents by relevance
                3. Top document = Rank 1, Second = Rank 2, etc.
                4. Click "Submit Rankings" when you're done with the current query
                5. Use "Previous" and "Next" to navigate between queries
                6. Click "Save All Results" periodically to ensure your work is saved
                """.format(instructions=task_data.get("instructions", "Rank the documents based on their relevance to the query.")))
            
            current_sample_id = gr.State(value=initial_sample_id)
            
            with gr.Row():
                progress_text = gr.Textbox(label="Progress", value=f"Progress: 0/{len(samples)}", interactive=False)
                status_box = gr.Textbox(label="Status", value="Ready to start evaluation", interactive=False)
            
            with gr.Group():
                gr.Markdown("## Query:")
                query_text = gr.Textbox(value="Loading query...", label="", interactive=False)
                gr.Markdown("## Documents to Rank (Drag to Reorder):")
                sortable_list = gr.HTML("Loading documents...", elem_id="sortable-list-container")
                order_state = gr.Textbox(value="[]", visible=False, elem_id="current-order")
                with gr.Row():
                    prev_btn = gr.Button("← Previous Query", size="sm", elem_id="prev-btn")
                    submit_btn = gr.Button("Submit Rankings", size="lg", variant="primary", elem_id="submit-btn")
                    next_btn = gr.Button("Next Query →", size="sm", elem_id="next-btn")
                save_btn = gr.Button("💾 Save All Results", variant="secondary")
            
            js_code = """
            <script src="https://cdn.jsdelivr.net/npm/sortablejs@1.15.0/Sortable.min.js"></script>
            <script>
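            // Bridge to Python: updateRanksAfterMove() mirrors the current order into
            // the hidden Gradio Textbox (#current-order) as a JSON array of doc ids and
            // dispatches an 'input' event so Gradio picks up the new value on submit.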
            // Make the functions globally available
            window.moveItemUp = function(currentRank) {
                console.log('Moving item up:', currentRank);
                if (currentRank <= 1) return; // Already at the top
                
                const container = document.getElementById('sortable-container');
                if (!container) {
                    console.error('Container not found');
                    return;
                }
                
                const items = Array.from(container.querySelectorAll('.sortable-item'));
                console.log('Found items:', items.length);
                
                // Find the items to swap by their data-rank attribute
                const currentItem = items.find(item => parseInt(item.getAttribute('data-rank')) === currentRank);
                const aboveItem = items.find(item => parseInt(item.getAttribute('data-rank')) === currentRank - 1);
                
                if (!currentItem || !aboveItem) {
                    console.error('Items not found:', currentItem, aboveItem);
                    return;
                }
                
                console.log('Swapping items:', currentItem, aboveItem);
                
                // Swap the items in the DOM: inserting before the item above is
                // correct whether or not that item is the container's first child
                container.insertBefore(currentItem, aboveItem);
                
                // Update ranks
                window.updateRanksAfterMove();
            };
            
            window.moveItemDown = function(currentRank) {
                console.log('Moving item down:', currentRank);
                
                const container = document.getElementById('sortable-container');
                if (!container) {
                    console.error('Container not found');
                    return;
                }
                
                const items = Array.from(container.querySelectorAll('.sortable-item'));
                console.log('Found items:', items.length);
                
                if (currentRank >= items.length) return; // Already at the bottom
                
                // Find the items to swap by their data-rank attribute
                const currentItem = items.find(item => parseInt(item.getAttribute('data-rank')) === currentRank);
                const belowItem = items.find(item => parseInt(item.getAttribute('data-rank')) === currentRank + 1);
                
                if (!currentItem || !belowItem) {
                    console.error('Items not found for moving down');
                    return;
                }
                
                console.log('Swapping items down:', currentItem, belowItem);
                
                // Swap the items in the DOM - insert the current item after the below item
                container.insertBefore(currentItem, belowItem.nextElementSibling);
                
                // Update ranks
                window.updateRanksAfterMove();
            };
            
            window.updateRanksAfterMove = function() {
                console.log('Updating ranks');
                const container = document.getElementById('sortable-container');
                if (!container) {
                    console.error('Container not found for rank update');
                    return;
                }
                
                const items = Array.from(container.querySelectorAll('.sortable-item'));
                const orderInput = document.querySelector('#current-order textarea');
                if (!orderInput) {
                    console.error('Order input not found');
                    return;
                }
                
                const order = [];
                items.forEach((item, index) => {
                    const rank = index + 1;
                    const docId = parseInt(item.getAttribute('data-doc-id'));
                    
                    // Update rank display
                    const rankBadge = item.querySelector('.rank-badge');
                    if (rankBadge) rankBadge.textContent = rank;
                    
                    // Update item classes
                    item.className = item.className.replace(/rank-bg-\\d+/g, '').trim();
                    item.classList.add(`rank-bg-${rank}`);
                    
                    // Update data attribute
                    item.setAttribute('data-rank', rank);
                    
                    // Update button states
                    const upBtn = item.querySelector('.up-btn');
                    const downBtn = item.querySelector('.down-btn');
                    
                    if (upBtn) {
                        if (rank == 1) {
                            upBtn.setAttribute('disabled', 'disabled');
                        } else {
                            upBtn.removeAttribute('disabled');
                        }
                    }
                    
                    if (downBtn) {
                        if (rank == items.length) {
                            downBtn.setAttribute('disabled', 'disabled');
                        } else {
                            downBtn.removeAttribute('disabled');
                        }
                    }
                    
                    order.push(docId);
                });
                
                // Update hidden input with JSON
                console.log('New order:', order);
                const newOrderValue = JSON.stringify(order);
                orderInput.value = newOrderValue;
                
                // Trigger input event
                const event = new Event('input', { bubbles: true });
                orderInput.dispatchEvent(event);
            };
            
            document.addEventListener('DOMContentLoaded', function() {
                console.log('DOM loaded, initializing ranking interface');
                
                // Function to initialize the interface
                function initializeRankingInterface() {
                    const container = document.getElementById('sortable-container');
                    if (!container) {
                        console.log('Container not found, retrying in 200ms');
                        setTimeout(initializeRankingInterface, 200);
                        return;
                    }
                    
                    console.log('Sortable container found, setting up');
                    
                    // Add click events directly to buttons as a backup
                    const upButtons = container.querySelectorAll('.up-btn');
                    const downButtons = container.querySelectorAll('.down-btn');
                    
                    upButtons.forEach(btn => {
                        btn.addEventListener('click', function() {
                            const item = this.closest('.sortable-item');
                            const rank = parseInt(item.getAttribute('data-rank'));
                            window.moveItemUp(rank);
                        });
                    });
                    
                    downButtons.forEach(btn => {
                        btn.addEventListener('click', function() {
                            const item = this.closest('.sortable-item');
                            const rank = parseInt(item.getAttribute('data-rank'));
                            window.moveItemDown(rank);
                        });
                    });
                    
                    // Initialize drag-and-drop as fallback
                    if (typeof Sortable !== 'undefined') {
                        if (!container.sortableInstance) {
                            container.sortableInstance = new Sortable(container, {
                                animation: 150,
                                ghostClass: "sortable-ghost",
                                onEnd: function() {
                                    window.updateRanksAfterMove();
                                }
                            });
                        }
                    } else {
                        console.log('Sortable library not available');
                    }
                    
                    // Initialize the ranking
                    window.updateRanksAfterMove();
                }
                
                // Initialize immediately
                initializeRankingInterface();
                
                // Also observe DOM changes to reinitialize when needed
                const targetNode = document.getElementById('sortable-list-container');
                if (targetNode) {
                    const config = { childList: true, subtree: true };
                    const observer = new MutationObserver(function(mutationsList) {
                        for(const mutation of mutationsList) {
                            if (mutation.type === 'childList') {
                                if (document.getElementById('sortable-container')) {
                                    console.log('DOM changed, reinitializing');
                                    initializeRankingInterface();
                                }
                            }
                        }
                    });
                    observer.observe(targetNode, config);
                }
            });
            </script>
            <style>
            .sortable-container { 
                display: flex; 
                flex-direction: column; 
                gap: 12px; 
                min-height: 200px;
                padding: 16px;
                background-color: #f8f9fa;
                border-radius: 8px;
            }
            .sortable-item {
                padding: 14px; 
                background-color: #fff; 
                border: 1px solid #e0e0e0;
                border-radius: 6px; 
                display: flex; 
                align-items: center;
                transition: all 0.2s ease;
            }
            .sortable-item:hover { 
                background-color: #f8f9fa; 
                box-shadow: 0 2px 4px rgba(0,0,0,0.1);
            }
            .rank-controls {
                display: flex; 
                flex-direction: column;
                align-items: center;
                margin-right: 16px;
            }
            .rank-badge {
                display: flex; 
                align-items: center; 
                justify-content: center;
                width: 28px; 
                height: 28px; 
                border-radius: 50%;
                background-color: #6c757d; 
                color: white; 
                font-weight: bold; 
                margin: 6px 0;
                flex-shrink: 0;
            }
            .rank-btn {
                width: 28px;
                height: 28px;
                border: none;
                background-color: #f0f0f0;
                border-radius: 4px;
                margin: 2px 0;
                cursor: pointer;
                display: flex;
                align-items: center;
                justify-content: center;
                font-size: 14px;
            }
            .rank-btn:hover:not([disabled]) {
                background-color: #e0e0e0;
            }
            .rank-btn:active:not([disabled]) {
                background-color: #d0d0d0;
            }
            .rank-btn:disabled {
                opacity: 0.5;
                cursor: not-allowed;
            }
            .doc-content { 
                flex: 1; 
                line-height: 1.5;
                word-break: break-word;
            }
            /* More professional color scheme for rank badges */
            .rank-bg-1 .rank-badge { background-color: #1e40af; } /* Deep blue for top rank */
            .rank-bg-2 .rank-badge { background-color: #3b82f6; } /* Medium blue */
            .rank-bg-3 .rank-badge { background-color: #60a5fa; } /* Light blue */
            .rank-bg-4 .rank-badge { background-color: #93c5fd; color: #1e3a8a; } /* Very light blue with dark text */
            .rank-bg-5 .rank-badge { background-color: #bfdbfe; color: #1e3a8a; } /* Lightest blue with dark text */
            
            /* Lower ranks get progressively more gray */
            .rank-bg-6 .rank-badge, .rank-bg-7 .rank-badge { 
                background-color: #64748b; 
            }
            .rank-bg-8 .rank-badge, .rank-bg-9 .rank-badge, .rank-bg-10 .rank-badge { 
                background-color: #94a3b8; 
                color: #0f172a;
            }
            .rank-bg-11 .rank-badge, .rank-bg-12 .rank-badge, .rank-bg-13 .rank-badge, 
            .rank-bg-14 .rank-badge, .rank-bg-15 .rank-badge, .rank-bg-16 .rank-badge, 
            .rank-bg-17 .rank-badge, .rank-bg-18 .rank-badge, .rank-bg-19 .rank-badge, 
            .rank-bg-20 .rank-badge { 
                background-color: #cbd5e1;
                color: #0f172a;
            }
            .error {
                padding: 16px;
                background-color: #fee2e2;
                border: 1px solid #f87171;
                color: #b91c1c;
                border-radius: 6px;
                margin: 16px 0;
            }
            </style>
            """
            gr.HTML(js_code)
            
            submit_btn.click(
                save_ranking,
                inputs=[order_state, current_sample_id],
                outputs=[status_box, progress_text]
            )
            
            next_btn.click(
                next_sample_id, inputs=[current_sample_id], outputs=[current_sample_id]
            ).then(
                load_sample,
                inputs=[current_sample_id],
                outputs=[query_text, sortable_list, order_state, progress_text, status_box]
            )
            
            prev_btn.click(
                prev_sample_id, inputs=[current_sample_id], outputs=[current_sample_id]
            ).then(
                load_sample,
                inputs=[current_sample_id],
                outputs=[query_text, sortable_list, order_state, progress_text, status_box]
            )
            
            save_btn.click(save_results, outputs=[status_box])
            
            # Use a custom loading function with proper error handling
            def safe_load_initial():
                try:
                    if initial_sample_id and samples:
                        return load_sample(initial_sample_id)
                    else:
                        return "No query available", "<div>No documents available</div>", "[]", "No progress data", "Error: No samples found"
                except Exception as e:
                    print(f"Error in initial load: {str(e)}")
                    return "Error loading query", "<div>Error loading documents</div>", "[]", "Error", f"Error: {str(e)}"
            
            # Use the safe loading function to prevent scheduling failures
            demo.load(safe_load_initial, 
                     outputs=[query_text, sortable_list, order_state, progress_text, status_box])
            
        return demo
    except Exception as e:
        import traceback
        print(f"Error creating reranking interface: {traceback.format_exc()}")
        # Return a simple error interface instead of failing completely
        with gr.Blocks() as error_demo:
            gr.Markdown("# Error Creating Reranking Interface")
            gr.Markdown(f"An error occurred while creating the interface: **{str(e)}**")
            gr.Markdown("Please check your task data and try again.")
        return error_demo

# Main app with file upload capability and better error handling
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# MTEB Human Evaluation Demo")
    
    with gr.Tabs():
        with gr.TabItem("Demo"):
            gr.Markdown("""
            ## MTEB Human Evaluation Interface
            
            This interface allows you to evaluate the relevance of documents for reranking tasks.
            """)
            
            # Function to get the most recent task file with error handling
            def get_latest_task_file():
                try:
                    # Check first in uploaded_tasks directory
                    os.makedirs("uploaded_tasks", exist_ok=True)
                    uploaded_tasks = [f for f in os.listdir("uploaded_tasks") if f.endswith(".json")]
                    
                    if uploaded_tasks:
                        # Sort by modification time, newest first
                        uploaded_tasks.sort(key=lambda x: os.path.getmtime(os.path.join("uploaded_tasks", x)), reverse=True)
                        return os.path.join("uploaded_tasks", uploaded_tasks[0])
                    
                    # Fall back to default example
                    if os.path.exists("AskUbuntuDupQuestions_human_eval.json"):
                        return "AskUbuntuDupQuestions_human_eval.json"
                    
                    # If no files found
                    return None
                except Exception as e:
                    print(f"Error getting latest task file: {str(e)}")
                    return None
            
            # Load the task file with proper error handling
            task_file = get_latest_task_file()
            
            task_data = None
            try:
                if task_file and os.path.exists(task_file):
                    with open(task_file, "r") as f:
                        task_data = json.load(f)
                    
                    # Show which task is currently loaded
                    gr.Markdown(f"**Current Task: {task_data['task_name']}** ({len(task_data['samples'])} samples)")
                    
                    # Display the interface
                    reranking_demo = create_reranking_interface(task_data)
                else:
                    gr.Markdown("**No task file found**")
                    gr.Markdown("Please upload a valid task file in the 'Upload & Evaluate' tab.")
                    
                    # Create a dummy interface with instructions
                    with gr.Blocks() as dummy_demo:
                        gr.Markdown("### No Task Loaded")
                        gr.Markdown("Please go to the 'Upload & Evaluate' tab to upload a task file.")
                    reranking_demo = dummy_demo
            except Exception as e:
                import traceback
                print(f"Error loading task: {traceback.format_exc()}")
                gr.Markdown(f"**Error loading task: {str(e)}**")
                gr.Markdown("Please upload a valid task file in the 'Upload & Evaluate' tab.")
                
                # Create a simple error interface
                with gr.Blocks() as error_demo:
                    gr.Markdown("### Error Loading Task")
                    gr.Markdown(f"An error occurred: **{str(e)}**")
                    gr.Markdown("Please try uploading a different task file.")
                reranking_demo = error_demo
        
        with gr.TabItem("Upload & Evaluate"):
            gr.Markdown("""
            ## Upload Your Own Task File
            
            If you have a prepared task file, you can upload it here to create an evaluation interface.
            """)
            
            with gr.Row():
                with gr.Column():
                    file_input = gr.File(label="Upload a task file (JSON)")
                    load_btn = gr.Button("Load Task")
                    message = gr.Textbox(label="Status", interactive=False)
                    
                    # Add task list for previously uploaded tasks
                    gr.Markdown("### Previous Uploads")
                    
                    # Function to list existing task files in the tasks directory
                    def list_task_files():
                        os.makedirs("uploaded_tasks", exist_ok=True)
                        tasks = [f for f in os.listdir("uploaded_tasks") if f.endswith(".json")]
                        if not tasks:
                            return "No task files uploaded yet."
                        return "\n".join([f"- [{t}](javascript:selectTask('{t}'))" for t in tasks])
                    
                    task_list = gr.Markdown(list_task_files())
                    refresh_btn = gr.Button("Refresh List")
                    
                    # Add results management section
                    gr.Markdown("### Results Management")
                    
                    # Function to list existing result files
                    def list_result_files():
                        results = [f for f in os.listdir(".") if f.endswith("_human_results.json")]
                        if not results:
                            return "No result files available yet."
                        
                        result_links = []
                        for r in results:
                            # Calculate completion stats
                            try:
                                with open(r, "r") as f:
                                    result_data = json.load(f)
                                annotation_count = len(result_data.get("annotations", []))
                                task_name = result_data.get("task_name", "Unknown")
                                result_links.append(f"- {r} ({annotation_count} annotations for {task_name})")
                            except Exception:
                                result_links.append(f"- {r}")
                        
                        return "\n".join(result_links)
                    
                    results_list = gr.Markdown(list_result_files())
                    download_results_btn = gr.Button("Download Results")
                    
                # Right side - will contain the actual interface
                with gr.Column():
                    task_container = gr.HTML()
            
            # Handle file upload and storage
            def handle_upload(file):
                if not file:
                    return "Please upload a task file", task_list.value, task_container.value
                
                try:
                    # Create directory if it doesn't exist
                    os.makedirs("uploaded_tasks", exist_ok=True)
                    
                    # Read the uploaded file
                    with open(file.name, "r") as f:
                        task_data = json.load(f)
                    
                    # Validate task format
                    if "task_name" not in task_data or "samples" not in task_data:
                        return "Invalid task file format. Must contain 'task_name' and 'samples' fields.", task_list.value, task_container.value
                    
                    # Save to a consistent location
                    task_filename = f"uploaded_tasks/{task_data['task_name']}_task.json"
                    with open(task_filename, "w") as f:
                        json.dump(task_data, f, indent=2)
                    
                    # Instead of trying to create the interface here,
                    # we'll return a message with instructions
                    return f"Task '{task_data['task_name']}' uploaded successfully with {len(task_data['samples'])} samples. Please refresh the app and use the Demo tab to evaluate it.", list_task_files(), f"""
                    <div style="padding: 20px; background-color: #f0f0f0; border-radius: 10px;">
                        <h3>Task uploaded successfully!</h3>
                        <p>Task Name: {task_data['task_name']}</p>
                        <p>Samples: {len(task_data['samples'])}</p>
                        <p>To evaluate this task:</p>
                        <ol>
                            <li>Refresh the app</li>
                            <li>The Demo tab will now use your uploaded task</li>
                            <li>Complete your evaluations</li>
                            <li>Results will be saved as {task_data['task_name']}_human_results.json</li>
                        </ol>
                    </div>
                    """
                except Exception as e:
                    return f"Error processing task file: {str(e)}", task_list.value, task_container.value
            
            # Function to prepare results for download
            def prepare_results_for_download():
                results = [f for f in os.listdir(".") if f.endswith("_human_results.json")]
                if not results:
                    return None
                
                # Create a zip file with all results
                import zipfile
                zip_path = "mteb_human_eval_results.zip"
                with zipfile.ZipFile(zip_path, 'w') as zipf:
                    for r in results:
                        zipf.write(r)
                
                return zip_path
            
            # Connect events
            load_btn.click(handle_upload, inputs=[file_input], outputs=[message, task_list, task_container])
            refresh_btn.click(list_task_files, outputs=[task_list])
            download_results_btn.click(prepare_results_for_download, outputs=[gr.File(label="Download Results")])
        
        with gr.TabItem("Results Management"):
            gr.Markdown("""
            ## Manage Evaluation Results
            
            View, download, and analyze your evaluation results.
            """)
            
            # Function to load and display result stats
            def get_result_stats():
                results = [f for f in os.listdir(".") if f.endswith("_human_results.json")]
                if not results:
                    return "No result files available yet."
                
                stats = []
                for r in results:
                    try:
                        with open(r, "r") as f:
                            result_data = json.load(f)
                        
                        task_name = result_data.get("task_name", "Unknown")
                        annotations = result_data.get("annotations", [])
                        annotation_count = len(annotations)
                        
                        # Calculate completion percentage
                        sample_ids = set(a.get("sample_id") for a in annotations)
                        
                        # Try to get the total sample count from the corresponding task file
                        total_samples = 0
                        task_file = f"uploaded_tasks/{task_name}_task.json"
                        if os.path.exists(task_file):
                            with open(task_file, "r") as f:
                                task_data = json.load(f)
                            total_samples = len(task_data.get("samples", []))
                        
                        completion = f"{len(sample_ids)}/{total_samples}" if total_samples else f"{len(sample_ids)} samples"
                        
                        stats.append(f"### {task_name}\n- Annotations: {annotation_count}\n- Completion: {completion}\n- File: {r}")
                    except Exception as e:
                        stats.append(f"### {r}\n- Error loading results: {str(e)}")
                
                return "\n\n".join(stats)
            
            result_stats = gr.Markdown(get_result_stats())
            refresh_results_btn = gr.Button("Refresh Results")
            
            # Add download options
            with gr.Row():
                with gr.Column():
                    download_all_btn = gr.Button("Download All Results (ZIP)")
                with gr.Column():
                    result_select = gr.Dropdown(choices=[f for f in os.listdir(".") if f.endswith("_human_results.json")], label="Select Result to Download", value=None)
                    download_selected_btn = gr.Button("Download Selected")
            
            # Add results visualization placeholder
            gr.Markdown("### Results Visualization")
            gr.Markdown("*Visualization features will be added in a future update.*")
            
            # Connect events
            refresh_results_btn.click(get_result_stats, outputs=[result_stats])
            
            # Function to prepare all results for download as ZIP
            def prepare_all_results():
                import zipfile
                zip_path = "mteb_human_eval_results.zip"
                with zipfile.ZipFile(zip_path, 'w') as zipf:
                    for r in [f for f in os.listdir(".") if f.endswith("_human_results.json")]:
                        zipf.write(r)
                return zip_path
            
            # Function to return a single result file
            def get_selected_result(filename):
                if not filename:
                    return None
                if os.path.exists(filename):
                    return filename
                return None
            
            # Update dropdown when refreshing results
            def update_result_dropdown():
                return gr.Dropdown.update(choices=[f for f in os.listdir(".") if f.endswith("_human_results.json")])
            
            refresh_results_btn.click(update_result_dropdown, outputs=[result_select])
            download_all_btn.click(prepare_all_results, outputs=[gr.File(label="Download All Results")])
            download_selected_btn.click(get_selected_result, inputs=[result_select], outputs=[gr.File(label="Download Selected Result")])

if __name__ == "__main__":
    try:
        # Use options compatible with Gradio 3.42.0 (os is already imported at module level)
        # Disable file watching to prevent restart loops
        os.environ['GRADIO_WATCH'] = 'no'
        demo.launch(show_error=True)
    except Exception as e:
        import traceback
        print(f"Error launching demo: {traceback.format_exc()}")
        print("\nTrying alternative launch method...")
        try:
            # Alternative launch method
            demo.launch(share=False, debug=True)
        except Exception as e2:
            print(f"Alternative launch also failed: {str(e2)}")
            print("\nPlease check your Gradio installation and try again.")