kitab-bench committed
Commit 5fd0555 · verified · 1 parent: 00364aa

Update app.py

Files changed (1)
  1. app.py +402 -187
app.py CHANGED
@@ -1,204 +1,419 @@
  import gradio as gr
- from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
  import pandas as pd
- from apscheduler.schedulers.background import BackgroundScheduler
- from huggingface_hub import snapshot_download

- from src.about import (
-     CITATION_BUTTON_LABEL,
-     CITATION_BUTTON_TEXT,
-     EVALUATION_QUEUE_TEXT,
-     INTRODUCTION_TEXT,
-     LLM_BENCHMARKS_TEXT,
-     TITLE,
- )
- from src.display.css_html_js import custom_css
- from src.display.utils import (
-     BENCHMARK_COLS,
-     COLS,
-     EVAL_COLS,
-     EVAL_TYPES,
-     AutoEvalColumn,
-     ModelType,
-     fields,
-     WeightType,
-     Precision
- )
- from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
- from src.populate import get_evaluation_queue_df, get_leaderboard_df
- from src.submission.submit import add_new_eval

- def restart_space():
-     API.restart_space(repo_id=REPO_ID)
-
- ### Space initialisation
- try:
-     print(EVAL_REQUESTS_PATH)
-     snapshot_download(
-         repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
      )
- except Exception:
-     restart_space()
- try:
-     print(EVAL_RESULTS_PATH)
-     snapshot_download(
-         repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
      )
- except Exception:
-     restart_space()
-
-
- LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
-
- (
-     finished_eval_queue_df,
-     running_eval_queue_df,
-     pending_eval_queue_df,
- ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
-
- def init_leaderboard(dataframe):
-     if dataframe is None or dataframe.empty:
-         raise ValueError("Leaderboard DataFrame is empty or None.")
-     return Leaderboard(
-         value=dataframe,
-         datatype=[c.type for c in fields(AutoEvalColumn)],
-         select_columns=SelectColumns(
-             default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
-             cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
-             label="Select Columns to Display:",
-         ),
-         search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
-         hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
-         filter_columns=[
-             ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
-             ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
-             ColumnFilter(
-                 AutoEvalColumn.params.name,
-                 type="slider",
-                 min=0.01,
-                 max=150,
-                 label="Select the number of parameters (B)",
-             ),
-             ColumnFilter(
-                 AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
-             ),
-         ],
-         bool_checkboxgroup_label="Hide models",
-         interactive=False,
      )


- demo = gr.Blocks(css=custom_css)
- with demo:
-     gr.HTML(TITLE)
-     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
-
-     with gr.Tabs(elem_classes="tab-buttons") as tabs:
-         with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
-             leaderboard = init_leaderboard(LEADERBOARD_DF)
-
-         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
-             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
-
-         with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-             with gr.Column():
-                 with gr.Row():
-                     gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-                 with gr.Column():
-                     with gr.Accordion(
-                         f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                         open=False,
-                     ):
-                         with gr.Row():
-                             finished_eval_table = gr.components.Dataframe(
-                                 value=finished_eval_queue_df,
-                                 headers=EVAL_COLS,
-                                 datatype=EVAL_TYPES,
-                                 row_count=5,
-                             )
-                     with gr.Accordion(
-                         f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                         open=False,
-                     ):
-                         with gr.Row():
-                             running_eval_table = gr.components.Dataframe(
-                                 value=running_eval_queue_df,
-                                 headers=EVAL_COLS,
-                                 datatype=EVAL_TYPES,
-                                 row_count=5,
-                             )

-                     with gr.Accordion(
-                         f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                         open=False,
-                     ):
-                         with gr.Row():
-                             pending_eval_table = gr.components.Dataframe(
-                                 value=pending_eval_queue_df,
-                                 headers=EVAL_COLS,
-                                 datatype=EVAL_TYPES,
-                                 row_count=5,
-                             )
-             with gr.Row():
-                 gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")

-             with gr.Row():
-                 with gr.Column():
-                     model_name_textbox = gr.Textbox(label="Model name")
-                     revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                     model_type = gr.Dropdown(
-                         choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                         label="Model type",
-                         multiselect=False,
-                         value=None,
-                         interactive=True,
-                     )

-                 with gr.Column():
-                     precision = gr.Dropdown(
-                         choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                         label="Precision",
-                         multiselect=False,
-                         value="float16",
-                         interactive=True,
-                     )
-                     weight_type = gr.Dropdown(
-                         choices=[i.value.name for i in WeightType],
-                         label="Weights type",
-                         multiselect=False,
-                         value="Original",
-                         interactive=True,
-                     )
-                     base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")

-             submit_button = gr.Button("Submit Eval")
-             submission_result = gr.Markdown()
-             submit_button.click(
-                 add_new_eval,
-                 [
-                     model_name_textbox,
-                     base_model_name_textbox,
-                     revision_name_textbox,
-                     precision,
-                     weight_type,
-                     model_type,
-                 ],
-                 submission_result,
              )
-
-     with gr.Row():
-         with gr.Accordion("📙 Citation", open=False):
-             citation_button = gr.Textbox(
-                 value=CITATION_BUTTON_TEXT,
-                 label=CITATION_BUTTON_LABEL,
-                 lines=20,
-                 elem_id="citation-button",
-                 show_copy_button=True,
              )

- scheduler = BackgroundScheduler()
- scheduler.add_job(restart_space, "interval", seconds=1800)
- scheduler.start()
- demo.queue(default_concurrency_limit=40).launch()
  import gradio as gr
  import pandas as pd
+ import numpy as np

+ # Sample data - in a real application, you would load this from a database or API
+ data = {
+     "model": [
+         "GPT-4o", "Gemini-2.0-Flash", "Qwen2.5-VL-7B", "AIN-7B", "PaliGemma-3B",
+         "TrOCR-large", "nougat-base", "KITAB-OCR", "Llama-3-70B-Vision", "claude-3-opus"
+     ],
+     "organization": [
+         "OpenAI", "Google", "Alibaba", "MBZUAI", "Google",
+         "Microsoft", "Meta", "MBZUAI", "Meta", "Anthropic"
+     ],
+     "type": [
+         "Closed-source", "Closed-source", "Open-source", "Open-source", "Open-source",
+         "Open-source", "Open-source", "Open-source", "Open-source", "Closed-source"
+     ],
+     "task": [
+         "OCR/Vision", "OCR/Vision", "OCR/Vision", "OCR/Vision", "OCR/Vision",
+         "OCR", "OCR/Document", "OCR/Arabic", "Vision", "Vision"
+     ],
+     "accuracy": [
+         92.5, 94.2, 83.4, 87.2, 81.5,
+         76.8, 79.3, 75.2, 89.1, 93.7
+     ],
+     "f1_score": [
+         90.1, 91.3, 79.8, 86.5, 78.3,
+         72.1, 74.5, 70.8, 87.4, 90.8
+     ],
+     "cer": [
+         0.31, 0.13, 1.20, 0.20, 0.67,
+         0.54, 0.58, 0.95, 0.24, 0.15
+     ],
+     "downloads": [
+         "24.5K", "18.2K", "152K", "89K", "112K",
+         "320K", "235K", "45K", "580K", "12.8K"
+     ],
+     "last_updated": [
+         "2025-03-15", "2025-03-10", "2025-03-05", "2025-02-28", "2025-02-20",
+         "2025-02-15", "2025-02-10", "2025-02-05", "2025-01-28", "2025-01-15"
+     ],
+     "model_url": [
+         "https://huggingface.co/openai/gpt-4o",
+         "https://huggingface.co/google/gemini-2-flash",
+         "https://huggingface.co/Qwen/Qwen2.5-VL-7B",
+         "https://huggingface.co/MBZUAI/AIN-7B",
+         "https://huggingface.co/google/paligemma-3b",
+         "https://huggingface.co/microsoft/trocr-large-printed",
+         "https://huggingface.co/facebook/nougat-base",
+         "https://huggingface.co/MBZUAI/KITAB-OCR",
+         "https://huggingface.co/meta-llama/Llama-3-70B-Vision",
+         "https://huggingface.co/anthropic/claude-3-opus"
+     ],
+     "paper_url": [
+         "https://arxiv.org/abs/2412.xxxxx",
+         "https://arxiv.org/abs/2403.xxxxx",
+         "https://arxiv.org/abs/2410.xxxxx",
+         "https://arxiv.org/abs/2502.xxxxx",
+         "https://arxiv.org/abs/2305.xxxxx",
+         "https://arxiv.org/abs/2109.10282",
+         "https://arxiv.org/abs/2308.13418",
+         "https://arxiv.org/abs/2502.14949",
+         "https://arxiv.org/abs/2405.xxxxx",
+         "https://arxiv.org/abs/2404.xxxxx"
+     ]
+ }

+ # Create DataFrame
+ df = pd.DataFrame(data)

+ # Function to apply color formatting to the dataframe based on metric values
+ def format_dataframe(df):
+     # Create a copy to avoid modifying the original
+     formatted_df = df.copy()
+
+     # Format accuracy and F1 Score (higher is better)
+     formatted_df['accuracy'] = formatted_df['accuracy'].apply(
+         lambda x: f"<span style='color: {'#10B981' if x > 85 else '#F59E0B' if x > 75 else '#EF4444'}'>{x:.1f}</span>"
      )
+
+     formatted_df['f1_score'] = formatted_df['f1_score'].apply(
+         lambda x: f"<span style='color: {'#10B981' if x > 85 else '#F59E0B' if x > 75 else '#EF4444'}'>{x:.1f}</span>"
      )
+
+     # Format CER (lower is better)
+     formatted_df['cer'] = formatted_df['cer'].apply(
+         lambda x: f"<span style='color: {'#10B981' if x < 0.5 else '#F59E0B' if x < 1 else '#EF4444'}'>{x:.2f}</span>"
      )
+
+     # Add hyperlinks for model and paper
+     formatted_df['model'] = formatted_df.apply(
+         lambda row: f"<a href='{row['model_url']}' target='_blank'>{row['model']}</a>", axis=1
+     )
+
+     formatted_df['paper'] = formatted_df.apply(
+         lambda row: f"<a href='{row['paper_url']}' target='_blank'>Paper</a>", axis=1
+     )
+
+     # Add type badge
+     formatted_df['type'] = formatted_df['type'].apply(
+         lambda x: f"<span style='background-color: {'#DBEAFE' if x == 'Open-source' else '#FEF3C7'}; padding: 2px 6px; border-radius: 9999px; font-size: 0.75rem;'>{x}</span>"
+     )
+
+     # Add task badge
+     formatted_df['task'] = formatted_df['task'].apply(
+         lambda x: f"<span style='background-color: #E0F2FE; padding: 2px 6px; border-radius: 9999px; font-size: 0.75rem;'>{x}</span>"
+     )
+
+     # Drop URL columns as they're now embedded in the model and paper columns
+     formatted_df = formatted_df.drop(columns=['model_url', 'paper_url'])
+
+     return formatted_df

+ # Define CSS for styling
+ css = """
+ #leaderboard-title {
+ text-align: center;
+ margin-bottom: 0;
+ }
+ #leaderboard-subtitle {
+ text-align: center;
+ margin-top: 0;
+ color: #6B7280;
+ font-size: 1rem;
+ }
+ .gradio-container {
+ max-width: 1200px !important;
+ }
+ .hf-logo {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ margin-bottom: 1rem;
+ }
+ .hf-logo img {
+ height: 50px;
+ }
+ .header {
+ background: linear-gradient(90deg, #FFDE59 0%, #FFC532 100%);
+ padding: 20px;
+ border-radius: 8px;
+ margin-bottom: 20px;
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ }
+ .header img {
+ height: 40px;
+ margin-right: 15px;
+ }
+ .header-content {
+ display: flex;
+ align-items: center;
+ }
+ .header-text {
+ display: flex;
+ flex-direction: column;
+ }
+ .header-text h1 {
+ margin: 0;
+ font-size: 1.5rem;
+ font-weight: bold;
+ color: black;
+ }
+ .header-text p {
+ margin: 0;
+ color: rgba(0, 0, 0, 0.8);
+ }
+ .filter-container {
+ display: flex;
+ flex-wrap: wrap;
+ gap: 10px;
+ margin-bottom: 20px;
+ }
+ table {
+ width: 100%;
+ border-collapse: collapse;
+ }
+ th {
+ background-color: #F9FAFB;
+ text-align: left;
+ padding: 12px;
+ font-weight: 600;
+ color: #374151;
+ border-bottom: 1px solid #E5E7EB;
+ }
+ td {
+ padding: 12px;
+ border-bottom: 1px solid #E5E7EB;
+ }
+ tr:hover {
+ background-color: #F9FAFB;
+ }
+ a {
+ color: #2563EB;
+ text-decoration: none;
+ }
+ a:hover {
+ text-decoration: underline;
+ }
+ .footer {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ padding: 10px 0;
+ color: #6B7280;
+ font-size: 0.875rem;
+ margin-top: 20px;
+ }
+ .footer a {
+ color: #2563EB;
+ text-decoration: none;
+ display: inline-flex;
+ align-items: center;
+ }
+ .footer a:hover {
+ text-decoration: underline;
+ }
+ """

+ # Hugging Face logo SVG (in-lined for simplicity)
+ hf_logo = """
+ <svg xmlns="http://www.w3.org/2000/svg" width="120" height="40" viewBox="0 0 95 25" fill="none">
+ <path d="M8.51825 0H11.3583V17.7547H8.51825V0Z" fill="black"/>
+ <path d="M30.1975 5.07422H33.0375V17.7547H30.1975V16.2969C28.9408 17.4158 27.6842 18.0602 25.94 18.0602C22.455 18.0602 19.5825 15.1877 19.5825 11.4358C19.5825 7.6839 22.455 4.8114 25.94 4.8114C27.6842 4.8114 28.9408 5.4558 30.1975 6.5747V5.07422ZM26.2882 15.403C28.7225 15.403 30.1975 13.7014 30.1975 11.4358C30.1975 9.1702 28.7225 7.4686 26.2882 7.4686C23.8539 7.4686 22.3789 9.1702 22.3789 11.4358C22.3789 13.7014 23.8539 15.403 26.2882 15.403Z" fill="black"/>
+ <path d="M35.1311 11.4358C35.1311 7.6839 38.0036 4.8114 41.7555 4.8114C45.5075 4.8114 48.38 7.6839 48.38 11.4358C48.38 15.1877 45.5075 18.0602 41.7555 18.0602C38.0036 18.0602 35.1311 15.1877 35.1311 11.4358ZM45.5839 11.4358C45.5839 9.1702 44.1089 7.4686 41.7555 7.4686C39.402 7.4686 37.927 9.1702 37.927 11.4358C37.927 13.7014 39.402 15.403 41.7555 15.403C44.1089 15.403 45.5839 13.7014 45.5839 11.4358Z" fill="black"/>
+ <path d="M50.2717 0H53.1117V17.7547H50.2717V0Z" fill="black"/>
+ <path d="M55.1956 0H58.0356V17.7547H55.1956V0Z" fill="black"/>
+ <path d="M68.3864 11.4359C68.3864 9.0824 66.9114 7.4686 64.558 7.4686C62.2046 7.4686 60.6521 9.0824 60.6521 11.4359C60.6521 13.7893 62.2047 15.4031 64.558 15.4031C66.9114 15.4031 68.3864 13.7893 68.3864 11.4359ZM57.8122 11.4359C57.8122 7.6839 60.6847 4.8114 64.4367 4.8114C66.1809 4.8114 67.4374 5.45579 68.6939 6.57469V5.07422H71.5341V18.0602C71.5341 22.1174 68.5725 24.618 64.5575 24.618C61.2553 24.618 58.5041 22.8739 57.7383 20.0013L60.5347 19.1142C61.0577 20.6146 62.5748 21.9605 64.5575 21.9605C66.9111 21.9605 68.6941 20.5285 68.6941 18.0602V16.297C67.4374 17.4159 66.1809 18.0603 64.4367 18.0603C60.6847 18.0603 57.8122 15.1878 57.8122 11.4359Z" fill="black"/>
+ <path d="M74.0307 11.4358C74.0307 7.6839 76.9032 4.8114 80.6551 4.8114C84.4071 4.8114 87.2796 7.6839 87.2796 11.4358C87.2796 15.1877 84.4071 18.0602 80.6551 18.0602C76.9032 18.0602 74.0307 15.1877 74.0307 11.4358ZM84.4835 11.4358C84.4835 9.1702 83.0085 7.4686 80.6551 7.4686C78.3016 7.4686 76.8266 9.1702 76.8266 11.4358C76.8266 13.7014 78.3016 15.403 80.6551 15.403C83.0085 15.403 84.4835 13.7014 84.4835 11.4358Z" fill="black"/>
+ <path d="M89.9903 2.69156C89.9903 1.63531 90.7989 0.82666 91.8551 0.82666C92.9114 0.82666 93.72 1.63531 93.72 2.69156C93.72 3.74781 92.9114 4.55645 91.8551 4.55645C90.7989 4.55645 89.9903 3.74781 89.9903 2.69156ZM90.1952 5.07422H93.5149V17.7547H90.1952V5.07422Z" fill="black"/>
+ </svg>
+ """

+ # Function to filter dataframe based on type
+ def filter_by_type(df, type_filter):
+     if type_filter == "All":
+         return df
+     return df[df["type"].str.contains(type_filter)]

+ # Function to filter dataframe based on search term
+ def filter_by_search(df, search_term):
+     if not search_term:
+         return df
+
+     # Convert search term to lowercase for case-insensitive search
+     search_term = search_term.lower()
+
+     # Filter based on model, organization, or task
+     mask = (
+         df["model"].str.lower().str.contains(search_term) |
+         df["organization"].str.lower().str.contains(search_term) |
+         df["task"].str.lower().str.contains(search_term)
+     )
+
+     return df[mask]

+ # Create the Gradio interface
+ def create_leaderboard_interface():
+     # Create DataFrame
+     df_orig = pd.DataFrame(data)
+
+     # Sort by accuracy descending by default
+     df_orig = df_orig.sort_values(by="accuracy", ascending=False)
+
+     with gr.Blocks(css=css) as demo:
+         gr.HTML(f"""
+ <div class="header">
+ <div class="header-content">
+ <div>
+ <svg xmlns="http://www.w3.org/2000/svg" width="40" height="40" viewBox="0 0 40 40" fill="none">
+ <path d="M9 16H11V24H9V16Z" fill="black"/>
+ <path d="M13 11H15V29H13V11Z" fill="black"/>
+ <path d="M17 15H19V25H17V15Z" fill="black"/>
+ <path d="M21 11H23V29H21V11Z" fill="black"/>
+ <path d="M25 16H27V24H25V16Z" fill="black"/>
+ <path d="M29 14H31V26H29V14Z" fill="black"/>
+ </svg>
+ </div>
+ <div class="header-text">
+ <h1>KITAB-Bench Leaderboard</h1>
+ <p>Arabic OCR and Document Understanding Benchmark</p>
+ </div>
+ </div>
+ <div>
+ <a href="https://huggingface.co/spaces" target="_blank" style="color: black; text-decoration: underline;">
+ Powered by 🤗 Spaces
+ </a>
+ </div>
+ </div>
+         """)

+         # Filter controls
+         with gr.Row(equal_height=True):
+             type_filter = gr.Radio(
+                 ["All", "Open-source", "Closed-source"],
+                 label="Model Type",
+                 value="All",
+                 interactive=True
              )
+             search_input = gr.Textbox(
+                 label="Search Models, Organizations, or Tasks",
+                 placeholder="Type to search...",
+                 interactive=True
              )
+
+             sort_by = gr.Dropdown(
+                 ["accuracy", "f1_score", "cer", "downloads"],
+                 label="Sort by",
+                 value="accuracy",
+                 interactive=True
+             )
+
+             sort_order = gr.Radio(
+                 ["Descending", "Ascending"],
+                 label="Sort Order",
+                 value="Descending",
+                 interactive=True
+             )
+
+         # Table output
+         table_output = gr.HTML()
+
+         # Define update function
+         def update_table(type_filter, search_term, sort_by, sort_order):
+             # Filter by type
+             filtered_df = filter_by_type(df_orig, type_filter)
+
+             # Filter by search term
+             filtered_df = filter_by_search(filtered_df, search_term)
+
+             # Sort the dataframe
+             is_ascending = sort_order == "Ascending"
+
+             # For CER, we might want to reverse the default sorting (since lower is better)
+             if sort_by == "cer":
+                 is_ascending = not is_ascending
+
+             filtered_df = filtered_df.sort_values(by=sort_by, ascending=is_ascending)
+
+             # Format the dataframe
+             formatted_df = format_dataframe(filtered_df)
+
+             # Generate HTML table
+             html_table = f"""
+ <div style="overflow-x: auto;">
+ <table style="width:100%">
+ <thead>
+ <tr>
+ <th>Model</th>
+ <th>Organization</th>
+ <th>Type</th>
+ <th>Task</th>
+ <th>Accuracy</th>
+ <th>F1 Score</th>
+ <th>CER</th>
+ <th>Downloads</th>
+ <th>Last Updated</th>
+ <th>Paper</th>
+ </tr>
+ </thead>
+ <tbody>
+             """
+
+             for _, row in formatted_df.iterrows():
+                 html_table += f"""
+ <tr>
+ <td>{row['model']}</td>
+ <td>{row['organization']}</td>
+ <td>{row['type']}</td>
+ <td>{row['task']}</td>
+ <td>{row['accuracy']}</td>
+ <td>{row['f1_score']}</td>
+ <td>{row['cer']}</td>
+ <td>{row['downloads']}</td>
+ <td>{row['last_updated']}</td>
+ <td>{row['paper']}</td>
+ </tr>
+                 """
+
+             html_table += """
+ </tbody>
+ </table>
+ </div>
+ <div class="footer">
+ <span>Showing {count} of {total} models</span>
+ <div>
+ <a href="https://github.com/mbzuai-oryx/KITAB-Bench" target="_blank">GitHub Repository</a>
+ <span style="margin: 0 8px;">|</span>
+ <a href="https://arxiv.org/abs/2502.14949" target="_blank">KITAB-Bench Paper</a>
+ </div>
+ </div>
+             """.format(count=len(filtered_df), total=len(df_orig))
+
+             return html_table
+
+         # Set up event handlers
+         type_filter.change(update_table, [type_filter, search_input, sort_by, sort_order], table_output)
+         search_input.change(update_table, [type_filter, search_input, sort_by, sort_order], table_output)
+         sort_by.change(update_table, [type_filter, search_input, sort_by, sort_order], table_output)
+         sort_order.change(update_table, [type_filter, search_input, sort_by, sort_order], table_output)
+
+         # Initialize table on page load
+         demo.load(update_table, [type_filter, search_input, sort_by, sort_order], table_output)
+
+         gr.HTML("""
+ <div style="margin-top: 20px; text-align: center; font-size: 0.8rem; color: #6B7280;">
+ <p>For more information about the KITAB-Bench, visit the <a href="https://mbzuai-oryx.github.io/KITAB-Bench/" target="_blank">project website</a>.</p>
+ </div>
+         """)
+
+     return demo

+ # Launch the app
+ demo = create_leaderboard_interface()

+ if __name__ == "__main__":
+     demo.launch()
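
The new app ships with a hard-coded sample table; as its own comment notes, a real deployment would load these rows from a database or API. Below is a minimal sketch of that swap, assuming the results are published as a Hugging Face dataset. The repo id `kitab-bench/leaderboard-results` and the "train" split layout are hypothetical; only the column names come from the sample `data` dict above.

# Hypothetical data loader -- not part of this commit.
from datasets import load_dataset
import pandas as pd

EXPECTED_COLUMNS = [
    "model", "organization", "type", "task", "accuracy", "f1_score",
    "cer", "downloads", "last_updated", "model_url", "paper_url",
]

def load_leaderboard_df(repo_id: str = "kitab-bench/leaderboard-results") -> pd.DataFrame:
    # Assumes one row per evaluated model in the dataset's "train" split
    ds = load_dataset(repo_id, split="train")
    df = ds.to_pandas()
    # Keep only the columns app.py expects, in a stable order
    return df[EXPECTED_COLUMNS]

# In app.py, `df = pd.DataFrame(data)` could then become:
# df = load_leaderboard_df()

As long as these columns are present, the rest of the app (type filter, search, sorting, and the HTML rendering in update_table) would work unchanged.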