Code cleanup
- app.py +23 -44
- src/about.py +0 -14
- src/display/css_html_js.py +5 -13
- src/display/formatting.py +0 -8
- src/display/utils.py +0 -71
- src/envs.py +0 -2
- src/leaderboard/read_evals.py +17 -62
- src/populate.py +4 -11
- src/submission/check_validity.py +2 -2
- src/submission/submit.py +10 -42
app.py
CHANGED
@@ -1,7 +1,6 @@
 import gradio as gr
 import pandas as pd
 from apscheduler.schedulers.background import BackgroundScheduler
-#from huggingface_hub import snapshot_download
 import re
 import plotly.graph_objects as go
 
@@ -36,35 +35,18 @@ from src.leaderboard.read_evals import get_model_answers_html_file
 skills = ['MMLU', 'General Knowledge', 'Reasoning & Math', 'Translation (incl Dialects)', 'Trust & Safety', 'Writing (incl Dialects)', 'RAG QA', 'Reading Comprehension', 'Arabic Language & Grammar', 'Diacritization', 'Dialect Detection', 'Sentiment Analysis', 'Summarization', 'Instruction Following', 'Transliteration', 'Paraphrasing', 'Entity Extraction', 'Long Context', 'Coding', 'Hallucination', 'Function Calling', 'Structuring']
 
 
-def restart_space():
-    API.restart_space(repo_id=REPO_ID)
-
-### Space initialisation
-"""
-try:
-    print(EVAL_REQUESTS_PATH)
-    snapshot_download(
-        repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
-    )
-except Exception:
-    restart_space()
-try:
-    print(EVAL_RESULTS_PATH)
-    snapshot_download(
-        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN
-    )
-except Exception:
-    restart_space()
-"""
-
 LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
 
 (
     finished_eval_queue_df,
-    running_eval_queue_df,
     pending_eval_queue_df,
 ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
 
+
+def restart_space():
+    API.restart_space(repo_id=REPO_ID)
+
+
 def hide_skill_columns(dataframe, exceptions=[]):
     return dataframe[[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default or c.name in exceptions]]
 
@@ -115,9 +97,6 @@ def init_leaderboard(dataframe):
 def init_skill_leaderboard(dataframe):
 
 
-
-    ## create selector for model skills, based on the selector filter the dataframe
-
     skills_dropdown = gr.Dropdown(choices=skills, label="Select Skill", value=skills[0])
 
     def filter_dataframe(skill):
@@ -126,6 +105,7 @@ def init_skill_leaderboard(dataframe):
         new_skill_name = skill+" Score"
         filtered_df.rename(columns={skill: new_skill_name}, inplace=True)
         filtered_df[new_skill_name] = make_column_bold(filtered_df[new_skill_name])
+
         ## reorder columns of filtered_df and insert skill in the middle
         filtered_df = filtered_df[list(filtered_df.columns[:4]) + [new_skill_name] + list(filtered_df.columns[4:-1])]
         filtered_df["Rank"] = range(1, len(filtered_df) + 1)
@@ -143,6 +123,7 @@ def init_skill_leaderboard(dataframe):
 
     leaderboard_by_skill = filter_dataframe(skills[0])
     skills_dropdown.change(filter_dataframe, inputs=skills_dropdown, outputs=leaderboard_by_skill)
+
    return leaderboard_by_skill
 
 
@@ -159,13 +140,16 @@ def init_size_leaderboard(dataframe):
     sizes_dropdown = gr.Dropdown(choices=size_names, label="Select Model Size", value=size_names[0])
 
     def filter_dataframe(size_name):
+
         ##map size name to size key
         size_name_mapped_to_key = size_keys[size_names.index(size_name)]
+
         ##slice array from 0 to index of size
         size_list = size_keys[size_keys.index(size_name_mapped_to_key):]
         filtered_df = dataframe[dataframe["Size"].isin(size_list)].reset_index(drop=True)
         filtered_df["Rank"] = range(1, len(filtered_df) + 1)
         styler = perform_cell_formatting(filtered_df)
+
         return gr.Dataframe(
             value=styler,
             datatype="markdown",
@@ -179,19 +163,18 @@ def init_size_leaderboard(dataframe):
 
     leaderboard_by_skill = filter_dataframe(size_names[0])
     sizes_dropdown.change(filter_dataframe, inputs=sizes_dropdown, outputs=leaderboard_by_skill)
+
     return leaderboard_by_skill
 
 def strip_html_tags(model_name):
     return re.sub('<[^<]+?>', '', model_name)
-
-
 
 def get_model_info_blocks(chosen_model_name):
 
     model_names = LEADERBOARD_DF["Model Name"].unique().tolist()
     model_names_clean = [strip_html_tags(model_name) for model_name in model_names]
-
     model_name_full = model_names[model_names_clean.index(chosen_model_name)]
+
     filtered_df = LEADERBOARD_DF[LEADERBOARD_DF["Model Name"]==model_name_full].reset_index(drop=True)
     skills_bar_df = pd.DataFrame({
         'Skills': skills,
@@ -258,14 +241,13 @@ def init_compare_tab(dataframe):
 
     model_names = dataframe["Model Name"].unique().tolist()
     model_names_clean = [strip_html_tags(model_name) for model_name in model_names]
+
     with gr.Row():
         models_dropdown = gr.Dropdown(choices=model_names_clean, label="Select Models",
                                       value=model_names_clean[0], multiselect=True)
 
 
     def draw_radar_chart(models):
-        print(models)
-
 
         fig = go.Figure()
 
@@ -310,7 +292,9 @@ def init_compare_tab(dataframe):
 
 
 demo = gr.Blocks(css=custom_css)
+
 with demo:
+
     gr.HTML(TITLE, elem_classes="abl_header")
     gr.HTML(INTRODUCTION_TEXT, elem_classes="abl_desc_text")
 
@@ -329,9 +313,9 @@ with demo:
 
         with gr.TabItem("🔬 Deep Dive", elem_id="llm-benchmark-tab-compare", id=4):
 
-
            model_names = LEADERBOARD_DF["Model Name"].unique().tolist()
            model_names_clean = [strip_html_tags(model_name) for model_name in model_names]
+
            with gr.Row():
                models_dropdown = gr.Dropdown(choices=model_names_clean, label="Select Model", value=model_names_clean[0])
 
@@ -341,8 +325,10 @@ with demo:
            models_dropdown.change(get_model_info_blocks, inputs=models_dropdown, outputs=[model_name,benchmark_score,rank,speed,contamination,size,skills_bar,answers_html])
 
        with gr.TabItem("🚀 Submit here", elem_id="llm-benchmark-tab-submit", id=5):
+
            with gr.Row():
                gr.Markdown("# Submit your model", elem_classes="markdown-text")
+
            with gr.Column():
                gr.Markdown("### Please confirm that you understand and accept the conditions below before submitting your model:")
                prereqs_checkboxes = gr.CheckboxGroup(["I have successfully run the ABB benchmark script on my model using my own infrastructure, and I am not using the Leaderboard for testing purposes",
@@ -368,6 +354,7 @@ with demo:
                )
 
                submission_result = gr.Markdown()
+
                submit_button.click(
                    add_new_eval,
                    [
@@ -375,7 +362,9 @@ with demo:
                    ],
                    submission_result,
                )
+
            with gr.Column():
+
                with gr.Row():
                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
 
@@ -391,17 +380,6 @@ with demo:
                            datatype=EVAL_TYPES,
                            row_count=5,
                        )
-                with gr.Accordion(
-                    f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                    open=False,
-                ):
-                    with gr.Row():
-                        running_eval_table = gr.components.Dataframe(
-                            value=running_eval_queue_df,
-                            headers=EVAL_COLS,
-                            datatype=EVAL_TYPES,
-                            row_count=5,
-                        )
 
                with gr.Accordion(
                    f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
@@ -416,6 +394,7 @@ with demo:
                        )
 
        with gr.TabItem("📝 FAQ", elem_id="llm-benchmark-tab-faq", id=6):
+
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
    with gr.Row():
@@ -423,7 +402,7 @@ with demo:
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
-                lines=
+                lines=8,
                elem_id="citation-button",
                show_copy_button=True,
            )
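Note on the relocated restart_space() helper: app.py still imports BackgroundScheduler, and in the stock leaderboard template that helper is driven by a scheduler set up near the bottom of the file. A minimal sketch of that wiring follows (not part of this commit; the env var name, repo id and interval below are placeholders, not the Space's real configuration):

# Sketch only, assuming the stock leaderboard-template scheduler wiring.
import os
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import HfApi

API = HfApi(token=os.getenv("HF_TOKEN"))            # src/envs.py builds a similar client
REPO_ID = "silma-ai/Arabic-LLM-Broad-Leaderboard"   # placeholder Space id

def restart_space():
    API.restart_space(repo_id=REPO_ID)

scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)  # placeholder interval
scheduler.start()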
src/about.py
CHANGED
@@ -38,19 +38,6 @@ class EvalDimensions(Enum):
 
 
 
-
-
-
-
-
-
-
-
-NUM_FEWSHOT = 0 # Change with your few shot
-# ---------------------------------------------------
-
-
-
 # Your leaderboard name
 TITLE = """<div ><img class='abl_header_image' src='https://huggingface.co/spaces/silma-ai/Arabic-LLM-Broad-Leaderboard/resolve/main/src/images/abl_logo.png' ></div>"""
 
@@ -166,7 +153,6 @@ EVALUATION_QUEUE_TEXT = """
 
 CITATION_BUTTON_LABEL = "Copy the following snippet to cite the Leaderboard"
 CITATION_BUTTON_TEXT = r"""
-
 @misc{ABL,
   author = {SILMA.AI Team},
   title = {Arabic Broad Leaderboard},
src/display/css_html_js.py
CHANGED
@@ -108,13 +108,13 @@ custom_css = """
 
 }
 .abl_header{
-margin:0px auto 0px auto;
+margin:0px auto 0px auto;
 }
 .abl_header_image{
-margin:0px auto 0px auto;
-width:50%;
-display:block;
-border-radius: 10px;
+margin:0px auto 0px auto;
+width:50%;
+display:block;
+border-radius: 10px;
 }
 
 .tabs{
@@ -164,11 +164,3 @@ color:unset;
 margin-top:20px;
 }
 """
-
-get_window_url_params = """
-    function(url_params) {
-        const params = new URLSearchParams(window.location.search);
-        url_params = Object.fromEntries(params);
-        return url_params;
-    }
-    """
src/display/formatting.py
CHANGED
@@ -6,11 +6,6 @@ def make_clickable_model(model_name):
     link = f"https://huggingface.co/{model_name}"
     return model_hyperlink(link, model_name)
 
-def make_contamination_red(contamination_score):
-    if contamination_score <=0:
-        return f"<p class='clean' style='display:block;background-color:green !important;padding:5px;color: white; text-align: center;margin:0px' title='Clean model!'>{round((contamination_score))}</p>"
-    else:
-        return f"<p class='contaminated' style='display:block;background-color:red !important;padding:5px;color: white; text-align: center;margin:0px' title='Contaminated model!'>{round((contamination_score),2)}</p>"
 
 def styled_error(error):
     return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
@@ -27,6 +22,3 @@ def styled_message(message):
 def has_no_nan_values(df, columns):
     return df[columns].notna().all(axis=1)
 
-
-def has_nan_values(df, columns):
-    return df[columns].isna().any(axis=1)
src/display/utils.py
CHANGED
@@ -1,8 +1,4 @@
 from dataclasses import dataclass, make_dataclass
-from enum import Enum
-
-import pandas as pd
-
 from src.about import EvalDimensions
 
 def fields(raw_class):
@@ -24,12 +20,8 @@ class ColumnContent:
 auto_eval_column_dict = []
 # Init
 auto_eval_column_dict.append(["rank", ColumnContent, ColumnContent("Rank", "str", True, False)])
-
 auto_eval_column_dict.append(["model_source", ColumnContent, ColumnContent("Source", "str", True, False)])
 auto_eval_column_dict.append(["model_category", ColumnContent, ColumnContent("Size", "str", True, False)])
-
-
-#auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
 auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model Name", "markdown", True, never_hidden=True)])
 #Scores
 auto_eval_column_dict.append(["average_score", ColumnContent, ColumnContent("Benchmark Score (0-10)", "number", True)])
@@ -38,17 +30,6 @@ for eval_dim in EvalDimensions:
         auto_eval_column_dict.append([eval_dim.name, ColumnContent, ColumnContent(eval_dim.value.col_name, "number", True)])
     else:
         auto_eval_column_dict.append([eval_dim.name, ColumnContent, ColumnContent(eval_dim.value.col_name, "number", False)])
-# Model information
-
-#auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
-#auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
-#auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
-#auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
-#auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("License", "str", False)])
-#auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
-#auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Popularity (Likes)", "number", False)])
-#auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
-#auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
 
 
 # We use make dataclass to dynamically fill the scores from Tasks
@@ -59,60 +40,8 @@ AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=
 class EvalQueueColumn: # Queue column
     model = ColumnContent("model", "markdown", True)
     revision = ColumnContent("revision", "str", True)
-    #private = ColumnContent("private", "bool", True)
-    #precision = ColumnContent("precision", "str", True)
-    #weight_type = ColumnContent("weight_type", "str", "Original")
     status = ColumnContent("status", "str", True)
 
-## All the model information that we might need
-@dataclass
-class ModelDetails:
-    name: str
-    display_name: str = ""
-    symbol: str = "" # emoji
-
-"""
-class ModelType(Enum):
-
-
-    PT = ModelDetails(name="pretrained", symbol="🟢")
-    FT = ModelDetails(name="fine-tuned", symbol="🔶")
-    IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
-    RL = ModelDetails(name="RL-tuned", symbol="🟦")
-    Unknown = ModelDetails(name="", symbol="?")
-
-    def to_str(self, separator=" "):
-        return f"{self.value.symbol}{separator}{self.value.name}"
-
-    @staticmethod
-    def from_str(type):
-        if "fine-tuned" in type or "🔶" in type:
-            return ModelType.FT
-        if "pretrained" in type or "🟢" in type:
-            return ModelType.PT
-        if "RL-tuned" in type or "🟦" in type:
-            return ModelType.RL
-        if "instruction-tuned" in type or "⭕" in type:
-            return ModelType.IFT
-        return ModelType.Unknown
-
-class WeightType(Enum):
-    Adapter = ModelDetails("Adapter")
-    Original = ModelDetails("Original")
-    Delta = ModelDetails("Delta")
-
-class Precision(Enum):
-    float16 = ModelDetails("float16")
-    bfloat16 = ModelDetails("bfloat16")
-    Unknown = ModelDetails("?")
-
-    def from_str(precision):
-        if precision in ["torch.float16", "float16"]:
-            return Precision.float16
-        if precision in ["torch.bfloat16", "bfloat16"]:
-            return Precision.bfloat16
-        return Precision.Unknown
-"""
 # Column selection
 COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
 
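For readers unfamiliar with the dynamic column setup that survives this cleanup: each auto_eval_column_dict entry is an (attribute name, type, default ColumnContent) triple fed to make_dataclass, and fields() walks the generated class to build COLS. A self-contained toy version follows; ColumnContent's field names are inferred from the usage visible in this diff and may not match the real class exactly, and the helper below is a simplified stand-in for the repo's fields().

# Standalone sketch of the make_dataclass pattern kept in src/display/utils.py.
from dataclasses import dataclass, fields as dataclass_fields, make_dataclass

@dataclass(frozen=True)
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool = False
    hidden: bool = False
    never_hidden: bool = False

auto_eval_column_dict = [
    ["rank", ColumnContent, ColumnContent("Rank", "str", True, False)],
    ["model", ColumnContent, ColumnContent("Model Name", "markdown", True, never_hidden=True)],
    ["hidden_metric", ColumnContent, ColumnContent("Hidden Metric", "number", False, hidden=True)],  # made-up column
]

# Same call shape as utils.py: each triple becomes a field with a ColumnContent default.
AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)

def fields(raw_class):
    # simplified: return the per-column ColumnContent defaults of the generated dataclass
    return [f.default for f in dataclass_fields(raw_class)]

COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden]
print(COLS)  # ['Rank', 'Model Name']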
src/envs.py
CHANGED
@@ -19,7 +19,5 @@ CACHE_PATH=os.getenv("HF_HOME", ".")
 # Local caches
 EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "requests")
 EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "results")
-EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
-EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")
 
 API = HfApi(token=TOKEN)
src/leaderboard/read_evals.py
CHANGED
@@ -1,14 +1,11 @@
 import glob
 import json
-import math
 import os
 from dataclasses import dataclass
-
 import dateutil
-import numpy as np
 
-from src.display.formatting import make_clickable_model
-from src.display.utils import AutoEvalColumn, EvalDimensions
+from src.display.formatting import make_clickable_model
+from src.display.utils import AutoEvalColumn, EvalDimensions
 from src.submission.check_validity import is_model_on_hub
 
 
@@ -20,17 +17,9 @@ class EvalResult:
     full_model: str # org/model (path on hub)
     org: str
     model: str
-    #revision: str # commit hash, "" if main
     results: dict
-    #precision: Precision = Precision.Unknown
-    #model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
     model_source: str = "" # HF, API, ...
     model_category: str = "" #Nano, Small, Medium, Large
-    #weight_type: WeightType = WeightType.Original # Original or Adapter
-    #architecture: str = "Unknown"
-    #license: str = "?"
-    #likes: int = 0
-    #num_params: int = 0
     date: str = "" # submission date of request file
     still_on_hub: bool = False
 
@@ -42,41 +31,31 @@ class EvalResult:
 
         config = data.get("config")
 
-        # Precision
-        #precision = Precision.from_str(config.get("model_dtype"))
-
         # Get model and org
         org_and_model = config.get("model", config.get("model_args", None))
-
+
         org_and_model = org_and_model.split("/", 1)
 
         if len(org_and_model) == 1:
             org = None
             model = org_and_model[0]
-            result_key = f"{model}"
+            result_key = f"{model}"
         else:
             org = org_and_model[0]
             model = org_and_model[1]
-            result_key = f"{org}_{model}"
+            result_key = f"{org}_{model}"
         full_model = "/".join(org_and_model)
 
-        still_on_hub, _,
+        still_on_hub, _, _ = is_model_on_hub(
             full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False
         )
 
-        """
-        architecture = "?"
-        if model_config is not None:
-            architectures = getattr(model_config, "architectures", None)
-            if architectures:
-                architecture = ";".join(architectures)
-        """
 
         # Extract results available in this file (some results are split in several files)
         results = {}
 
         results_obj = data.get("results")
-
+
         results["average_score"] = results_obj.get("average_score")
         results["speed"] = results_obj.get("speed")
         results["contamination_score"] = results_obj.get("contamination_score")
@@ -98,50 +77,30 @@ class EvalResult:
             model=model,
             model_source=config.get("model_source", ""),
             model_category=config.get("model_category", ""),
-            #num_params=config.get("params", 0),
-            #license=config.get("license", "?"),
-            #likes=config.get("likes", -1),
             results=results,
-            #precision=precision,
-            #revision= config.get("model_sha", ""),
             still_on_hub=still_on_hub,
-            #architecture=architecture
         )
 
     def update_with_request_file(self, requests_path):
         """Finds the relevant request file for the current model and updates info with it"""
-        request_file = get_request_file_for_model(requests_path, self.full_model)
+        request_file = get_request_file_for_model(requests_path, self.full_model)
        try:
            with open(request_file, "r") as f:
                request = json.load(f)

-            #self.model_type = ModelType.from_str(request.get("model_type", ""))
-            #self.weight_type = WeightType[request.get("weight_type", "Original")]
-            #self.license = request.get("license", "?")
-            #self.likes = request.get("likes", 0)
-            #self.params = request.get("params", 0)
            self.date = request.get("submitted_time", "")
        except Exception:
-            print(f"Could not find request file for {self.org}/{self.model}")
+            print(f"Could not find request file for {self.org}/{self.model}")
 
    def to_dict(self):
        """Converts the Eval Result to a dict compatible with our dataframe display"""
        average_score = self.results["average_score"]
        data_dict = {
            "eval_name": self.eval_name, # not a column, just a save name,
-            #AutoEvalColumn.precision.name: self.precision.value.name,
            AutoEvalColumn.model_source.name: self.model_source,
            AutoEvalColumn.model_category.name: self.model_category,
-            #AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
-            #AutoEvalColumn.weight_type.name: self.weight_type.value.name,
-            #AutoEvalColumn.architecture.name: self.architecture,
            AutoEvalColumn.model.name: make_clickable_model(self.full_model),
-            #AutoEvalColumn.revision.name: self.revision,
            AutoEvalColumn.average_score.name: average_score,
-            #AutoEvalColumn.license.name: self.license,
-            #AutoEvalColumn.likes.name: self.likes,
-            #AutoEvalColumn.params.name: self.num_params,
-            #AutoEvalColumn.still_on_hub.name: self.still_on_hub,
        }
 
        for eval_dim in EvalDimensions:
@@ -159,11 +118,11 @@ class EvalResult:
        return data_dict
 
 
-def get_request_file_for_model(requests_path, model_name):
+def get_request_file_for_model(requests_path, model_name):
    """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
    request_files = os.path.join(
        requests_path,
-        f"{model_name}
+        f"{model_name}_eval_request.json",
    )
 
    request_files = glob.glob(request_files)
@@ -176,7 +135,6 @@ def get_request_file_for_model(requests_path, model_name): #,precision
            req_content = json.load(f)
            if (
                req_content["status"] in ["FINISHED"]
-                #and req_content["precision"] == precision.split(".")[-1]
            ):
                request_file = tmp_request_file
    return request_file
@@ -187,8 +145,8 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResu
    model_result_filepaths = []
 
    for root, _, files in os.walk(results_path):
-
-
+
+        ## we allow HTML files now
        #if len(files) == 0 or any([not f.endswith(".json") for f in files]):
        #    continue
        files = [f for f in files if f.endswith(".json")]
@@ -199,7 +157,7 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResu
        except dateutil.parser._parser.ParserError as e:
            print("Error",e)
            files = [files[-1]]
-
+
        for file in files:
            model_result_filepaths.append(os.path.join(root, file))
 
@@ -207,7 +165,7 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResu
    for model_result_filepath in model_result_filepaths:
        # Creation of result
        eval_result = EvalResult.init_from_json_file(model_result_filepath)
-        eval_result.update_with_request_file(requests_path)
+        #eval_result.update_with_request_file(requests_path) ##not needed, save processing time
 
        # Store results of same eval together
        eval_name = eval_result.eval_name
@@ -217,20 +175,17 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResu
            eval_results[eval_name] = eval_result
 
    results = []
-
+
    for v in eval_results.values():
        try:
-
+
            v.to_dict() # we test if the dict version is complete
            results.append(v)
        except KeyError: # not all eval values present
            print("Key error in eval result, skipping")

-            print(v)
-            print(v.to_dict())
            continue
 
-    print(results)
    return results
 
 
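The request-file lookup above now resolves a single exact path per model and keeps only entries tagged FINISHED. A standalone sketch of that pattern follows; the directory layout under requests_path (one "{org}/{model}_eval_request.json" file per submission) is an assumption based on the file name submit.py writes in this same commit.

# Minimal sketch of the FINISHED-only request lookup shown above.
import glob
import json
import os

def get_request_file_for_model(requests_path: str, model_name: str) -> str:
    # model_name is the full "org/model" id, so the pattern expands to
    # {requests_path}/{org}/{model}_eval_request.json
    pattern = os.path.join(requests_path, f"{model_name}_eval_request.json")
    request_file = ""
    for tmp_request_file in glob.glob(pattern):
        with open(tmp_request_file, "r") as f:
            req_content = json.load(f)
        if req_content["status"] in ["FINISHED"]:
            request_file = tmp_request_file
    return request_file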
src/populate.py
CHANGED
@@ -31,12 +31,7 @@ def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchm
         df[col] = df[col].round(2)
 
     df["Benchmark Score (0-10)"] = df["Benchmark Score (0-10)"].astype(str)
-    print(df["Benchmark Score (0-10)"])
 
-    print("###############\n\n\n\n\n\n###############")
-
-    print(df)
-    print(df.info())
 
 
     return df
@@ -64,10 +59,10 @@ def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
             # this is a folder
 
             sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if os.path.isfile(f"{save_path}/{entry}/{e}") ]#and not e.startswith(".")
-
+
             for sub_entry in sub_entries:
                 file_path = os.path.join(save_path, entry, sub_entry)
-
+
 
                 with open(file_path) as fp:
                     data = json.load(fp)
@@ -78,10 +73,8 @@ def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
 
 
     pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
-
-    running_list = [e for e in all_evals if e["status"] == "RUNNING"]
+
     finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]
     df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
-    df_running = pd.DataFrame.from_records(running_list, columns=cols)
     df_finished = pd.DataFrame.from_records(finished_list, columns=cols)
-    return df_finished[cols],
+    return df_finished[cols], df_pending[cols]
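Since get_evaluation_queue_df() now returns only the finished and pending frames, the two-element unpack in app.py earlier in this commit matches it. A toy illustration of the status filtering follows; the records are made up, while the status values and column names come from this diff.

# Toy illustration of the queue filtering now done in get_evaluation_queue_df().
import pandas as pd

cols = ["model", "revision", "status"]
all_evals = [
    {"model": "org-a/model-1", "revision": "main", "status": "FINISHED"},
    {"model": "org-b/model-2", "revision": "main", "status": "PENDING"},
    {"model": "org-c/model-3", "revision": "main", "status": "PENDING_NEW_EVAL"},
]

pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]]
finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"]

df_pending = pd.DataFrame.from_records(pending_list, columns=cols)
df_finished = pd.DataFrame.from_records(finished_list, columns=cols)

# matches the two-element unpack now used in app.py
finished_eval_queue_df, pending_eval_queue_df = df_finished[cols], df_pending[cols]
print(len(finished_eval_queue_df), len(pending_eval_queue_df))  # 2 1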
src/submission/check_validity.py
CHANGED
@@ -65,7 +65,7 @@ def get_model_size(model_info: ModelInfo): #, precision: str
         model_size = round(model_info.safetensors["total"] / 1e9, 3)
     except (AttributeError, TypeError):
         return 0 # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
-
+
     size_factor = 1#8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
     model_size = size_factor * model_size
     return model_size
@@ -88,7 +88,7 @@ def already_submitted_models(requested_models_dir: str) -> set[str]:
             continue
         with open(os.path.join(root, file), "r") as f:
             info = json.load(f)
-            file_names.append(f"{info['model']}")
+            file_names.append(f"{info['model']}")
 
             # Select organisation
             if info["model"].count("/") == 0 or "submitted_time" not in info:
src/submission/submit.py
CHANGED
@@ -18,11 +18,6 @@ USERS_TO_SUBMISSION_DATES = None
 def add_new_eval(
     model: str,
     progress=gr.Progress()
-    #base_model: str,
-    #revision: str,
-    #precision: str,
-    #weight_type: str,
-    #model_type: str,
 ):
     global REQUESTED_MODELS
     global USERS_TO_SUBMISSION_DATES
@@ -37,18 +32,19 @@ def add_new_eval(
     user_name = model.split("/")[0]
     model_path = model.split("/")[1]
 
-    #precision = precision.split(" ")[0]
     current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
 
 
     progress(0.1, desc=f"Checking model {model} on hub")
 
-    if not is_model_on_hub(model_name=model, token=TOKEN, test_tokenizer=True):
+    if not is_model_on_hub(model_name=model, token=TOKEN, test_tokenizer=True):
         yield styled_error("Model does not exist on HF Hub. Please select a valid model name.")
         return
 
-
+
     progress(0.2, desc=f"Checking for banned orgs")
+
+    ##check for org banning
     banned_orgs = [{
         'org_name':'TEMPLATE',
         'banning_reason':'Submitting contaminated models'
@@ -60,34 +56,16 @@ def add_new_eval(
         )
         return
 
-
-    if model_type is None or model_type == "":
-        return styled_error("Please select a model type.")
-
-    # Does the model actually exist?
-    if revision == "":
-        revision = "main"
-
-    # Is the model on the hub?
-    if weight_type in ["Delta", "Adapter"]:
-        base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
-        if not base_model_on_hub:
-            return styled_error(f'Base model "{base_model}" {error}')
-
-    if not weight_type == "Adapter":
-        model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
-        if not model_on_hub:
-            return styled_error(f'Model "{model}" {error}')
-    """
+
     # Is the model info correctly filled?
     try:
-        model_info = API.model_info(repo_id=model)
+        model_info = API.model_info(repo_id=model)
     except Exception:
         yield styled_error("Could not get your model information. Please fill it up properly.")
         return
 
     progress(0.3, desc=f"Checking model size")
-    model_size = get_model_size(model_info=model_info)
+    model_size = get_model_size(model_info=model_info)
 
     if model_size>15:
         yield styled_error("We currently accept community-submitted models up to 15 billion parameters only. If you represent an organization then please contact us at benchmark@silma.ai")
@@ -131,7 +109,7 @@ def add_new_eval(
     progress(0.8, desc=f"Checking same model submissions")
 
     # Check for duplicate submission
-    if f"{model}" in REQUESTED_MODELS:
+    if f"{model}" in REQUESTED_MODELS:
         yield styled_warning("This model has already been submitted.")
         return
 
@@ -141,17 +119,11 @@ def add_new_eval(
     eval_entry = {
         "model": model,
         "model_sha": model_info.sha,
-        #"base_model": base_model,
-        #"revision": revision,
-        #"precision": precision,
-        #"weight_type": weight_type,
         "status": "PENDING",
         "submitted_time": current_time,
-        #"model_type": model_type,
         "likes": model_info.likes,
         "params": model_size,
         "license": license,
-        #"private": False,
     }
 
 
@@ -160,7 +132,7 @@ def add_new_eval(
     print("Creating eval file")
     OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
     os.makedirs(OUT_DIR, exist_ok=True)
-    out_path = f"{OUT_DIR}/{model_path}_eval_request.json"
+    out_path = f"{OUT_DIR}/{model_path}_eval_request.json"
 
     with open(out_path, "w") as f:
         f.write(json.dumps(eval_entry))
@@ -182,7 +154,7 @@ def add_new_eval(
         queue_data = json.load(f)
 
     queue_len = len(queue_data)
-
+
 
     if queue_len == 0:
         queue_data = []
@@ -192,10 +164,6 @@ def add_new_eval(
 
     queue_data.append(eval_entry)
 
-    print(queue_data)
-
-    #with open(queue_file, "w") as f:
-    #    json.dump(queue_data, f)
 
     print("Updating eval queue file")
     API.upload_file(
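After this cleanup a submission needs only the model id, and add_new_eval() writes a minimal PENDING request file under EVAL_REQUESTS_PATH/{org}. A toy sketch of that file creation follows; the values are invented, the Hub lookups (model_info.sha, likes, the size check) are skipped, and the field names come from the eval_entry shown in this diff.

# Toy sketch of the request file written by the simplified add_new_eval().
import json
import os
from datetime import datetime, timezone

EVAL_REQUESTS_PATH = "./requests"          # placeholder for the cached requests dir
model = "my-org/my-arabic-model"           # placeholder submission

user_name, model_path = model.split("/")
eval_entry = {
    "model": model,
    "model_sha": "0000000",                # filled from API.model_info(repo_id=model).sha in the real code
    "status": "PENDING",
    "submitted_time": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
    "likes": 0,                            # placeholder
    "params": 7.0,                         # placeholder, must be <= 15 to pass the size check
    "license": "apache-2.0",               # placeholder
}

OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
os.makedirs(OUT_DIR, exist_ok=True)
out_path = f"{OUT_DIR}/{model_path}_eval_request.json"
with open(out_path, "w") as f:
    f.write(json.dumps(eval_entry))
print(out_path)  # ./requests/my-org/my-arabic-model_eval_request.json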