import random
import re

import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from apscheduler.schedulers.background import BackgroundScheduler
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
from huggingface_hub import snapshot_download

from src.about import CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT, EVALUATION_QUEUE_TEXT, INTRODUCTION_TEXT, LLM_BENCHMARKS_TEXT, TITLE
from src.tasks import TASK_DESCRIPTIONS, MEASURE_DESCRIPTION
from src.display.css_html_js import custom_css
from src.display.utils import BENCHMARK_COLS, COLS, EVAL_COLS, EVAL_TYPES, AutoEvalColumn, ModelType, fields, WeightType, Precision
from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
from src.populate import get_evaluation_queue_df, get_leaderboard_df
from src.submission.submit import add_new_eval

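# The helper functions below compute the aggregate statistics and build the
# Plotly charts that are shown in the leaderboard tabs defined further down.
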
def mean_of_max_per_field(df):
    """
    Compute the maximum score for each task column and return the mean of those maxima.

    Args:
        df (pd.DataFrame): DataFrame with columns TE, SA, HS, AT, WIC, FAQ, LS, SU, NER, REL.

    Returns:
        float: mean of the per-task maximum values.
    """
    task_columns = ["TE", "SA", "HS", "AT", "WIC", "FAQ", "LS", "SU", "NER", "REL"]

    missing = [f for f in task_columns if f not in df.columns]
    if missing:
        raise ValueError(f"The following columns are missing from the DataFrame: {missing}")

    max_values = df[task_columns].max()
    mean_max = max_values.mean()
    return mean_max

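# Illustration of the arithmetic (hypothetical values): if the per-task maxima
# were 80.0, 90.0 and 70.0, the result would be (80.0 + 90.0 + 70.0) / 3 = 80.0,
# i.e. the "theoretical" combined performance of a model that matched the best
# score on every task.
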
def boxplot_per_task(dataframe=None, baselines=None):
    """Build a Plotly box plot of model accuracy per task, with optional baseline markers."""
    tasks = ["TE", "SA", "HS", "AT", "WIC", "FAQ", "LS", "SU", "NER", "REL"]

    # Fall back to synthetic data when no results are provided.
    if dataframe is None:
        np.random.seed(42)
        dataframe = pd.DataFrame({
            task: np.random.uniform(0.4, 0.9, 20) * 100
            for task in tasks
        })

    if baselines is None:
        baselines = {task: np.random.randint(50, 70) for task in tasks}

    colors = ["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd",
              "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf"]

    fig = go.Figure()

    for i, task in enumerate(tasks):
        if task in dataframe.columns:
            y_data = dataframe[task].dropna().tolist()

            fig.add_trace(go.Box(
                y=y_data,
                name=task,
                boxmean="sd",
                marker=dict(color=colors[i], line=dict(width=1)),
                line=dict(color=colors[i]),
                fillcolor=colors[i],
                opacity=0.7,
                hovertemplate=f"<b>{task}</b><br>Accuracy: " + "%{y:.2f}%" + "<extra></extra>",
                width=0.6
            ))

            # Draw the supervised baseline as a dashed reference line with a label.
            if task in baselines and baselines[task] is not None:
                fig.add_shape(
                    type="line",
                    x0=i - 0.3, x1=i + 0.3,
                    y0=baselines[task], y1=baselines[task],
                    line=dict(color="black", width=2, dash="dash"),
                    xref="x", yref="y"
                )

                fig.add_annotation(
                    x=i, y=baselines[task],
                    text=f"{baselines[task]}%",
                    showarrow=False,
                    yshift=10,
                    font=dict(size=10, color="black")
                )

    fig.update_layout(
        title="Distribution of Model Accuracy by Task",
        xaxis_title="Task",
        yaxis_title="Accuracy (%)",
        template="plotly_white",
        boxmode="group",
        dragmode=False,
        font=dict(family="Arial", size=13),
        margin=dict(b=80),
        annotations=[
            dict(
                text=(
                    "Boxplots show LLM accuracy in zero/few-shot settings. <br>"
                    "Black dashed lines indicate the best-performing supervised models evaluated during EVALITA."
                ),
                xref="paper", yref="paper",
                x=0.5, y=-0.33,
                showarrow=False,
                font=dict(size=12, color="gray")
            )
        ]
    )

    fig.update_yaxes(range=[0, 100], fixedrange=True)

    return fig

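# Per-task reference scores of the best-performing supervised systems evaluated
# during EVALITA, drawn as dashed baseline lines in boxplot_per_task.
# FAQ has no entry, so no baseline line is drawn for that task.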
BASELINES = {
    "TE": 71.00, "SA": 66.38, "HS": 80.88, "AT": 82.40, "WIC": 85.00,
    "LS": 38.82, "SU": 38.91, "NER": 88.00, "REL": 62.99
}

def line_chart(dataframe):
    """Scatter plot of average combined performance vs. model size, split into 0-shot and 5-shot runs."""
    df_true = dataframe[dataframe['IS_FS'] == True]
    df_false = dataframe[dataframe['IS_FS'] == False]

    x_true = df_true['#Params (B)'].tolist()
    y_true = df_true['Avg. Comb. Perf. ⬆️'].tolist()
    # The 'Model' column holds an HTML link; extract the bare model name for the hover labels.
    labels_true = [
        re.search(r'>([^<>/]+/[^<>]+)<', m).group(1).split('/')[-1]
        for m in df_true['Model'].tolist()
    ]

    x_false = df_false['#Params (B)'].tolist()
    y_false = df_false['Avg. Comb. Perf. ⬆️'].tolist()
    labels_false = [
        re.search(r'>([^<>/]+/[^<>]+)<', m).group(1).split('/')[-1]
        for m in df_false['Model'].tolist()
    ]

    fig = go.Figure()

    fig.add_trace(go.Scatter(
        x=x_true,
        y=y_true,
        mode='markers',
        name='5-Shot',
        marker=dict(color='red', size=10),
        hovertemplate='<b>%{customdata}</b><br>#Params: %{x}<br>Performance: %{y}<extra></extra>',
        customdata=labels_true
    ))

    fig.add_trace(go.Scatter(
        x=x_false,
        y=y_false,
        mode='markers',
        name='0-Shot',
        marker=dict(color='blue', size=10),
        hovertemplate='<b>%{customdata}</b><br>#Params: %{x}<br>Performance: %{y}<extra></extra>',
        customdata=labels_false
    ))

    fig.update_layout(
        title="Avg. Combined Performance vs #Params",
        xaxis_title="#Params (B)",
        yaxis_title="Avg. Combined Performance ⬆️",
        template="plotly_white",
        hovermode="closest",
        dragmode=False
    )

    fig.update_xaxes(fixedrange=True, rangeslider_visible=False)
    fig.update_yaxes(fixedrange=True)

    return fig

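# Icons and display names for the per-task tabs, grouped by task type
# (multiple-choice vs. generative). The 'tooltip' fields are left empty.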
TASK_METADATA_MULTIPLECHOICE = {
    "TE": {"icon": "📝", "name": "Textual Entailment", "tooltip": ""},
    "SA": {"icon": "😊", "name": "Sentiment Analysis", "tooltip": ""},
    "HS": {"icon": "⚠️", "name": "Hate Speech", "tooltip": ""},
    "AT": {"icon": "🏥", "name": "Admission Test", "tooltip": ""},
    "WIC": {"icon": "🤔", "name": "Word in Context", "tooltip": ""},
    "FAQ": {"icon": "❓", "name": "Frequently Asked Questions", "tooltip": ""}
}


TASK_METADATA_GENERATIVE = {
    "LS": {"icon": "🔄", "name": "Lexical Substitution", "tooltip": ""},
    "SU": {"icon": "📄", "name": "Summarization", "tooltip": ""},
    "NER": {"icon": "🏷️", "name": "Named Entity Recognition", "tooltip": ""},
    "REL": {"icon": "🔗", "name": "Relation Extraction", "tooltip": ""},
}

def restart_space():
    """Restart the Hugging Face space."""
    API.restart_space(repo_id=REPO_ID)

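# Both leaderboard builders below tag the top-ranked model in each parameter
# bucket (>30B, 10-30B, <=10B) and evaluation mode (few-shot vs. zero-shot)
# by appending a badge to its 'Model' cell; all other rows keep the plain name.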
def init_leaderboard(dataframe, default_selection=None, hidden_columns=None):
    """
    Initialize and return the leaderboard when it is first loaded or when 'benchmark' is selected.
    The table is sorted based on the "Avg. Combined Performance" field.
    """
    if dataframe is None or dataframe.empty:
        raise ValueError("Leaderboard DataFrame is empty or None.")

    sorted_dataframe = dataframe.sort_values(by="Avg. Comb. Perf. ⬆️", ascending=False)

    sorted_dataframe = sorted_dataframe.reset_index(drop=True)
    sorted_dataframe["rank"] = sorted_dataframe.index + 1

    # One badge per parameter bucket, tracked separately for few-shot and zero-shot models.
    large_medal_fs_assigned = False
    medium_medal_fs_assigned = False
    small_medal_fs_assigned = False

    large_medal_0shot_assigned = False
    medium_medal_0shot_assigned = False
    small_medal_0shot_assigned = False

    new_model_column = []

    for _, row in sorted_dataframe.iterrows():
        if row['IS_FS']:
            if row["#Params (B)"] > 30 and not large_medal_fs_assigned:
                new_model_column.append(f"{row['Model']} 7️⃣0️⃣🅱️🏆")
                large_medal_fs_assigned = True
            elif 10 < row["#Params (B)"] <= 30 and not medium_medal_fs_assigned:
                new_model_column.append(f"{row['Model']} 3️⃣0️⃣🅱️🏆")
                medium_medal_fs_assigned = True
            elif row["#Params (B)"] <= 10 and not small_medal_fs_assigned:
                new_model_column.append(f"{row['Model']} 1️⃣0️⃣🅱️🏆")
                small_medal_fs_assigned = True
            else:
                new_model_column.append(row["Model"])
        else:
            if row["#Params (B)"] > 30 and not large_medal_0shot_assigned:
                new_model_column.append(f"{row['Model']} 7️⃣0️⃣🅱️🎖️")
                large_medal_0shot_assigned = True
            elif 10 < row["#Params (B)"] <= 30 and not medium_medal_0shot_assigned:
                new_model_column.append(f"{row['Model']} 3️⃣0️⃣🅱️🎖️")
                medium_medal_0shot_assigned = True
            elif row["#Params (B)"] <= 10 and not small_medal_0shot_assigned:
                new_model_column.append(f"{row['Model']} 1️⃣0️⃣🅱️🎖️")
                small_medal_0shot_assigned = True
            else:
                new_model_column.append(row["Model"])

    sorted_dataframe["Model"] = new_model_column

    field_list = fields(AutoEvalColumn)

    # Note: default_selection is accepted for API symmetry but not applied here;
    # the visible columns are controlled through hidden_columns.
    return Leaderboard(
        value=sorted_dataframe,
        datatype=[c.type for c in field_list],
        search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
        hide_columns=hidden_columns or [c.name for c in field_list if c.hidden],
        filter_columns=[
            ColumnFilter(AutoEvalColumn.fewshot_symbol.name, type="checkboxgroup", label="N-Shot Learning (FS)"),
            ColumnFilter(AutoEvalColumn.params.name, type="slider", min=0, max=100, default=[0, 100], label="Select the number of parameters (B)"),
        ],
        bool_checkboxgroup_label="Evaluation Mode",
        interactive=False,
    )

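# update_task_leaderboard expects the caller to have renamed the task-specific
# columns ("<task> Prompt Average", "<task> Prompt Std", "<task> Best Prompt",
# "<task> Best Prompt Id" and the task score itself) to their generic names
# before passing the DataFrame in (see the per-task tabs below).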
def update_task_leaderboard(dataframe, default_selection=None, hidden_columns=None):
    """
    Update and return the leaderboard when a specific task is selected.
    The table is sorted based on the "Combined Performance" field.
    """
    if dataframe is None or dataframe.empty:
        raise ValueError("Leaderboard DataFrame is empty or None.")

    sorted_dataframe = dataframe.sort_values(by="Combined Performance", ascending=False)

    sorted_dataframe = sorted_dataframe.reset_index(drop=True)
    sorted_dataframe["rank"] = sorted_dataframe.index + 1

    # One badge per parameter bucket, tracked separately for few-shot and zero-shot models.
    large_medal_fs_assigned = False
    medium_medal_fs_assigned = False
    small_medal_fs_assigned = False

    large_medal_0shot_assigned = False
    medium_medal_0shot_assigned = False
    small_medal_0shot_assigned = False

    new_model_column = []

    for _, row in sorted_dataframe.iterrows():
        if row['IS_FS']:
            if row["#Params (B)"] > 30 and not large_medal_fs_assigned:
                new_model_column.append(f"{row['Model']} 7️⃣0️⃣🅱️🏆")
                large_medal_fs_assigned = True
            elif 10 < row["#Params (B)"] <= 30 and not medium_medal_fs_assigned:
                new_model_column.append(f"{row['Model']} 3️⃣0️⃣🅱️🏆")
                medium_medal_fs_assigned = True
            elif row["#Params (B)"] <= 10 and not small_medal_fs_assigned:
                new_model_column.append(f"{row['Model']} 1️⃣0️⃣🅱️🏆")
                small_medal_fs_assigned = True
            else:
                new_model_column.append(row["Model"])
        else:
            if row["#Params (B)"] > 30 and not large_medal_0shot_assigned:
                new_model_column.append(f"{row['Model']} 7️⃣0️⃣🅱️🎖️")
                large_medal_0shot_assigned = True
            elif 10 < row["#Params (B)"] <= 30 and not medium_medal_0shot_assigned:
                new_model_column.append(f"{row['Model']} 3️⃣0️⃣🅱️🎖️")
                medium_medal_0shot_assigned = True
            elif row["#Params (B)"] <= 10 and not small_medal_0shot_assigned:
                new_model_column.append(f"{row['Model']} 1️⃣0️⃣🅱️🎖️")
                small_medal_0shot_assigned = True
            else:
                new_model_column.append(row["Model"])

    sorted_dataframe["Model"] = new_model_column

    # Do not truncate long cell values (e.g. the HTML model links) when pandas formats the table.
    pd.set_option('display.max_colwidth', None)

    field_list = fields(AutoEvalColumn)

    return Leaderboard(
        value=sorted_dataframe,
        datatype=[c.type for c in field_list] + [int],
        search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
        hide_columns=hidden_columns or [c.name for c in field_list if c.hidden],
        filter_columns=[
            ColumnFilter(AutoEvalColumn.fewshot_symbol.name, type="checkboxgroup", label="N-Shot Learning (FS)"),
            ColumnFilter(AutoEvalColumn.params.name, type="slider", min=0, max=100, default=[0, 100],
                         label="Select the number of parameters (B)"),
        ],
        bool_checkboxgroup_label="Evaluation Mode",
        interactive=False
    )

'''
# Helper function for leaderboard initialization
def init_leaderboard(dataframe, default_selection=None, hidden_columns=None):
    """Initialize and return a leaderboard."""
    if dataframe is None or dataframe.empty:
        raise ValueError("Leaderboard DataFrame is empty or None.")

    return Leaderboard(
        value=dataframe,
        datatype=[c.type for c in fields(AutoEvalColumn)],
        select_columns=SelectColumns(
            default_selection=default_selection or [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
            cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
            label="Select Columns to Display:",
        ),
        search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
        hide_columns=hidden_columns or [c.name for c in fields(AutoEvalColumn) if c.hidden],
        filter_columns=[
            ColumnFilter(AutoEvalColumn.fewshot_type.name, type="checkboxgroup", label="N-Few-Shot Learning (FS)"),
            ColumnFilter(AutoEvalColumn.params.name, type="slider", min=0, max=150, label="Select the number of parameters (B)"),
        ],
        bool_checkboxgroup_label="Hide models",
        interactive=False,
    )
'''

def download_snapshot(repo, local_dir):
    """Try to download a snapshot from the Hugging Face Hub; restart the space on failure."""
    try:
        print(f"Downloading from {repo} to {local_dir}...")
        snapshot_download(repo_id=repo, local_dir=local_dir, repo_type="dataset", tqdm_class=None, etag_timeout=30, token=TOKEN)
    except Exception as e:
        print(f"Error downloading {repo}: {e}")
        restart_space()

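# Startup: pull the evaluation queue and results snapshots from the Hub, then
# build the leaderboard DataFrame and the finished/running/pending queue views.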
download_snapshot(QUEUE_REPO, EVAL_REQUESTS_PATH)
download_snapshot(RESULTS_REPO, EVAL_RESULTS_PATH)

LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)

theoretical_max_combined_perf = mean_of_max_per_field(LEADERBOARD_DF)

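# Gradio interface: header, benchmark leaderboard, charts, per-task tabs,
# citation and credits.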
demo = gr.Blocks(css=custom_css)
with demo:

    gr.HTML(
        """
        <div style="display: flex; align-items: center; position: relative; width: 100%; height: 60px; padding: 10px 0;">
            <h1 style="
                margin: 0 auto;
                font-weight: 900;
                font-size: 2.5em;
                letter-spacing: 2px;
                text-transform: uppercase;
                background: linear-gradient(90deg, #1f77b4, #00c6ff);
                -webkit-background-clip: text;
                -webkit-text-fill-color: transparent;
                text-shadow: 2px 2px 8px rgba(0,0,0,0.2);
            ">
                EVALITA-LLM Leaderboard
            </h1>
            <a href="https://huggingface.co/spaces/mii-llm/open_ita_llm_leaderboard" target="_blank"
               style="position: absolute; right: 0; display: inline-flex; align-items: center; gap: 6px; text-decoration: none; color: #1f77b4; font-weight: 600;">
                <!-- Stylized icon -->
                <svg xmlns="http://www.w3.org/2000/svg" width="22" height="22" fill="#1f77b4" viewBox="0 0 24 24">
                    <path d="M3.9 12a5 5 0 0 1 7.07-7.07l1.41 1.41-1.41 1.41-1.42-1.42a3 3 0 1 0 4.24 4.24l3.54-3.54a5 5 0 0 1-7.07 7.07l-1.41-1.41 1.41-1.41 1.42 1.42z"/>
                    <path d="M20.1 12a5 5 0 0 1-7.07 7.07l-1.41-1.41 1.41-1.41 1.42 1.42a3 3 0 1 0-4.24-4.24l-3.54 3.54a5 5 0 0 1 7.07-7.07l1.41 1.41-1.41 1.41-1.42-1.42z"/>
                </svg>
                Open Italian LLM Leaderboard
            </a>
        </div>
        """
    )
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:

        with gr.TabItem("🏅 Benchmark"):

            leaderboard = init_leaderboard(
                LEADERBOARD_DF,
                default_selection=['rank', 'FS', 'Model', "Avg. Comb. Perf. ⬆️", "TE", "SA", "HS", "AT", "WIC", "FAQ", "LS", "SU", "NER", "REL"],
                hidden_columns=[col for col in LEADERBOARD_DF.columns if col not in ['rank', 'FS', 'Model', "Avg. Comb. Perf. ⬆️", "TE", "SA", "HS", "AT", "WIC", "FAQ", "LS", "SU", "NER", "REL"]]
            )

            gr.HTML(
                f"""
                <div style="
                    border: 2px solid #1f77b4;
                    border-radius: 10px;
                    padding: 10px;
                    background-color: #f0f8ff;
                    font-weight: bold;
                    font-size: 14px;
                    display: inline-block;
                ">
                    Theoretical performance of a model that scores the highest on every individual task: <span style="color:#d62728; font-size:18px;">{theoretical_max_combined_perf:.2f}</span>
                </div>
                """
            )

        with gr.TabItem("📊 Charts"):

            gr.Plot(value=line_chart(LEADERBOARD_DF))
            gr.Plot(value=boxplot_per_task(LEADERBOARD_DF, BASELINES))

        with gr.TabItem("📝 About"):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")

        # Non-interactive spacer tab separating the general tabs from the per-task tabs.
        with gr.TabItem("➖", interactive=False):
            gr.Markdown("", elem_classes="markdown-text")

        # One tab per multiple-choice task, each with its description and a task-specific leaderboard.
        for task, metadata in TASK_METADATA_MULTIPLECHOICE.items():

            with gr.TabItem(f"{metadata['icon']}{task}"):

                task_description = TASK_DESCRIPTIONS.get(task, "Description not available.")
                gr.Markdown(task_description, elem_classes="markdown-text")

                leaderboard = update_task_leaderboard(
                    LEADERBOARD_DF.rename(columns={f"{task} Prompt Average": "Prompt Average", f"{task} Prompt Std": "Prompt Std", f"{task} Best Prompt": "Best Prompt", f"{task} Best Prompt Id": "Best Prompt Id", task: "Combined Performance"}),
                    default_selection=['rank', 'FS', 'Model', 'Combined Performance', 'Prompt Average', 'Prompt Std', 'Best Prompt', 'Best Prompt Id'],
                    hidden_columns=[col for col in LEADERBOARD_DF.columns if col not in ['rank', 'FS', 'Model', 'Combined Performance', 'Prompt Average', 'Prompt Std', 'Best Prompt', 'Best Prompt Id']]
                )

        with gr.TabItem("➖", interactive=False):
            gr.Markdown("", elem_classes="markdown-text")

        # One tab per generative task, mirroring the multiple-choice loop above.
        for task, metadata in TASK_METADATA_GENERATIVE.items():
            with gr.TabItem(f"{metadata['icon']}{task}"):
                task_description = TASK_DESCRIPTIONS.get(task, "Description not available.")
                gr.Markdown(task_description, elem_classes="markdown-text")

                leaderboard = update_task_leaderboard(
                    LEADERBOARD_DF.rename(columns={f"{task} Prompt Average": "Prompt Average",
                                                   f"{task} Prompt Std": "Prompt Std",
                                                   f"{task} Best Prompt": "Best Prompt",
                                                   f"{task} Best Prompt Id": "Best Prompt Id",
                                                   task: "Combined Performance"}),
                    default_selection=['rank', 'FS', 'Model', 'Combined Performance', 'Prompt Average', 'Prompt Std', 'Best Prompt',
                                       'Best Prompt Id'],
                    hidden_columns=[col for col in LEADERBOARD_DF.columns if
                                    col not in ['rank', 'FS', 'Model', 'Combined Performance', 'Prompt Average', 'Prompt Std',
                                                'Best Prompt', 'Best Prompt Id']]
                )

    with gr.Accordion("📙 Citation", open=False):
        gr.Textbox(value=CITATION_BUTTON_TEXT, label=CITATION_BUTTON_LABEL, lines=20, elem_id="citation-button", show_copy_button=True)

    with gr.Accordion("🙏 Credits", open=False):
        gr.Markdown(
            """
            **This project has benefited from the following support:**

            - 🔧 **Codebase**: Based on and extended from the Open Italian LLM Leaderboard, developed by **Alessandro Ercolani** and **Samuele Colombo**. We warmly thank them for their invaluable support and guidance in implementing this leaderboard.

            - 💶 **Funding**: Partially supported by the PNRR project **FAIR - Future AI Research (PE00000013)**, under the NRRP MUR program funded by **NextGenerationEU**.

            - 🖥️ **Computation**: We gratefully acknowledge **CINECA** for granting access to the **LEONARDO** supercomputer.
            """
        )

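# Restart the Space every 30 minutes so the snapshots and leaderboard data are refreshed.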
scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)
scheduler.start()

demo.queue(default_concurrency_limit=40).launch(debug=True, show_error=True)