import gradio as gr
import pandas as pd
import numpy as np

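# The loader below expects a results.csv with one row per evaluated model. The raw
# column names are taken from the rename map further down; the header sketched here
# is illustrative only, not a guarantee of the real file's layout:
#
#   model_name,size,grounding_score,quality_score,combined_score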
try:
    df = pd.read_csv('results.csv', skipinitialspace=True)
    print(f"Successfully loaded {len(df)} rows from CSV")
    print(f"Columns: {list(df.columns)}")
except Exception as e:
    print(f"Error reading CSV: {e}")
    print("Attempting to read with error handling...")
    df = pd.read_csv('results.csv', skipinitialspace=True, on_bad_lines='skip')
    print(f"Loaded {len(df)} rows after skipping bad lines")


df = df.rename(columns={
    'model_name': 'Model Name',
    'size': 'Size',
    'grounding_score': 'Separate Grounding Score',
    'quality_score': 'Separate Quality Score',
    'combined_score': 'Combined Score'
})

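# Human-readable parameter count, e.g. 7 -> "7B", 7.5 -> "7.5B".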
df["Size_Display"] = df["Size"].apply(lambda x: f"{int(x)}B" if x == int(x) else f"{x}B") |
|
|
|
|
|
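# Bucket each model into the parameter-count range used by the size filter.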
def get_size_category(size):
    if size <= 5:
        return "0-5B"
    elif size <= 10:
        return "5-10B"
    elif size <= 20:
        return "10-20B"
    elif size <= 40:
        return "20-40B"
    elif size <= 80:
        return "40-80B"
    else:
        return ">80B"


df["Size_Category"] = df["Size"].apply(get_size_category)

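# Illustrative call (hypothetical inputs): filter_and_search_models("llama", ["5-10B"], "Combined Score")
# returns a ranked frame with columns Rank, Model Name, Size, and the three score columns.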
def filter_and_search_models(search_query, size_ranges, sort_by):
    """Filter and search models based on user inputs."""
    filtered_df = df.copy()

    if search_query:
        mask = filtered_df["Model Name"].str.contains(
            search_query, case=False, na=False
        )
        filtered_df = filtered_df[mask]

    if size_ranges and len(size_ranges) > 0:
        filtered_df = filtered_df[filtered_df["Size_Category"].isin(size_ranges)]

    if sort_by in filtered_df.columns:
        filtered_df = filtered_df.sort_values(sort_by, ascending=False)

    filtered_df = filtered_df.reset_index(drop=True)
    filtered_df["Rank"] = range(1, len(filtered_df) + 1)

    display_df = filtered_df[
        [
            "Rank",
            "Model Name",
            "Size_Display",
            "Separate Grounding Score",
            "Separate Quality Score",
            "Combined Score",
        ]
    ]

    display_df = display_df.rename(columns={"Size_Display": "Size"})

for col in ["Separate Grounding Score", "Separate Quality Score", "Combined Score"]: |
|
display_df = display_df.copy() |
|
display_df[col] = display_df[col] |
|
|
|
return display_df |
|
|
|
|
|
|
|
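# Build the Gradio UI: a leaderboard tab with search, size-filter, and sort controls,
# plus an About tab describing the benchmark.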
with gr.Blocks(title="FACTS Grounding Benchmark", theme=gr.themes.Base()) as app:
    gr.Markdown("# 🏆 FACTS Grounding Benchmark")
    gr.Markdown(
        "### FACTS Medical Grounding is a benchmark for evaluating open models in the medical domain."
    )

    with gr.Tabs():
        with gr.TabItem("Leaderboard"):

            with gr.Row():
                with gr.Column(scale=2):
                    search_box = gr.Textbox(
                        label="Model Search",
                        placeholder="Search for a model name...",
                        value="",
                    )

                with gr.Column(scale=1):
                    sort_dropdown = gr.Dropdown(
                        choices=[
                            "Combined Score",
                            "Separate Grounding Score",
                            "Separate Quality Score",
                        ],
                        value="Combined Score",
                        label="Sort by",
                        elem_classes="sort-dropdown",
                    )

            with gr.Row():
                gr.Markdown("**Filter by Model Size:**")
                size_checkboxes = gr.CheckboxGroup(
                    choices=["0-5B", "5-10B", "10-20B", "20-40B", "40-80B", ">80B"],
                    value=["0-5B", "5-10B", "10-20B", "20-40B", "40-80B", ">80B"],
                    label="",
                    elem_classes="size-filter",
                    container=False,
                )

            total_models = gr.Markdown(f"**Showing {len(df)} models**")

            # Header order matches the columns returned by filter_and_search_models.
            results_table = gr.Dataframe(
                value=filter_and_search_models(
                    "",
                    ["0-5B", "5-10B", "10-20B", "20-40B", "40-80B", ">80B"],
                    "Combined Score",
                ),
                headers=[
                    "Rank",
                    "Model Name",
                    "Size",
                    "Separate Grounding Score",
                    "Separate Quality Score",
                    "Combined Score",
                ],
                datatype=["number", "str", "str", "number", "number", "number"],
                elem_id="leaderboard-table",
                interactive=False,
                wrap=True,
            )

            with gr.Accordion("Metric Explanations", open=False):
                gr.Markdown(
                    """
- **Grounding Score**: Measures the model's ability to provide factually accurate responses based on the given context
- **Quality Score**: Evaluates the overall quality of the model's responses, including coherence and relevance
- **Combined Score**: The share of responses that pass both the grounding and quality checks, representing overall performance
                    """
                )

        with gr.TabItem("About"):
            gr.Markdown(
                """
# About This Evaluation

## FACTS Grounding Leaderboard

The FACTS Grounding Leaderboard is a benchmark developed by Google DeepMind to evaluate how well Large Language Models (LLMs) can generate factually accurate responses that are fully grounded in provided context documents.

### How It Works:
1. **Input**: Each example contains a system instruction, a context document (up to 32k tokens), and a user request
2. **Task**: Models must generate responses that answer the user's request using ONLY information from the provided context
3. **Evaluation**: Responses are evaluated in two phases:
   - **Quality Check**: Does the response adequately address the user's request?
   - **Grounding Check**: Is every claim in the response supported by the context document?

## Medical Domain Variation

This implementation focuses specifically on the medical-domain examples from the FACTS benchmark to evaluate smaller, open-source models in healthcare contexts.

### Key Modifications:
- **Domain-Specific**: Uses only the 236 medical examples from the original 860-example dataset
- **Single Judge Model**: Employs Gemini 1.5 Flash as the sole evaluator (vs. the original's ensemble of 3 judge models)
- **Focus on Open Models**: Evaluates open-source models in the medical domain that are often missing from mainstream leaderboards

### Why Medical Domain?
Medical information requires exceptional accuracy and grounding. By focusing on this domain, we can assess how well smaller models handle critical healthcare information while strictly adhering to the provided sources, a crucial capability for safe medical AI applications.

### Evaluation Metrics:
- **Grounding Score**: Percentage of responses where all claims are supported by the context
- **Quality Score**: Percentage of responses that adequately address the user's request
- **Combined Score**: Percentage of responses that pass both the quality and grounding checks

This focused approach enables rapid iteration and testing of smaller models on domain-specific factual grounding tasks.

---

## References

- **Original Leaderboard by Google**: [FACTS Grounding Benchmark Leaderboard](https://www.kaggle.com/benchmarks/google/facts-grounding/leaderboard)
- **Public Dataset**: [FACTS Grounding Examples Dataset](https://www.kaggle.com/datasets/deepmind/facts-grounding-examples/data)
- **Technical Documentation**: [FACTS Grounding Benchmark Starter Code](https://www.kaggle.com/code/andrewmingwang/facts-grounding-benchmark-starter-code/notebook)

---
                """
            )

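    # For reference, the three leaderboard metrics described in the About tab can be
    # read as per-example pass rates. A rough sketch, assuming a hypothetical
    # per-example frame with boolean "quality_ok" and "grounded" columns (not part
    # of this app's results.csv):
    #
    #   quality_score   = per_example["quality_ok"].mean()
    #   grounding_score = per_example["grounded"].mean()
    #   combined_score  = (per_example["quality_ok"] & per_example["grounded"]).mean()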
    def update_table(search, sizes, sort_by):
        filtered_df = filter_and_search_models(search, sizes, sort_by)
        model_count = f"**Showing {len(filtered_df)} models**"
        return filtered_df, model_count

    # Re-run the filter whenever the search box, size filter, or sort order changes.
    search_box.change(
        fn=update_table,
        inputs=[search_box, size_checkboxes, sort_dropdown],
        outputs=[results_table, total_models],
    )

    size_checkboxes.change(
        fn=update_table,
        inputs=[search_box, size_checkboxes, sort_dropdown],
        outputs=[results_table, total_models],
    )

    sort_dropdown.change(
        fn=update_table,
        inputs=[search_box, size_checkboxes, sort_dropdown],
        outputs=[results_table, total_models],
    )

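# Custom CSS for the leaderboard table, rank column, size filter, and row highlighting.
# (Assigned to app.css before launch; this assumes Gradio reads the css attribute at launch time.)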
app.css = """ |
|
#leaderboard-table { |
|
font-size: 14px; |
|
margin-top: 20px; |
|
max-height: 600px; |
|
overflow-y: auto; |
|
} |
|
|
|
#leaderboard-table td:first-child { |
|
text-align: center; |
|
font-weight: 600; |
|
color: #444; |
|
background-color: #f8f9fa; |
|
width: 60px; |
|
} |
|
|
|
#leaderboard-table td:nth-child(2) { |
|
font-weight: 500; |
|
max-width: 400px; |
|
} |
|
|
|
#leaderboard-table td:nth-child(3) { |
|
text-align: center; |
|
font-weight: 500; |
|
color: #666; |
|
} |
|
|
|
#leaderboard-table td:nth-child(n+4) { |
|
text-align: center; |
|
} |
|
|
|
.size-filter { |
|
display: flex; |
|
flex-wrap: wrap; |
|
gap: 15px; |
|
margin-top: 10px; |
|
} |
|
|
|
.size-filter label { |
|
display: flex; |
|
align-items: center; |
|
margin: 0; |
|
} |
|
|
|
.size-filter input[type="checkbox"] { |
|
margin-right: 5px; |
|
} |
|
|
|
/* Highlight rows based on model family */ |
|
#leaderboard-table tr:has(td:contains("meta-llama")) { |
|
background-color: #fffbf0; |
|
} |
|
|
|
#leaderboard-table tr:has(td:contains("deepseek")) { |
|
background-color: #f0f8ff; |
|
} |
|
|
|
#leaderboard-table tr:has(td:contains("Qwen")) { |
|
background-color: #f5fff5; |
|
} |
|
|
|
#leaderboard-table tr:has(td:contains("google")) { |
|
background-color: #fff0f5; |
|
} |
|
|
|
/* Header styling */ |
|
#leaderboard-table th { |
|
background-color: #f8f9fa; |
|
font-weight: 600; |
|
} |
|
|
|
#leaderboard-table th:first-child { |
|
width: 60px; |
|
text-align: center; |
|
} |
|
""" |

if __name__ == "__main__":
    app.launch()