|
import gradio as gr |
|
import pandas as pd |
|
import numpy as np |
|
from typing import List, Dict |
|
|
|
|
|
|
|
# Leaderboard results on the full synthesized STaRK query set.
# One list entry per method; metric columns are named {dataset}_{metric}
# with values in percent.  Note: no LLM-reranker rows in this split.
data_synthesized_full = {
    'Method': ['BM25', 'DPR (roberta)', 'ANCE (roberta)', 'QAGNN (roberta)', 'ada-002', 'voyage-l2-instruct', 'LLM2Vec', 'GritLM-7b', 'multi-ada-002', 'ColBERTv2'],
    'STARK-AMAZON_Hit@1': [44.94, 15.29, 30.96, 26.56, 39.16, 40.93, 21.74, 42.08, 40.07, 46.10],
    'STARK-AMAZON_Hit@5': [67.42, 47.93, 51.06, 50.01, 62.73, 64.37, 41.65, 66.87, 64.98, 66.02],
    'STARK-AMAZON_R@20': [53.77, 44.49, 41.95, 52.05, 53.29, 54.28, 33.22, 56.52, 55.12, 53.44],
    'STARK-AMAZON_MRR': [55.30, 30.20, 40.66, 37.75, 50.35, 51.60, 31.47, 53.46, 51.55, 55.51],
    'STARK-MAG_Hit@1': [25.85, 10.51, 21.96, 12.88, 29.08, 30.06, 18.01, 37.90, 25.92, 31.18],
    'STARK-MAG_Hit@5': [45.25, 35.23, 36.50, 39.01, 49.61, 50.58, 34.85, 56.74, 50.43, 46.42],
    'STARK-MAG_R@20': [45.69, 42.11, 35.32, 46.97, 48.36, 50.49, 35.46, 46.40, 50.80, 43.94],
    'STARK-MAG_MRR': [34.91, 21.34, 29.14, 29.12, 38.62, 39.66, 26.10, 47.25, 36.94, 38.39],
    'STARK-PRIME_Hit@1': [12.75, 4.46, 6.53, 8.85, 12.63, 10.85, 10.10, 15.57, 15.10, 11.75],
    'STARK-PRIME_Hit@5': [27.92, 21.85, 15.67, 21.35, 31.49, 30.23, 22.49, 33.42, 33.56, 23.85],
    'STARK-PRIME_R@20': [31.25, 30.13, 16.52, 29.63, 36.00, 37.83, 26.34, 39.09, 38.05, 25.04],
    'STARK-PRIME_MRR': [19.84, 12.38, 11.05, 14.73, 21.41, 19.99, 16.12, 24.11, 23.49, 17.39]
}
|
|
|
# Leaderboard results on a 10% subsample of the synthesized queries.
# Same column layout as data_synthesized_full, with two extra
# LLM-reranker rows (Claude3 / GPT4) appended to every list.
data_synthesized_10 = {
    'Method': ['BM25', 'DPR (roberta)', 'ANCE (roberta)', 'QAGNN (roberta)', 'ada-002', 'voyage-l2-instruct', 'LLM2Vec', 'GritLM-7b', 'multi-ada-002', 'ColBERTv2', 'Claude3 Reranker', 'GPT4 Reranker'],
    'STARK-AMAZON_Hit@1': [42.68, 16.46, 30.09, 25.00, 39.02, 43.29, 18.90, 43.29, 40.85, 44.31, 45.49, 44.79],
    'STARK-AMAZON_Hit@5': [67.07, 50.00, 49.27, 48.17, 64.02, 67.68, 37.80, 71.34, 62.80, 65.24, 71.13, 71.17],
    'STARK-AMAZON_R@20': [54.48, 42.15, 41.91, 51.65, 49.30, 56.04, 34.73, 56.14, 52.47, 51.00, 53.77, 55.35],
    'STARK-AMAZON_MRR': [54.02, 30.20, 39.30, 36.87, 50.32, 54.20, 28.76, 55.07, 51.54, 55.07, 55.91, 55.69],
    'STARK-MAG_Hit@1': [27.81, 11.65, 22.89, 12.03, 28.20, 34.59, 19.17, 38.35, 25.56, 31.58, 36.54, 40.90],
    'STARK-MAG_Hit@5': [45.48, 36.84, 37.26, 37.97, 52.63, 50.75, 33.46, 58.64, 50.37, 47.36, 53.17, 58.18],
    'STARK-MAG_R@20': [44.59, 42.30, 44.16, 47.98, 49.25, 50.75, 29.85, 46.38, 53.03, 45.72, 48.36, 48.60],
    'STARK-MAG_MRR': [35.97, 21.82, 30.00, 28.70, 38.55, 42.90, 26.06, 48.25, 36.82, 38.98, 44.15, 49.00],
    'STARK-PRIME_Hit@1': [13.93, 5.00, 6.78, 7.14, 15.36, 12.14, 9.29, 16.79, 15.36, 15.00, 17.79, 18.28],
    'STARK-PRIME_Hit@5': [31.07, 23.57, 16.15, 17.14, 31.07, 31.42, 20.7, 34.29, 32.86, 26.07, 36.90, 37.28],
    'STARK-PRIME_R@20': [32.84, 30.50, 17.07, 32.95, 37.88, 37.34, 25.54, 41.11, 40.99, 27.78, 35.57, 34.05],
    'STARK-PRIME_MRR': [21.68, 13.50, 11.42, 16.27, 23.50, 21.23, 15.00, 24.99, 23.70, 19.98, 26.27, 26.55]
}
|
|
|
# Leaderboard results on the human-generated query set.
# Same column layout and method list as data_synthesized_10.
data_human_generated = {
    'Method': ['BM25', 'DPR (roberta)', 'ANCE (roberta)', 'QAGNN (roberta)', 'ada-002', 'voyage-l2-instruct', 'LLM2Vec', 'GritLM-7b', 'multi-ada-002', 'ColBERTv2', 'Claude3 Reranker', 'GPT4 Reranker'],
    'STARK-AMAZON_Hit@1': [27.16, 16.05, 25.93, 22.22, 39.50, 35.80, 29.63, 40.74, 46.91, 33.33, 53.09, 50.62],
    'STARK-AMAZON_Hit@5': [51.85, 39.51, 54.32, 49.38, 64.19, 62.96, 46.91, 71.60, 72.84, 55.56, 74.07, 75.31],
    'STARK-AMAZON_R@20': [29.23, 15.23, 23.69, 21.54, 35.46, 33.01, 21.21, 36.30, 40.22, 29.03, 35.46, 35.46],
    'STARK-AMAZON_MRR': [18.79, 27.21, 37.12, 31.33, 52.65, 47.84, 38.61, 53.21, 58.74, 43.77, 62.11, 61.06],
    'STARK-MAG_Hit@1': [32.14, 4.72, 25.00, 20.24, 28.57, 22.62, 16.67, 34.52, 23.81, 33.33, 38.10, 36.90],
    'STARK-MAG_Hit@5': [41.67, 9.52, 30.95, 26.19, 41.67, 36.90, 28.57, 44.04, 41.67, 36.90, 45.24, 46.43],
    'STARK-MAG_R@20': [32.46, 25.00, 27.24, 28.76, 35.95, 32.44, 21.74, 34.57, 39.85, 30.50, 35.95, 35.95],
    'STARK-MAG_MRR': [37.42, 7.90, 27.98, 25.53, 35.81, 29.68, 21.59, 38.72, 31.43, 35.97, 42.00, 40.65],
    'STARK-PRIME_Hit@1': [22.45, 2.04, 7.14, 6.12, 17.35, 16.33, 9.18, 25.51, 24.49, 15.31, 28.57, 28.57],
    'STARK-PRIME_Hit@5': [41.84, 9.18, 13.27, 13.27, 34.69, 32.65, 21.43, 41.84, 39.80, 26.53, 46.94, 44.90],
    'STARK-PRIME_R@20': [42.32, 10.69, 11.72, 17.62, 41.09, 39.01, 26.77, 48.10, 47.21, 25.56, 41.61, 41.61],
    'STARK-PRIME_MRR': [30.37, 7.05, 10.07, 9.39, 26.35, 24.33, 15.24, 34.28, 32.98, 19.67, 36.32, 34.82]
}
|
|
|
class DataManager:
    """Holds the three STaRK leaderboard tables and provides the
    filtering/formatting helpers used by the Gradio UI."""

    def __init__(self, data_synthesized_full: Dict, data_synthesized_10: Dict, data_human_generated: Dict):
        """Build one DataFrame per evaluation split from the raw column dicts."""
        self.df_synthesized_full = pd.DataFrame(data_synthesized_full)
        self.df_synthesized_10 = pd.DataFrame(data_synthesized_10)
        self.df_human_generated = pd.DataFrame(data_human_generated)

        # Grouping of method names into retriever families; drives the
        # "Model Types" checkbox filter in the UI.
        self.model_types = {
            'Sparse Retriever': ['BM25'],
            'Small Dense Retrievers': ['DPR (roberta)', 'ANCE (roberta)', 'QAGNN (roberta)'],
            'LLM-based Dense Retrievers': ['ada-002', 'voyage-l2-instruct', 'LLM2Vec', 'GritLM-7b'],
            'Multivector Retrievers': ['multi-ada-002', 'ColBERTv2'],
            'LLM Rerankers': ['Claude3 Reranker', 'GPT4 Reranker']
        }

        # Metric suffixes and dataset names as they appear in the column
        # headers ('STARK-{dataset}_{metric}').
        self.metrics = ['Hit@1', 'Hit@5', 'R@20', 'MRR']
        self.datasets = ['AMAZON', 'MAG', 'PRIME']

    def filter_by_model_type(self, df: pd.DataFrame, selected_types: List[str]) -> pd.DataFrame:
        """Return the rows of `df` whose Method belongs to one of `selected_types`.

        An empty selection yields an empty frame with the same columns, so the
        UI renders empty tables instead of raising.
        """
        if not selected_types:
            return df.head(0)
        # Renamed loop variable (was `type`) so the builtin is not shadowed.
        selected_models = [model
                           for model_type in selected_types
                           for model in self.model_types[model_type]]
        return df[df['Method'].isin(selected_models)]

    def format_dataframe(self, df: pd.DataFrame, dataset: str, sort_metric: str = 'MRR') -> pd.DataFrame:
        """Project `df` onto one dataset's columns and prettify it.

        Keeps 'Method' plus every column containing `dataset` (e.g.
        'STARK-MAG'), shortens headers to the metric suffix
        ('STARK-MAG_MRR' -> 'MRR'), rounds scores to 2 decimals and sorts
        descending by `sort_metric` (default 'MRR', the previous hard-coded
        behavior, so existing callers are unaffected).
        """
        columns = ['Method'] + [col for col in df.columns if dataset in col]
        filtered_df = df[columns].copy()
        filtered_df.columns = [col.split('_')[-1] if '_' in col else col for col in filtered_df.columns]

        for col in filtered_df.columns:
            if col != 'Method':
                filtered_df[col] = filtered_df[col].round(2)

        return filtered_df.sort_values(sort_metric, ascending=False)

    def get_best_model(self, df: pd.DataFrame, metric: str) -> str:
        """Return the name of the method with the highest `metric` value in `df`."""
        return df.loc[df[metric].idxmax(), 'Method']
|
|
|
|
|
def create_metric_summary(df: pd.DataFrame, dataset: str) -> str:
    """Render a short markdown summary for one formatted dataset table.

    `df` must have 'Method', 'MRR' and 'Hit@1' columns (as produced by
    DataManager.format_dataframe).  Reports the best MRR and Hit@1 scores
    and which method achieved each.

    Returns a placeholder message for an empty frame, since `idxmax()`
    raises ValueError when there are no rows (e.g. nothing selected in the
    model-type filter).
    """
    if df.empty:
        return f"""
### {dataset} Dataset Summary
- No results available
"""

    best_mrr = df['MRR'].max()
    best_hit1 = df['Hit@1'].max()
    best_model_mrr = df.loc[df['MRR'].idxmax(), 'Method']
    best_model_hit1 = df.loc[df['Hit@1'].idxmax(), 'Method']

    return f"""
### {dataset} Dataset Summary
- Best MRR: {best_mrr:.2f}% ({best_model_mrr})
- Best Hit@1: {best_hit1:.2f}% ({best_model_hit1})
"""
|
|
|
|
|
def create_app(data_manager: DataManager):
    """Build and return the Gradio Blocks leaderboard app.

    Layout: a model-type checkbox filter and a sort-metric radio at the top,
    then one tab per data source, each containing a summary + results table
    for every dataset.  All nine tables (3 sources x 3 datasets) are refreshed
    together by `update_tables` whenever either control changes, and once on
    page load.
    """
    with gr.Blocks(css="""
        .metric-summary { margin: 1rem 0; padding: 1rem; background: #f7f7f7; border-radius: 4px; }
        .table-container { margin-top: 1rem; }
        .model-filter { margin-bottom: 1rem; }
        .dataset-section { border: 1px solid #ddd; padding: 1rem; margin: 1rem 0; border-radius: 4px; }
    """) as demo:

        gr.Markdown("# Semi-structured Retrieval Benchmark (STaRK) Leaderboard")
        gr.Markdown("### An evaluation benchmark for semi-structured text retrieval")
        gr.Markdown("Refer to the [STaRK paper](https://arxiv.org/pdf/2404.13207) for details on metrics, tasks and models.")

        with gr.Row():
            with gr.Column(scale=3):
                # One checkbox per retriever family; all enabled by default.
                model_type_filter = gr.CheckboxGroup(
                    choices=list(data_manager.model_types.keys()),
                    value=list(data_manager.model_types.keys()),
                    label="Model Types",
                    interactive=True,
                    elem_classes=["model-filter"]
                )

            with gr.Column(scale=1):
                sort_by = gr.Radio(
                    choices=data_manager.metrics,
                    value="MRR",
                    label="Sort by Metric",
                    interactive=True
                )

        # gr.DataFrame components in creation order (source-major, then
        # dataset); update_tables must append its outputs in this same order.
        all_dataframes = []

        with gr.Tabs() as tabs:
            data_sources = [
                ("Synthesized (Full)", data_manager.df_synthesized_full),
                ("Synthesized (10%)", data_manager.df_synthesized_10),
                ("Human-Generated", data_manager.df_human_generated)
            ]

            for source_name, source_df in data_sources:
                with gr.TabItem(source_name):
                    for dataset in data_manager.datasets:
                        with gr.Row(elem_classes=["dataset-section"]):
                            with gr.Column():
                                # Static summary computed once at build time
                                # from the full (unfiltered) table.
                                gr.Markdown(create_metric_summary(
                                    data_manager.format_dataframe(source_df, f"STARK-{dataset}"),
                                    dataset
                                ))
                                # Table starts empty; populated by the
                                # demo.load callback below.
                                df_display = gr.DataFrame(
                                    interactive=False,
                                    elem_classes=["table-container"]
                                )
                                all_dataframes.append(df_display)

        def update_tables(selected_types: List[str], sort_metric: str):
            # Recompute every table with the current filter and sort settings.
            # Iteration order must match the creation order of all_dataframes.
            outputs = []
            for df_source in [data_manager.df_synthesized_full,
                              data_manager.df_synthesized_10,
                              data_manager.df_human_generated]:
                filtered_df = data_manager.filter_by_model_type(df_source, selected_types)
                for dataset in data_manager.datasets:
                    formatted_df = data_manager.format_dataframe(filtered_df, f"STARK-{dataset}")
                    formatted_df = formatted_df.sort_values(sort_metric, ascending=False)
                    outputs.append(formatted_df)
            return outputs

        # Refresh all tables whenever the filter or the sort metric changes.
        model_type_filter.change(
            update_tables,
            inputs=[model_type_filter, sort_by],
            outputs=all_dataframes
        )

        sort_by.change(
            update_tables,
            inputs=[model_type_filter, sort_by],
            outputs=all_dataframes
        )

        # Populate the (initially empty) tables on first page load.
        demo.load(
            update_tables,
            inputs=[model_type_filter, sort_by],
            outputs=all_dataframes
        )

    return demo
|
|
|
if __name__ == "__main__":
    # Assemble the three leaderboard tables, build the UI and serve it.
    manager = DataManager(data_synthesized_full, data_synthesized_10, data_human_generated)
    app = create_app(manager)
    app.launch()