Commit 3d1714b
Parent(s): dbee0a3
refactoring
app.py CHANGED
@@ -2,57 +2,29 @@ import gradio as gr
import pandas as pd
import numpy as np

-#
-
-
-
-
-
-
-
-
-
-
-        'Qwen/Qwen3-14B',
-        'google/gemma-3-27b-it',
-        'Qwen/Qwen2.5-VL-32B-Instruct',
-        'meta-llama/Llama-3.1-70B-Instruct',
-        'google/gemma-3-12b-it',
-        'google/gemma-3-4b-it',
-        'Qwen/Qwen3-1.7B'
-    ],
-    'Separate Grounding Score': [
-        0.817797, 0.93617, 0.842553, 0.812766, 0.770213, 0.740426,
-        0.766949, 0.748936, 0.778723, 0.936, 0.621277, 0.855932,
-        0.944, 0.9, 0.702128
-    ],
-    'Separate Quality Score': [
-        0.542373, 0.459574, 0.510638, 0.540426, 0.540426, 0.553191,
-        0.516949, 0.523404, 0.502128, 0.391, 0.570213, 0.389831,
-        0.343, 0.33, 0.451064
-    ],
-    'Combined Score': [
-        0.457627, 0.434043, 0.425532, 0.425532, 0.425532, 0.417021,
-        0.40678, 0.4, 0.382979, 0.378, 0.357447, 0.334746,
-        0.313, 0.3, 0.297872
-    ]
-}
-
-# Create DataFrame
-df = pd.DataFrame(data)

-#
-
-
-
-
-
-
-    return 0

-
-df[

# Add size category for filtering
def get_size_category(size):
@@ -69,119 +41,202 @@ def get_size_category(size):
    else:
        return ">80B"

-df[

def filter_and_search_models(search_query, size_ranges, sort_by):
    """Filter and search models based on user inputs"""
    filtered_df = df.copy()
-
    # Apply search filter
    if search_query:
-        mask = filtered_df[
        filtered_df = filtered_df[mask]
-
    # Apply size range filter
    if size_ranges and len(size_ranges) > 0:
-        filtered_df = filtered_df[filtered_df[
-
    # Sort by selected metric
    if sort_by in filtered_df.columns:
        filtered_df = filtered_df.sort_values(sort_by, ascending=False)
-
-    #
-
-
-
    # Rename Size_Display to Size for cleaner display
-    display_df = display_df.rename(columns={
-
    # Round numerical values for better display
-    for col in [
        display_df = display_df.copy()  # Create a copy to avoid SettingWithCopyWarning
-        display_df[col] = display_df[col]
-
    return display_df

# Create the Gradio interface
-with gr.Blocks(title="
-    gr.Markdown("# 🏆
-    gr.Markdown(
-
-
-
-
-
-
-
-
    )
-
-
-
-
-
-
-
    )
-
-    # Size filters in a row
-    with gr.Row():
-        gr.Markdown("**Filter by Model Size:**")
-        size_checkboxes = gr.CheckboxGroup(
-            choices=["0-5B", "5-10B", "10-20B", "20-40B", "40-80B", ">80B"],
-            value=["0-5B", "5-10B", "10-20B", "20-40B", "40-80B", ">80B"],
-            label="",
-            elem_classes="size-filter",
-            container=False
-        )
-
-    # Model count
-    total_models = gr.Markdown(f"**Showing {len(df)} models**")
-
-    # Results table below filters
-    results_table = gr.Dataframe(
-        value=filter_and_search_models("", ["0-5B", "5-10B", "10-20B", "20-40B", "40-80B", ">80B"], "Combined Score"),
-        headers=["Model Name", "Size", "Separate Grounding Score",
-                 "Separate Quality Score", "Combined Score"],
-        datatype=["str", "str", "number", "number", "number"],
-        elem_id="leaderboard-table",
-        interactive=False,
-        wrap=True
-    )
-
-    # Metric explanations at the bottom
-    with gr.Accordion("Metric Explanations", open=False):
-        gr.Markdown("""
-        - **Grounding Score**: Measures the model's ability to provide factually accurate responses based on given context
-        - **Quality Score**: Evaluates the overall quality of the model's responses including coherence and relevance
-        - **Combined Score**: A weighted combination of grounding and quality scores representing overall performance
-        """)
-
    # Update table when filters change
    def update_table(search, sizes, sort_by):
        filtered_df = filter_and_search_models(search, sizes, sort_by)
        model_count = f"**Showing {len(filtered_df)} models**"
        return filtered_df, model_count
-
    # Connect all inputs to the update function
    search_box.change(
        fn=update_table,
        inputs=[search_box, size_checkboxes, sort_dropdown],
-        outputs=[results_table, total_models]
    )
-
    size_checkboxes.change(
        fn=update_table,
        inputs=[search_box, size_checkboxes, sort_dropdown],
-        outputs=[results_table, total_models]
    )
-
    sort_dropdown.change(
        fn=update_table,
        inputs=[search_box, size_checkboxes, sort_dropdown],
-        outputs=[results_table, total_models]
    )
-
    # Add custom CSS for better styling
    app.css = """
    #leaderboard-table {
@@ -190,67 +245,76 @@ with gr.Blocks(title="FACT Leaderboard", theme=gr.themes.Base()) as app:
        max-height: 600px;
        overflow-y: auto;
    }
-
    #leaderboard-table td:first-child {
        font-weight: 500;
        max-width: 400px;
    }
-
-    #leaderboard-table td:nth-child(
        text-align: center;
        font-weight: 500;
        color: #666;
    }
-
-    #leaderboard-table td:nth-child(n+
        text-align: center;
    }
-
    .size-filter {
        display: flex;
        flex-wrap: wrap;
        gap: 15px;
        margin-top: 10px;
    }
-
    .size-filter label {
        display: flex;
        align-items: center;
        margin: 0;
    }
-
    .size-filter input[type="checkbox"] {
        margin-right: 5px;
    }
-
    /* Highlight rows based on model family */
    #leaderboard-table tr:has(td:contains("meta-llama")) {
        background-color: #fffbf0;
    }
-
    #leaderboard-table tr:has(td:contains("deepseek")) {
        background-color: #f0f8ff;
    }
-
    #leaderboard-table tr:has(td:contains("Qwen")) {
        background-color: #f5fff5;
    }
-
    #leaderboard-table tr:has(td:contains("google")) {
        background-color: #fff0f5;
    }
-
    /* Header styling */
    #leaderboard-table th {
        background-color: #f8f9fa;
        font-weight: 600;
    }
-    """

-#
-
-

# Launch the app
if __name__ == "__main__":
-    app.launch()
import pandas as pd
import numpy as np

+# Read data from CSV file
+try:
+    df = pd.read_csv('results.csv', skipinitialspace=True)
+    print(f"Successfully loaded {len(df)} rows from CSV")
+    print(f"Columns: {list(df.columns)}")
+except Exception as e:
+    print(f"Error reading CSV: {e}")
+    print("Attempting to read with error handling...")
+    # Try reading with error handling for bad lines
+    df = pd.read_csv('results.csv', skipinitialspace=True, on_bad_lines='skip')
+    print(f"Loaded {len(df)} rows after skipping bad lines")

+# Map CSV columns to expected column names
+df = df.rename(columns={
+    'model_name': 'Model Name',
+    'size': 'Size',
+    'grounding_score': 'Separate Grounding Score',
+    'quality_score': 'Separate Quality Score',
+    'combined_score': 'Combined Score'
+})

+# Create size display format
+df["Size_Display"] = df["Size"].apply(lambda x: f"{int(x)}B" if x == int(x) else f"{x}B")

# Add size category for filtering
def get_size_category(size):
    else:
        return ">80B"

+df["Size_Category"] = df["Size"].apply(get_size_category)
+

def filter_and_search_models(search_query, size_ranges, sort_by):
    """Filter and search models based on user inputs"""
    filtered_df = df.copy()
+
    # Apply search filter
    if search_query:
+        mask = filtered_df["Model Name"].str.contains(
+            search_query, case=False, na=False
+        )
        filtered_df = filtered_df[mask]
+
    # Apply size range filter
    if size_ranges and len(size_ranges) > 0:
+        filtered_df = filtered_df[filtered_df["Size_Category"].isin(size_ranges)]
+
    # Sort by selected metric
    if sort_by in filtered_df.columns:
        filtered_df = filtered_df.sort_values(sort_by, ascending=False)
+
+    # Add ranking based on the sorted metric
+    filtered_df = filtered_df.reset_index(drop=True)
+    filtered_df["Rank"] = range(1, len(filtered_df) + 1)
+
+    # Select columns to display (including Rank and Size)
+    display_df = filtered_df[
+        [
+            "Rank",
+            "Model Name",
+            "Size_Display",
+            "Separate Grounding Score",
+            "Separate Quality Score",
+            "Combined Score",
+        ]
+    ]
+
    # Rename Size_Display to Size for cleaner display
+    display_df = display_df.rename(columns={"Size_Display": "Size"})
+
    # Round numerical values for better display
+    for col in ["Separate Grounding Score", "Separate Quality Score", "Combined Score"]:
        display_df = display_df.copy()  # Create a copy to avoid SettingWithCopyWarning
+        display_df[col] = display_df[col]
+
    return display_df

+
# Create the Gradio interface
+with gr.Blocks(title="FACTS Grounding Benchmark", theme=gr.themes.Base()) as app:
+    gr.Markdown("# 🏆 FACTS Grounding Benchmark")
+    gr.Markdown(
+        "### FACTS Medical Grounding is a benchmark designed to evaluate open models on the medical domain."
+    )
+
+    with gr.Tabs():
+        with gr.TabItem("Leaderboard"):
+            # Filters at the top
+            with gr.Row():
+                with gr.Column(scale=2):
+                    search_box = gr.Textbox(
+                        label="Model Search",
+                        placeholder="Search for a model name...",
+                        value="",
+                    )
+
+                with gr.Column(scale=1):
+                    sort_dropdown = gr.Dropdown(
+                        choices=[
+                            "Combined Score",
+                            "Separate Grounding Score",
+                            "Separate Quality Score",
+                        ],
+                        value="Combined Score",
+                        label="Sort by",
+                        elem_classes="sort-dropdown",
+                    )
+
+            # Size filters in a row
+            with gr.Row():
+                gr.Markdown("**Filter by Model Size:**")
+                size_checkboxes = gr.CheckboxGroup(
+                    choices=["0-5B", "5-10B", "10-20B", "20-40B", "40-80B", ">80B"],
+                    value=["0-5B", "5-10B", "10-20B", "20-40B", "40-80B", ">80B"],
+                    label="",
+                    elem_classes="size-filter",
+                    container=False,
+                )
+
+            # Model count
+            total_models = gr.Markdown(f"**Showing {len(df)} models**")
+
+            # Results table below filters
+            results_table = gr.Dataframe(
+                value=filter_and_search_models(
+                    "",
+                    ["0-5B", "5-10B", "10-20B", "20-40B", "40-80B", ">80B"],
+                    "Combined Score",
+                ),
+                headers=[
+                    "Rank",
+                    "Model Name",
+                    "Separate Grounding Score",
+                    "Separate Quality Score",
+                    "Size",
+                    "Combined Score",
+                ],
+                datatype=["number", "str", "str", "number", "number", "number"],
+                elem_id="leaderboard-table",
+                interactive=False,
+                wrap=True,
            )
+
+            # Metric explanations at the bottom
+            with gr.Accordion("Metric Explanations", open=False):
+                gr.Markdown(
+                    """
+                    - **Grounding Score**: Measures the model's ability to provide factually accurate responses based on the given context
+                    - **Quality Score**: Evaluates the overall quality of the model's responses, including coherence and relevance
+                    - **Combined Score**: A weighted combination of grounding and quality scores representing overall performance
+                    """
+                )
+
+        with gr.TabItem("About"):
+            gr.Markdown(
+                """
+# About This Evaluation
+
+## FACTS Grounding Leaderboard
+
+The FACTS Grounding Leaderboard is a benchmark developed by Google DeepMind to evaluate how well Large Language Models (LLMs) can generate factually accurate responses that are fully grounded in provided context documents.
+
+### How It Works:
+1. **Input**: Each example contains a system instruction, a context document (up to 32k tokens), and a user request
+2. **Task**: Models must generate responses that answer the user's request using ONLY information from the provided context
+3. **Evaluation**: Responses are evaluated in two phases:
+   - **Quality Check**: Does the response adequately address the user's request?
+   - **Grounding Check**: Is every claim in the response supported by the context document?
+
+## Medical Domain Variation
+
+This implementation focuses specifically on medical domain examples from the FACTS benchmark to evaluate smaller, open-source models in healthcare contexts.
+
+### Key Modifications:
+- **Domain-Specific**: Uses only the 236 medical examples from the original 860-example dataset
+- **Single Judge Model**: Employs Gemini 1.5 Flash as the sole evaluator (vs. the original's ensemble of 3 models)
+- **Focus on Open Models**: Evaluates open-source models often missing from mainstream leaderboards for the medical domain
+
+### Why Medical Domain?
+Medical information requires exceptional accuracy and grounding. By focusing on this domain, we can assess how well smaller models handle critical healthcare information while strictly adhering to provided sources, a crucial capability for safe medical AI applications.
+
+### Evaluation Metrics:
+- **Grounding Score**: Percentage of responses where all claims are supported by the context
+- **Quality Score**: Percentage of responses that adequately address the user's request
+- **Combined Score**: Percentage of responses that pass both quality and grounding checks
+
+This focused approach enables rapid iteration and testing of smaller models on domain-specific factual grounding tasks.
+
+---
+
+## References
+
+- **Original Leaderboard by Google**: [FACTS Grounding Benchmark Leaderboard](https://www.kaggle.com/benchmarks/google/facts-grounding/leaderboard)
+- **Public Dataset**: [FACTS Grounding Examples Dataset](https://www.kaggle.com/datasets/deepmind/facts-grounding-examples/data)
+- **Technical Documentation**: [FACTS Grounding Benchmark Starter Code](https://www.kaggle.com/code/andrewmingwang/facts-grounding-benchmark-starter-code/notebook)
+
+---
+"""
            )
+
    # Update table when filters change
    def update_table(search, sizes, sort_by):
        filtered_df = filter_and_search_models(search, sizes, sort_by)
        model_count = f"**Showing {len(filtered_df)} models**"
        return filtered_df, model_count
+
    # Connect all inputs to the update function
    search_box.change(
        fn=update_table,
        inputs=[search_box, size_checkboxes, sort_dropdown],
+        outputs=[results_table, total_models],
    )
+
    size_checkboxes.change(
        fn=update_table,
        inputs=[search_box, size_checkboxes, sort_dropdown],
+        outputs=[results_table, total_models],
    )
+
    sort_dropdown.change(
        fn=update_table,
        inputs=[search_box, size_checkboxes, sort_dropdown],
+        outputs=[results_table, total_models],
    )
+
    # Add custom CSS for better styling
    app.css = """
    #leaderboard-table {

        max-height: 600px;
        overflow-y: auto;
    }
+
    #leaderboard-table td:first-child {
+        text-align: center;
+        font-weight: 600;
+        color: #444;
+        background-color: #f8f9fa;
+        width: 60px;
+    }
+
+    #leaderboard-table td:nth-child(2) {
        font-weight: 500;
        max-width: 400px;
    }
+
+    #leaderboard-table td:nth-child(3) {
        text-align: center;
        font-weight: 500;
        color: #666;
    }
+
+    #leaderboard-table td:nth-child(n+4) {
        text-align: center;
    }
+
    .size-filter {
        display: flex;
        flex-wrap: wrap;
        gap: 15px;
        margin-top: 10px;
    }
+
    .size-filter label {
        display: flex;
        align-items: center;
        margin: 0;
    }
+
    .size-filter input[type="checkbox"] {
        margin-right: 5px;
    }
+
    /* Highlight rows based on model family */
    #leaderboard-table tr:has(td:contains("meta-llama")) {
        background-color: #fffbf0;
    }
+
    #leaderboard-table tr:has(td:contains("deepseek")) {
        background-color: #f0f8ff;
    }
+
    #leaderboard-table tr:has(td:contains("Qwen")) {
        background-color: #f5fff5;
    }
+
    #leaderboard-table tr:has(td:contains("google")) {
        background-color: #fff0f5;
    }
+
    /* Header styling */
    #leaderboard-table th {
        background-color: #f8f9fa;
        font-weight: 600;
    }

+    #leaderboard-table th:first-child {
+        width: 60px;
+        text-align: center;
+    }
+    """
# Launch the app
if __name__ == "__main__":
+    app.launch()
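
For reference, here is a minimal sketch of the results.csv layout that the new loading code appears to expect, inferred from the rename mapping in this commit (columns model_name, size, grounding_score, quality_score, combined_score). The model names below are taken from the removed hard-coded table; the sizes are read off the names and the score values are illustrative placeholders, not benchmark results:

# Illustrative only: write a results.csv with the column names app.py renames.
import pandas as pd

sample = pd.DataFrame({
    "model_name": ["Qwen/Qwen3-14B", "google/gemma-3-27b-it"],  # names seen in the old hard-coded data
    "size": [14, 27],                                           # parameter count in billions, numeric so get_size_category works
    "grounding_score": [0.90, 0.85],                            # placeholder values
    "quality_score": [0.50, 0.52],                               # placeholder values
    "combined_score": [0.45, 0.44],                              # placeholder values
})
sample.to_csv("results.csv", index=False)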
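
A quick, hypothetical way to sanity-check the refactored pipeline after this change; it assumes app.py sits next to a valid results.csv and that gradio and pandas are installed (importing app.py reads the CSV and builds the Blocks UI as a side effect):

# Hypothetical smoke test, not part of the commit.
from app import filter_and_search_models

table = filter_and_search_models(
    "",                                                          # no search filter
    ["0-5B", "5-10B", "10-20B", "20-40B", "40-80B", ">80B"],     # all size buckets
    "Combined Score",                                            # sort key
)
print(list(table.columns))  # expect Rank, Model Name, Size, plus the three score columns
print(table.head())         # rows ranked by Combined Score, best first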