Update app.py
app.py CHANGED
@@ -107,7 +107,7 @@ with demo:
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
 
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
-        with gr.TabItem("🏅
+        with gr.TabItem("🏅 MLE-Dojo Benchmark", elem_id="llm-benchmark-tab-table", id=0):
             with gr.Column():
                 gr.Markdown("## Model Elo Rankings")  # New title for the section
                 category_selector = gr.Radio(
@@ -138,92 +138,92 @@ with demo:
             # (Content unchanged)
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
-        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-            # (Content unchanged, still uses potentially empty/mock queue data)
-            with gr.Column():
-                with gr.Row():
-                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-                with gr.Column():
-                    with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            finished_eval_table = gr.components.Dataframe(
-                                value=finished_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            running_eval_table = gr.components.Dataframe(
-                                value=running_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-                    with gr.Accordion(
-                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            pending_eval_table = gr.components.Dataframe(
-                                value=pending_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-
-            with gr.Row():
-                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-            with gr.Row():
-                # Submission form - kept as is
-                with gr.Column():
-                    model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                    model_type = gr.Dropdown(
-                        choices=["Type A", "Type B", "Type C"],  # Example choices
-                        label="Model type",
-                        multiselect=False,
-                        value=None,
-                        interactive=True,
-                    )
-                with gr.Column():
-                    precision = gr.Dropdown(
-                        choices=["float16", "bfloat16", "float32", "int8"],  # Example choices
-                        label="Precision",
-                        multiselect=False,
-                        value="float16",
-                        interactive=True,
-                    )
-                    weight_type = gr.Dropdown(
-                        choices=["Original", "Adapter", "Delta"],  # Example choices
-                        label="Weights type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-            submit_button = gr.Button("Submit Eval")
-            submission_result = gr.Markdown()
-
-            submit_button.click(
-                add_new_eval,
-                [
-                    model_name_textbox,
-                    base_model_name_textbox,
-                    revision_name_textbox,
-                    precision,
-                    weight_type,
-                    model_type,
-                ],
-                submission_result,
-            )
+        # with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
+        #     # (Content unchanged, still uses potentially empty/mock queue data)
+        #     with gr.Column():
+        #         with gr.Row():
+        #             gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+        #         with gr.Column():
+        #             with gr.Accordion(
+        #                 f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     finished_eval_table = gr.components.Dataframe(
+        #                         value=finished_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+        #             with gr.Accordion(
+        #                 f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     running_eval_table = gr.components.Dataframe(
+        #                         value=running_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+        #             with gr.Accordion(
+        #                 f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     pending_eval_table = gr.components.Dataframe(
+        #                         value=pending_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+
+        #     with gr.Row():
+        #         gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+        #     with gr.Row():
+        #         # Submission form - kept as is
+        #         with gr.Column():
+        #             model_name_textbox = gr.Textbox(label="Model name")
+        #             revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+        #             model_type = gr.Dropdown(
+        #                 choices=["Type A", "Type B", "Type C"],  # Example choices
+        #                 label="Model type",
+        #                 multiselect=False,
+        #                 value=None,
+        #                 interactive=True,
+        #             )
+        #         with gr.Column():
+        #             precision = gr.Dropdown(
+        #                 choices=["float16", "bfloat16", "float32", "int8"],  # Example choices
+        #                 label="Precision",
+        #                 multiselect=False,
+        #                 value="float16",
+        #                 interactive=True,
+        #             )
+        #             weight_type = gr.Dropdown(
+        #                 choices=["Original", "Adapter", "Delta"],  # Example choices
+        #                 label="Weights type",
+        #                 multiselect=False,
+        #                 value="Original",
+        #                 interactive=True,
+        #             )
+        #             base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+
+        #     submit_button = gr.Button("Submit Eval")
+        #     submission_result = gr.Markdown()
+
+        #     submit_button.click(
+        #         add_new_eval,
+        #         [
+        #             model_name_textbox,
+        #             base_model_name_textbox,
+        #             revision_name_textbox,
+        #             precision,
+        #             weight_type,
+        #             model_type,
+        #         ],
+        #         submission_result,
+        #     )
 
     with gr.Row():
         with gr.Accordion("📙 Citation", open=False):
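For context on the block this commit comments out: below is a minimal, self-contained sketch of the same Gradio pattern, where submit_button.click(fn, inputs, outputs) routes the form values into a handler and writes its return value to a Markdown component. The add_new_eval stub is a placeholder assumption here (the Space's real handler is defined elsewhere in the repo), so the snippet runs on its own.

# Minimal sketch of the submission-form wiring shown (commented out) in the diff above.
# NOTE: `add_new_eval` below is a stand-in stub, not the Space's actual handler.
import gradio as gr

def add_new_eval(model, base_model, revision, precision, weight_type, model_type):
    # Placeholder: a real handler would validate the request and append it to the eval queue.
    return f"Queued `{model}` (revision `{revision or 'main'}`, {precision}, {weight_type}, {model_type})"

with gr.Blocks() as demo:
    gr.Markdown("# ✉️✨ Submit your model here!")
    with gr.Row():
        with gr.Column():
            model_name_textbox = gr.Textbox(label="Model name")
            revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
            model_type = gr.Dropdown(choices=["Type A", "Type B", "Type C"], label="Model type")
        with gr.Column():
            precision = gr.Dropdown(choices=["float16", "bfloat16", "float32", "int8"], value="float16", label="Precision")
            weight_type = gr.Dropdown(choices=["Original", "Adapter", "Delta"], value="Original", label="Weights type")
            base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")

    submit_button = gr.Button("Submit Eval")
    submission_result = gr.Markdown()

    # click(fn, inputs, outputs): the component values are passed to fn in order,
    # and fn's return value populates `submission_result`.
    submit_button.click(
        add_new_eval,
        [model_name_textbox, base_model_name_textbox, revision_name_textbox, precision, weight_type, model_type],
        submission_result,
    )

if __name__ == "__main__":
    demo.launch()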