import os
from pathlib import Path

import gradio as gr
import numpy as np
import pandas as pd
from datasets import load_dataset
from huggingface_hub import HfApi, snapshot_download

from leaderboard.constants import example_counts, subset_mapping
from leaderboard.css import custom_css
from leaderboard.md import *
from leaderboard.utils import load_all_data

#######################################################
#                     Setup                           #
#######################################################
api = HfApi()

COLLAB_TOKEN = os.environ.get("COLLAB_TOKEN")
evals_repo = "allenai/reward-bench-2-results"

eval_set_repo = "allenai/reward-bench-2"
eval_set_repo_v1 = "allenai/reward-bench"

repo_dir_rewardbench = "./evals/rewardbench/"

print("Pulling evaluation results")
repo = snapshot_download(
    local_dir=repo_dir_rewardbench,
    ignore_patterns=["pref-sets-scores/*", "eval-set-scores/*", "eval-set/allenai/open_instruct_dev*"],
    repo_id=evals_repo,
    token=COLLAB_TOKEN,
    tqdm_class=None,
    etag_timeout=30,
    repo_type="dataset",
)
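
# snapshot_download mirrors the results dataset into repo_dir_rewardbench (minus the
# ignore_patterns above); load_all_data below then reads the evaluation results from
# the "eval-set" subdirectory of this local copy.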

###########################################
#                 Load Data               #
###########################################


def avg_over_rewardbench_v2(dataframe_core):
    domain_cols = ["Factuality", "Precise IF", "Math", "Safety", "Focus", "Ties"]
    domain_weights = [1, 1, 1, 1, 1, 1]
    new_df = dataframe_core.copy()

    # average the six domain scores with equal weight, masking NaNs so a missing
    # domain does not drag the whole average to NaN
    domain_data = new_df[domain_cols].values
    masked_data = np.ma.masked_array(domain_data, np.isnan(domain_data))

    # Calculate weighted average
    average = np.ma.average(masked_data, axis=1, weights=domain_weights)
    new_df["average"] = average.filled(np.nan)

    # Rearrange columns for consistent output
    keep_columns = ["model", "model_type", "average"] + domain_cols
    new_df = new_df[keep_columns]
    return new_df
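

# Illustrative sketch of avg_over_rewardbench_v2 (comments only, not executed):
# the masked average skips NaNs instead of propagating them. For a row with
#   Factuality=0.8, Precise IF=NaN, Math=0.6, Safety=0.7, Focus=0.9, Ties=0.5
# the result is (0.8 + 0.6 + 0.7 + 0.9 + 0.5) / 5 = 0.70, whereas a plain
# np.average over the raw values would return NaN for the whole row.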


def avg_over_rewardbench(dataframe_core, dataframe_prefs):
    """
    Averages over the subsets alpacaeval, mt-bench, llmbar, refusals, hep and returns dataframe with only these columns.

    We average over 4 core sections (per prompt weighting):
    1. Chat: Includes the easy chat subsets (alpacaeval-easy, alpacaeval-length, alpacaeval-hard, mt-bench-easy, mt-bench-medium)
    2. Chat Hard: Includes the hard chat subsets (mt-bench-hard, llmbar-natural, llmbar-adver-neighbor, llmbar-adver-GPTInst, llmbar-adver-GPTOut, llmbar-adver-manual)
    3. Safety: Includes the safety subsets (refusals-dangerous, refusals-offensive, xstest-should-refuse, xstest-should-respond, do not answer)
    4. Reasoning: Includes the code and math subsets (math-prm, hep-cpp, hep-go, hep-java, hep-js, hep-python, hep-rust)
    5. Prior Sets (0.5 weight): Includes the test sets (anthropic_helpful, mtbench_human, shp, summarize)
    """
    new_df = dataframe_core.copy()
    dataframe_prefs = dataframe_prefs.copy()

    # for main subsets, keys in subset_mapping, take the weighted avg by example_counts and store for the models
    for subset, sub_subsets in subset_mapping.items():
        subset_cols = [col for col in new_df.columns if col in sub_subsets]
        sub_data = new_df[subset_cols].values  # take the relevant column values
        sub_counts = [example_counts[s] for s in subset_cols]  # take the example counts
        new_df[subset] = np.average(sub_data, axis=1, weights=sub_counts)  # take the weighted average
        # new_df[subset] = np.round(np.nanmean(new_df[subset_cols].values, axis=1), 2)

    data_cols = list(subset_mapping.keys())
    keep_columns = (
        [
            "model",
        ]
        + ["model_type"]
        + data_cols
    )
    # keep_columns = ["model", "average"] + subsets
    new_df = new_df[keep_columns]

    # selected average from pref_sets
    pref_columns = ["anthropic_helpful", "anthropic_hhh", "shp", "summarize"]
    pref_data = dataframe_prefs[pref_columns].values

    # average the prior test sets per model; these rows are not identical to new_df, so match by model name below
    dataframe_prefs["Prior Sets (0.5 weight)"] = np.nanmean(pref_data, axis=1)

    # initialize the Prior Sets column in new_df as empty
    new_df["Prior Sets (0.5 weight)"] = np.nan
    # per row in new_df if model is in dataframe_prefs, add the value to new_df["Prior Sets (0.5 weight)"]
    values = []
    for i, row in new_df.iterrows():
        model = row["model"]
        if model in dataframe_prefs["model"].values:
            values.append(dataframe_prefs[dataframe_prefs["model"] == model]["Prior Sets (0.5 weight)"].values[0])
            # new_df.at[i, "Prior Sets (0.5 weight)"] = dataframe_prefs[dataframe_prefs["model"] == model]["Prior Sets (0.5 weight)"].values[0]
        else:
            values.append(np.nan)

    new_df["Prior Sets (0.5 weight)"] = values

    # add total average
    data_cols += ["Prior Sets (0.5 weight)"]
    final_data = new_df[data_cols].values
    masked_data = np.ma.masked_array(final_data, np.isnan(final_data))
    weights = [2, 2, 2, 2, 1]
    average = np.ma.average(masked_data, axis=1, weights=weights)
    new_df["average"] = average.filled(np.nan)
    # new_df["average"] = np.nanmean(new_df[data_cols].values, axis=1)

    # make average third column
    keep_columns = ["model", "model_type", "average"] + data_cols
    new_df = new_df[keep_columns]
    return new_df
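

# Worked example of avg_over_rewardbench's 2/2/2/2/1 weighting (illustrative, not
# executed): a model scoring Chat=90, Chat Hard=80, Safety=70, Reasoning=60, and
# Prior Sets=50 gets (2*90 + 2*80 + 2*70 + 2*60 + 1*50) / 9 = 650 / 9 ≈ 72.2.
# If Prior Sets is NaN, the mask drops both the value and its weight, giving
# (2*90 + 2*80 + 2*70 + 2*60) / 8 = 75.0.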


def prep_df(df):
    # insert a rank/count column at position 0 (the column name itself is empty)
    df.insert(0, "", range(1, 1 + len(df)))

    # replace "model" with "Model" and "model_type" with "Model Type" and "average" with "Average"
    df = df.rename(columns={"model": "Model", "model_type": "Model Type", "average": "Average"})

    # if "Model Type" in columns
    if "Model Type" in df.columns:
        # get model_types that have generative in them
        mask = df["Model Type"].str.contains("generative", case=False, na=False)

        # set these values to "Generative"
        df.loc[mask, "Model Type"] = "Generative"

    return df
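

# For reference (hypothetical input): prep_df turns a frame with columns
# ["model", "model_type", "average"] into ["", "Model", "Model Type", "Average"],
# where "" holds 1-based ranks and any model_type containing "generative" is
# normalized to the single label "Generative".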


# get v1 data
orig_data_path = "leaderboard/final-rbv1-data.csv"
rb_orig_snapshot = pd.read_csv(orig_data_path)
# rename column "Unnamed: 0" to ""
rb_orig_snapshot = rb_orig_snapshot.rename(columns={"Unnamed: 0": ""})
# rb_orig_snapshot = rb_orig_snapshot.drop(columns=["Unnamed: 0", ''])
rb_orig_snapshot.reset_index(drop=True, inplace=True)

rewardbench_data = load_all_data(repo_dir_rewardbench, subdir="eval-set").sort_values(by="average", ascending=False)
rewardbench_data_avg_intermediate = avg_over_rewardbench_v2(rewardbench_data.copy())

# Prepare RBv1 scores for merging
rb_v1_scores_to_merge = rb_orig_snapshot[["Model", "Score"]].copy()

# if " ⚠️" in rb_v1_scores_to_merge["Model"].values, shorten the model name without it
rb_v1_scores_to_merge["Model"] = rb_v1_scores_to_merge["Model"].str.replace(" ⚠️", "", regex=False)

rb_v1_scores_to_merge.rename(columns={"Score": "RBv1"}, inplace=True)
# rename rb_v1 "Model" to "model"
rb_v1_scores_to_merge.rename(columns={"Model": "model"}, inplace=True)

# Merge RBv1 scores into the v2 data
rewardbench_data_avg = pd.merge(rewardbench_data_avg_intermediate, rb_v1_scores_to_merge, on="model", how="left")

# Drop any models with only RBv1 scores and no v2 scores
rewardbench_data_avg = rewardbench_data_avg.dropna(subset=["average"])

# Sort by the v2 average
rewardbench_data_avg = rewardbench_data_avg.sort_values(by="average", ascending=False)


# add count column to all dataframes
rewardbench_data = prep_df(rewardbench_data)
rewardbench_data_avg = prep_df(rewardbench_data_avg).rename(columns={"Average": "Score"})

# Ensure RBv1 is the last column if it's not already (merge usually places it at the end of non-key columns)
# If 'RBv1' is present and not last, move it to be the last column.
if "RBv1" in rewardbench_data_avg.columns:
    rbv1_col = rewardbench_data_avg.pop("RBv1")
    rewardbench_data_avg["RBv1"] = rbv1_col

# save rewardbench_data_avg as csv to leaderboard/current-rbv2-data.csv
v2_data_path = "leaderboard/current-rbv2-data.csv"
rewardbench_data_avg.to_csv(v2_data_path, index=False)

col_types_rewardbench = ["number"] + ["markdown"] + ["str"] + ["number"] * (len(rewardbench_data_avg.columns) - 3)
col_types_rewardbench_v1 = ["number"] + ["markdown"] + ["str"] + ["number"] * (len(rb_orig_snapshot.columns) - 3)
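
# Note: the datatype lists are positional — "number" styles the rank column,
# "markdown" lets the Model column render links, "str" covers Model Type, and the
# remaining entries are "number" for each score column.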


###########################################
# Leaderboard Helpers & Setting           #
###########################################

# for showing random samples
eval_set = load_dataset(eval_set_repo, token=COLLAB_TOKEN, split="test")
eval_set_v1 = load_dataset(eval_set_repo_v1, token=COLLAB_TOKEN, split="filtered")
subsets = eval_set.unique("subset")
subsets_v1 = eval_set_v1.unique("subset")

def random_sample(r: gr.Request, subset):
    if subset is None or subset == []:
        # np.random.randint's upper bound is exclusive, so use len() directly to include the last sample
        sample_index = np.random.randint(0, len(eval_set))
        sample = eval_set[sample_index]
    else:  # filter by subsets (can be list)
        if isinstance(subset, str):
            subset = [subset]
        # filter down dataset to only include the subset(s)
        eval_set_filtered = eval_set.filter(lambda x: x["subset"] in subset)
        sample_index = np.random.randint(0, len(eval_set_filtered))
        sample = eval_set_filtered[sample_index]

    markdown_text = "\n\n".join([f"**{key}**:\n\n{value}" for key, value in sample.items()])
    return markdown_text
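

# Usage sketch for random_sample (assuming a "Safety" subset exists in the eval set):
#   random_sample(None, ["Safety"])
# filters the dataset to that subset, draws one random row, and returns it as
# markdown with one "**key**:\n\nvalue" block per dataset column.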


# Duplicating because they use global variables with gradio setup
def random_sample_v1(r: gr.Request, subset):
    if subset is None or subset == []:
        sample_index = np.random.randint(0, len(eval_set_v1))
        sample = eval_set_v1[sample_index]
    else:  # filter by subsets (can be list)
        if isinstance(subset, str):
            subset = [subset]
        # filter down dataset to only include the subset(s)
        eval_set_filtered = eval_set_v1.filter(lambda x: x["subset"] in subset)
        sample_index = np.random.randint(0, len(eval_set_filtered))
        sample = eval_set_filtered[sample_index]

    markdown_text = "\n\n".join([f"**{key}**:\n\n{value}" for key, value in sample.items()])
    return markdown_text


color_map = {
    "Generative": "#7497db",
    "Custom Classifier": "#E8ECF2",
    "Seq. Classifier": "#ffcd75",
    "DPO": "#75809c",
}


def color_model_type_column(df, color_map):
    """
    Apply color to the 'Model Type' column of the DataFrame based on a given color mapping.

    Parameters:
    df (pd.DataFrame): The DataFrame containing the 'Model Type' column.
    color_map (dict): A dictionary mapping model types to colors.

    Returns:
    Styler: The styled DataFrame.
    """

    # Function to apply color based on the model type
    def apply_color(val):
        color = color_map.get(val, "default")  # Default color if not specified in color_map
        return f"background-color: {color}"

    # Format for different columns
    format_dict = {col: "{:.1f}" for col in df.columns if col not in ["Average", "Model", "Model Type"]}
    format_dict["Average"] = "{:.2f}"
    format_dict[""] = "{:d}"

    return df.style.applymap(apply_color, subset=["Model Type"]).format(format_dict, na_rep="")
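

# Note on the formatting above: format_dict renders the rank column as an integer,
# "Average" with two decimals, and every other numeric column with one decimal;
# na_rep="" leaves missing cells blank instead of printing "nan".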


def regex_table(dataframe, regex, filter_button, style=True):
    """
    Takes comma-separated model-name regexes and returns only the rows whose model name matches any of them.
    """
    # Split regex statement by comma and trim whitespace around regexes
    regex_list = [x.strip() for x in regex.split(",")]
    # Join the list into a single regex pattern with '|' acting as OR
    combined_regex = "|".join(regex_list)

    # remove internal ai2 data
    dataframe = dataframe[~dataframe["Model"].str.contains("ai2", case=False, na=False)]

    # apply the checkbox-group filters: which score columns to keep and which model types to include
    update_scores = False
    if isinstance(filter_button, (list, str)):
        if "Prior Sets" not in filter_button and "Prior Sets (0.5 weight)" in dataframe.columns:
            update_scores = True
            # remove the column "Prior Sets (0.5 weight)" from the outputted table
            dataframe = dataframe.drop(columns=["Prior Sets (0.5 weight)"])
        if "RBv1" not in filter_button and "RBv1" in dataframe.columns:
            # remove the column "Prior Sets (0.5 weight)" from the outputted table
            dataframe = dataframe.drop(columns=["RBv1"])
        if "Seq. Classifiers" not in filter_button:
            dataframe = dataframe[~dataframe["Model Type"].str.contains("Seq. Classifier", case=False, na=False)]
        if "DPO" not in filter_button:
            dataframe = dataframe[~dataframe["Model Type"].str.contains("DPO", case=False, na=False)]
        if "Custom Classifiers" not in filter_button:
            dataframe = dataframe[~dataframe["Model Type"].str.contains("Custom Classifier", case=False, na=False)]
        if "Generative" not in filter_button:
            dataframe = dataframe[~dataframe["Model Type"].str.contains("generative", case=False, na=False)]
    # Filter the dataframe such that 'model' contains any of the regex patterns
    data = dataframe[dataframe["Model"].str.contains(combined_regex, case=False, na=False)]

    # recompute Score without the Prior Sets column when it has been removed
    if update_scores:
        data["Score"] = (data["Chat"] + data["Chat Hard"] + data["Safety"] + data["Reasoning"]) / 4
        # if "Prior Sets (0.5 weight)" in data.columns:
        # data["Prior Sets (0.5 weight)"] = np.nan
        # sort array by Score column
        data = data.sort_values(by="Score", ascending=False)

    data.reset_index(drop=True, inplace=True)

    # replace column '' with count/rank
    data[""] = np.arange(1, 1 + len(data))

    # if Score exists, round to 2 decimals
    if "Score" in data.columns:
        data["Score"] = np.round(np.array(data["Score"].values).astype(float), 2)
    if "Average" in data.columns:
        data["Average"] = np.round(np.array(data["Average"].values).astype(float), 1)
    # round all others to 1 decimal
    for col in data.columns:
        if col not in ["", "Model", "Model Type", "Score", "Average"]:
            # replace any data[col].values == '' with np.nan
            data[col] = data[col].replace("", np.nan)
            data[col] = np.round(np.array(data[col].values).astype(float), 1)
    if style:
        # apply color
        data = color_model_type_column(data, color_map)

    return data
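

# Usage sketch: comma-delimited patterns are ORed together, so
#   regex_table(df, "llama, qwen", ["Seq. Classifiers", "Generative"])
# keeps rows whose Model matches "llama" or "qwen" (case-insensitive) within the
# selected model types, re-ranks them, and returns a styled table (a plain
# DataFrame when style=False).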



total_models = len(
    regex_table(
        rewardbench_data_avg.copy(), "", ["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative"], style=False
    ).values
)
total_models_v1 = len(
    regex_table(
        rb_orig_snapshot.copy(), "", ["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative"], style=False
    ).values
)
assets = Path("leaderboard").resolve()  # absolute dir with the image

# Using a string for a predefined color
theme = gr.themes.Default(primary_hue="blue")

#############################################
#                 Gradio App                #
#############################################

with gr.Blocks(theme=theme, css=custom_css) as app:
    # tabs for the app: the leaderboard tables live under "RewardBench 2" / "RewardBench", with About and Dataset Viewer tabs nested inside each
    with gr.Row():
        with gr.Column(scale=6):
            gr.Markdown(TOP_TEXT)
        # with gr.Column(scale=4):
        #     # search = gr.Textbox(label="Model Search (delimit with , )", placeholder="Regex search for a model")
        #     # filter_button = gr.Checkbox(label="Include AI2 training runs (or type ai2 above).", interactive=True)
        #     # img = gr.Image(value="https://private-user-images.githubusercontent.com/10695622/310698241-24ed272a-0844-451f-b414-fde57478703e.png", width=500)
        #     gr.Markdown("""
        #                 ![](/gradio_api/file=leaderboard/logo.png)
        #                 """)

    with gr.Tabs(elem_id="outer-tabs", elem_classes="tabs-big") as tabs_big:
        with gr.TabItem("🏆 RewardBench 2"):
            with gr.Row():
                with gr.Column(scale=7):
                    gr.Markdown(CAPTION_V2.format(str(total_models)))
                with gr.Column(scale=3):
                    # search = gr.Textbox(label="Model Search (delimit with , )", placeholder="Regex search for a model")
                    # filter_button = gr.Checkbox(label="Include AI2 training runs (or type ai2 above).", interactive=True)
                    # img = gr.Image(value="https://private-user-images.githubusercontent.com/10695622/310698241-24ed272a-0844-451f-b414-fde57478703e.png", width=500)
                    gr.Markdown(
                        """
                                ![](/gradio_api/file=leaderboard/logo.png)
                                """
                    )
            with gr.Tabs(elem_id="inner-tabs", elem_classes="tabs-small") as tabs:
                with gr.TabItem("Leaderboard"):
                    with gr.Row():
                        search_1 = gr.Textbox(
                            label="Model Search (delimit with , )",
                            placeholder="Model Search (delimit with , )",
                            show_label=False,
                            scale=8,
                        )
                        model_types_1 = gr.CheckboxGroup(
                            ["Seq. Classifiers", "Custom Classifiers", "Generative", "RBv1"],
                            value=["Seq. Classifiers", "Custom Classifiers", "Generative"],
                            show_label=False,
                            scale=8,
                        )
                        # narrow, non-expanding download button
                        gr.DownloadButton(
                            label="Download CSV",
                            value=v2_data_path,
                            size="sm",  # shorter height / padding
                            scale=0,  # width stays just big enough for the text
                            min_width=140,  # guarantee it doesn't collapse
                        )
                    with gr.Row():
                        # reference data
                        rewardbench_table_hidden = gr.Dataframe(
                            rewardbench_data_avg.values,
                            datatype=col_types_rewardbench,
                            headers=rewardbench_data_avg.columns.tolist(),
                            visible=False,
                        )
                        rewardbench_table = gr.Dataframe(
                            regex_table(
                                rewardbench_data_avg.copy(),
                                "",
                                ["Seq. Classifiers", "Custom Classifiers", "Generative"],
                            ),
                            datatype=col_types_rewardbench,
                            headers=rewardbench_data_avg.columns.tolist(),
                            elem_id="rewardbench_dataframe_avg",
                            height=800,  # 800 px ≈ 25 rows at the default row height
                        )

                with gr.TabItem("About"):
                    with gr.Row():
                        gr.Markdown(ABOUT_TEXT_V2)

                with gr.TabItem("Dataset Viewer"):
                    with gr.Row():
                        # loads one sample
                        gr.Markdown("""## Random Dataset Sample Viewer""")
                        subset_selector = gr.Dropdown(subsets, label="Subset", value=None, multiselect=True)
                        button_data = gr.Button("Show Random Sample")

                    with gr.Row():
                        sample_display = gr.Markdown("{sampled data loads here}")

                    button_data.click(fn=random_sample, inputs=[subset_selector], outputs=[sample_display])
        with gr.TabItem("RewardBench"):
            with gr.Row():
                gr.Markdown(CAPTION_V1.format(str(total_models_v1)))
            with gr.Tabs(elem_id="inner-tabs", elem_classes="tabs-small") as tabs:
                with gr.TabItem("Leaderboard"):
                    with gr.Row():
                        search_1_v1 = gr.Textbox(
                            label="Model Search (delimit with , )",
                            placeholder="Model Search (delimit with , )",
                            show_label=False,
                        )
                        model_types_1_v1 = gr.CheckboxGroup(
                            ["Seq. Classifiers", "DPO", "Custom Classifiers", "Generative"],
                            value=["Seq. Classifiers", "Custom Classifiers", "Generative"],
                            label="Model Types",
                            show_label=False,
                            #  info="Which model types to include.",
                        )
                        # narrow, non-expanding download button
                        gr.DownloadButton(
                            label="Download CSV",
                            value=orig_data_path,
                            size="sm",  # shorter height / padding
                            scale=0,  # width stays just big enough for the text
                            min_width=140,  # guarantee it doesn't collapse
                        )
                    with gr.Row():
                        # reference data
                        rewardbench_table_hidden_v1 = gr.Dataframe(
                            rb_orig_snapshot.values,
                            datatype=col_types_rewardbench_v1,
                            headers=rb_orig_snapshot.columns.tolist(),
                            visible=False,
                        )
                        rewardbench_table_v1 = gr.Dataframe(
                            regex_table(
                                rb_orig_snapshot.copy(),
                                "",
                                ["Seq. Classifiers", "Custom Classifiers", "Generative"],
                            ),
                            datatype=col_types_rewardbench_v1,
                            headers=rb_orig_snapshot.columns.tolist(),
                            elem_id="rewardbench_dataframe_avg_v1",
                            height=800,  # 800 px ≈ 25 rows at the default row height
                        )
                with gr.TabItem("About"):
                    with gr.Row():
                        gr.Markdown(ABOUT_TEXT_V1)

                with gr.TabItem("Dataset Viewer"):
                    with gr.Row():
                        # loads one sample
                        gr.Markdown("""## Random Dataset Sample Viewer""")
                        subset_selector_v1 = gr.Dropdown(subsets_v1, label="Subset", value=None, multiselect=True)
                        button_data_v1 = gr.Button("Show Random Sample")

                    with gr.Row():
                        sample_display_v1 = gr.Markdown("{sampled data loads here}")

                    button_data_v1.click(fn=random_sample_v1, inputs=[subset_selector_v1], outputs=[sample_display_v1])

    search_1.change(regex_table, inputs=[rewardbench_table_hidden, search_1, model_types_1], outputs=rewardbench_table)
    search_1_v1.change(
        regex_table, inputs=[rewardbench_table_hidden_v1, search_1_v1, model_types_1_v1], outputs=rewardbench_table_v1
    )

    model_types_1.change(
        regex_table, inputs=[rewardbench_table_hidden, search_1, model_types_1], outputs=rewardbench_table
    )
    model_types_1_v1.change(
        regex_table, inputs=[rewardbench_table_hidden_v1, search_1_v1, model_types_1_v1], outputs=rewardbench_table_v1
    )

    with gr.Row():
        with gr.Accordion("📚 Citation", open=False):
            citation_button = gr.Textbox(
                value=r"""@misc{RewardBench2,
    title={RewardBench 2: Advancing Reward Model Evaluation},
    author={Malik, Saumya and Pyatkin, Valentina and Land, Sander and Morrison, Jacob and Smith, Noah A. and Hajishirzi, Hannaneh and Lambert, Nathan},
    year={2025},
    howpublished={\url{https://huggingface.co/spaces/allenai/reward-bench}},
}

@misc{RewardBench,
    title={RewardBench: Evaluating Reward Models for Language Modeling},
    author={Lambert, Nathan and Pyatkin, Valentina and Morrison, Jacob and Miranda, LJ and Lin, Bill Yuchen and Chandu, Khyathi and Dziri, Nouha and Kumar, Sachin and Zick, Tom and Choi, Yejin and Smith, Noah A. and Hajishirzi, Hannaneh},
    year={2024},
    howpublished={\url{https://huggingface.co/spaces/allenai/reward-bench}},
}""",
                lines=7,
                label="Copy the following to cite these results.",
                elem_id="citation-button",
                show_copy_button=True,
            )

app.launch(allowed_paths=[str(assets)])  # previously called .queue() before launch; it does not appear to be necessary