import json
import os
import numpy as np
import pandas as pd
import uvicorn
from countries import make_country_table
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.gzip import GZipMiddleware
from fastapi.responses import JSONResponse
from fastapi.staticfiles import StaticFiles
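
# Load evaluation scores plus model and language metadata from JSON files.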
scores = pd.read_json("results.json")
languages = pd.read_json("languages.json")
models = pd.read_json("models.json")


def mean(lst):
    return sum(lst) / len(lst) if lst else None
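
# e.g. mean([1, 2, 3]) -> 2.0; mean([]) -> None (no ZeroDivisionError on empty input)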


task_metrics = [
    "translation_from_bleu",
    "translation_to_bleu",
    "classification_accuracy",
    "mmlu_accuracy",
    "mgsm_accuracy",
]

# Only these metrics feed the headline "average" column.
task_metrics_basic = [
    "translation_from_bleu",
    "translation_to_bleu",
    "classification_accuracy",
]


def compute_normalized_average(df, metrics):
    """Compute the row-wise average of min-max normalized metric columns."""
    normalized_df = df[metrics].copy()
    for col in metrics:
        if col in normalized_df.columns:
            col_min = normalized_df[col].min()
            col_max = normalized_df[col].max()
            if col_max > col_min:  # avoid division by zero
                normalized_df[col] = (normalized_df[col] - col_min) / (col_max - col_min)
            else:
                normalized_df[col] = 0  # constant column: no spread to normalize
    return normalized_df.mean(axis=1, skipna=False)
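
# A quick sanity check with hypothetical values: a constant column normalizes
# to 0, so it lowers the average rather than being skipped.
#   compute_normalized_average(
#       pd.DataFrame({"a": [0.0, 0.5, 1.0], "b": [10.0, 10.0, 10.0]}), ["a", "b"]
#   )
#   -> Series([0.0, 0.25, 0.5])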


def make_model_table(df, models):
    """Aggregate per-model scores into one ranked row per model.

    Expects df with columns: model, bcp_47, task, metric, score.
    """
    df = (
        df.groupby(["model", "task", "metric"])
        .agg({"score": "mean", "bcp_47": "nunique"})
        .reset_index()
    )
    df["task_metric"] = df["task"] + "_" + df["metric"]
    df = df.drop(columns=["task", "metric"])
    df = df.pivot(index="model", columns="task_metric", values="score")
    for metric in task_metrics:
        if metric not in df.columns:
            df[metric] = np.nan
    df["average"] = compute_normalized_average(df, task_metrics_basic)
    df = df.sort_values(by="average", ascending=False).reset_index()
    df = pd.merge(df, models, left_on="model", right_on="id", how="left")
    df["rank"] = df.index + 1
    df = df[
        [
            "rank",
            "model",
            "name",
            "provider_name",
            "hf_id",
            "creation_date",
            "size",
            "type",
            "license",
            "cost",
            "average",
            *task_metrics,
        ]
    ]
    return df


def make_language_table(df, languages):
    """Aggregate per-language scores and join them with language metadata."""
    df = (
        df.groupby(["bcp_47", "task", "metric"])
        .agg({"score": "mean", "model": "nunique"})
        .reset_index()
    )
    df["task_metric"] = df["task"] + "_" + df["metric"]
    df = df.drop(columns=["task", "metric"])
    df = df.pivot(index="bcp_47", columns="task_metric", values="score").reset_index()
    for metric in task_metrics:
        if metric not in df.columns:
            df[metric] = np.nan
    df["average"] = compute_normalized_average(df, task_metrics_basic)
    # Outer merge keeps languages that have no scores yet.
    df = pd.merge(languages, df, on="bcp_47", how="outer")
    df = df.sort_values(by="speakers", ascending=False)
    df = df[
        [
            "bcp_47",
            "language_name",
            "autonym",
            "speakers",
            "family",
            "average",
            "in_benchmark",
            *task_metrics,
        ]
    ]
    return df


app = FastAPI()
# Allow cross-origin requests (e.g. from a dev frontend on localhost:3000)
# and gzip-compress larger responses.
app.add_middleware(CORSMiddleware, allow_origins=["*"])
app.add_middleware(GZipMiddleware, minimum_size=1000)


def serialize(df):
    # NaN is not valid JSON, so replace it with None (-> null) before dumping.
    return df.replace({np.nan: None}).to_dict(orient="records")
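
# e.g. serialize(pd.DataFrame({"x": [1.0, np.nan]})) -> [{"x": 1.0}, {"x": None}]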


@app.post("/api/data")
async def data(request: Request):
    body = await request.body()
    data = json.loads(body)
    # The frontend sends a list of language objects, each with a "bcp_47" key.
    selected_languages = data.get("selectedLanguages", [])
    # numeric_only keeps the aggregation robust if non-numeric columns are present.
    df = (
        scores.groupby(["model", "bcp_47", "task", "metric"])
        .mean(numeric_only=True)
        .reset_index()
    )
    language_table = make_language_table(df, languages)
    datasets_df = pd.read_json("datasets.json")
    if selected_languages:
        # The filter only applies to the model table and the country data.
        df = df[df["bcp_47"].isin([lang["bcp_47"] for lang in selected_languages])]
    if len(df) == 0:
        model_table = pd.DataFrame()
        countries = pd.DataFrame()
    else:
        model_table = make_model_table(df, models)
        countries = make_country_table(make_language_table(df, languages))
    all_tables = {
        "model_table": serialize(model_table),
        "language_table": serialize(language_table),
        "dataset_table": serialize(datasets_df),
        "countries": serialize(countries),
    }
    return JSONResponse(content=all_tables)
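
# Example request (assuming the server is running locally on port 8000):
#   curl -X POST http://localhost:8000/api/data \
#     -H "Content-Type: application/json" \
#     -d '{"selectedLanguages": [{"bcp_47": "en"}]}'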


# Only serve static files if the build directory exists (production mode).
if os.path.exists("frontend/build"):
    app.mount("/", StaticFiles(directory="frontend/build", html=True), name="frontend")
else:
    print("🧪 Development mode: frontend/build directory not found")
    print("🌐 Frontend should be running on http://localhost:3000")
    print("💡 API available at http://localhost:8000/api/data")

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=int(os.environ.get("PORT", 8000)))