#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
https://docs.byteplus.com/en/docs/ModelArk/1099455
model list
https://docs.byteplus.com/en/docs/ModelArk/1330310
"""
import argparse
from datetime import datetime
import json
import os
from pathlib import Path
import sys
import time
from zoneinfo import ZoneInfo  # bundled with Python 3.9+; no extra install needed
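
# Put the script's parent directory on sys.path so `project_settings` imports.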
pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, "../"))
from openai import OpenAI
from project_settings import environment, project_path


def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="seedance-1-0-lite-t2v-250428",
type=str
)
parser.add_argument(
"--eval_dataset_name",
default="arc-easy-1000-choice.jsonl",
type=str
)
parser.add_argument(
"--eval_dataset_dir",
default=(project_path / "data/dataset").as_posix(),
type=str
)
parser.add_argument(
"--eval_data_dir",
default=(project_path / "data/eval_data").as_posix(),
type=str
)
parser.add_argument(
"--client",
default="shenzhen_sase",
type=str
)
parser.add_argument(
"--service",
default="byteplus_api_key",
type=str
)
args = parser.parse_args()
return args
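

# Example invocation (the script filename below is a placeholder; the values
# shown are the argparse defaults above):
#
#   python3 eval_byteplus_chat.py \
#       --eval_dataset_name arc-easy-1000-choice.jsonl \
#       --service byteplus_api_key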


def main():
args = get_args()
eval_dataset_dir = Path(args.eval_dataset_dir)
eval_dataset_dir.mkdir(parents=True, exist_ok=True)
eval_data_dir = Path(args.eval_data_dir)
eval_data_dir.mkdir(parents=True, exist_ok=True)
tz = ZoneInfo("Asia/Shanghai")
now = datetime.now(tz)
create_time_str = now.strftime("%Y%m%d_%H%M%S")
eval_dataset = eval_dataset_dir / args.eval_dataset_name
    output_file = eval_data_dir / f"byteplus/{args.model_name}/{args.client}/{args.service}/{create_time_str}/{args.eval_dataset_name}"
output_file.parent.mkdir(parents=True, exist_ok=True)
api_key = environment.get(args.service, dtype=str)
client = OpenAI(
base_url="https://ark.ap-southeast.bytepluses.com/api/v3",
        # The Ark API key is resolved above from project_settings via --service.
api_key=api_key
)
total = 0
total_correct = 0
    # Resume support: if an earlier run left a partial output file, reload the
    # finished indices and running totals so completed samples are skipped.
    finished_idx_set = set()
    if output_file.exists():
        with open(output_file, "r", encoding="utf-8") as f:
for row in f:
row = json.loads(row)
idx = row["idx"]
total = row["total"]
total_correct = row["total_correct"]
finished_idx_set.add(idx)
print(f"finished count: {len(finished_idx_set)}")
    with open(eval_dataset, "r", encoding="utf-8") as fin, \
            open(output_file, "a+", encoding="utf-8") as fout:
for row in fin:
row = json.loads(row)
idx = row["idx"]
prompt = row["prompt"]
response = row["response"]
if idx in finished_idx_set:
continue
finished_idx_set.add(idx)
try:
time_begin = time.time()
completion = client.chat.completions.create(
                    # Model name or inference endpoint ID (from --model_name).
model=args.model_name,
messages=[
{"role": "user", "content": prompt},
]
)
time_cost = time.time() - time_begin
print(f"time_cost: {time_cost}")
except Exception as e:
print(f"request failed, error type: {type(e)}, error text: {str(e)}")
continue
prediction = completion.choices[0].message.content
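            # Exact string match against the reference answer; any extra text
            # in the completion (e.g. an explanation) counts as incorrect.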
correct = 1 if prediction == response else 0
total += 1
total_correct += correct
score = total_correct / total
row_ = {
"idx": idx,
"prompt": prompt,
"response": response,
"prediction": prediction,
"correct": correct,
"total": total,
"total_correct": total_correct,
"score": score,
"time_cost": time_cost,
}
row_ = json.dumps(row_, ensure_ascii=False)
fout.write(f"{row_}\n")
return
if __name__ == "__main__":
main()