vllm-inference / download_model.py
yusufs's picture
feat(hf_token): set hf token during build
493a5f1
raw
history blame
384 Bytes
import os
from typing import Optional

from huggingface_hub import snapshot_download
# Download the Sailor-4B-Chat weights at build time, authenticated with HF_TOKEN.
# os.getenv returns None when the variable is unset, so the annotation is Optional —
# the original `str` annotation contradicted the None check below.
hf_token: Optional[str] = os.getenv("HF_TOKEN")
if hf_token is None:
    raise ValueError("HF_TOKEN is not set")
hf_token = hf_token.strip()
# Reject whitespace-only tokens as well: they would fail authentication later
# with a far less obvious error.
if hf_token == "":
    raise ValueError("HF_TOKEN is empty")

snapshot_download(
    repo_id="sail/Sailor-4B-Chat",
    # Pin to an exact commit hash so rebuilds always fetch identical weights.
    revision="89a866a7041e6ec023dd462adeca8e28dd53c83e",
    token=hf_token,
)