import os
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch

# Point the Hugging Face cache at a directory the runtime can write to
os.makedirs("/tmp/hf-cache", exist_ok=True)
os.environ["HF_HOME"] = "/tmp/hf-cache"             # current cache variable
os.environ["TRANSFORMERS_CACHE"] = "/tmp/hf-cache"  # deprecated alias, kept for older transformers versions

# Use a publicly available model; the tokenizer and weights are downloaded at startup
model_name = "VietAI/vit5-base"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

app = FastAPI()

class InputData(BaseModel):
    input: str

@app.post("/predict")
async def predict(data: InputData):
    # Tokenize the input text, truncating to the model's 512-token limit
    input_ids = tokenizer.encode(data.input, return_tensors="pt", max_length=512, truncation=True)
    # Beam-search generation; gradients are not needed at inference time
    with torch.no_grad():
        output_ids = model.generate(input_ids, max_length=128, num_beams=4, early_stopping=True)
    output = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    return {"output": output}
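
# Usage sketch (assumptions: this file is saved as app.py and served with
# uvicorn; the host/port below are illustrative, e.g. 7860 is the default
# port on Hugging Face Spaces):
#
#   uvicorn app:app --host 0.0.0.0 --port 7860
#
#   curl -X POST http://localhost:7860/predict \
#        -H "Content-Type: application/json" \
#        -d '{"input": "Xin chào"}'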