|
import logging |
|
from transformers import pipeline |
|
import os |
|
from huggingface_hub import login |
|
from fastapi import FastAPI, Request |
|
from fastapi.responses import JSONResponse |
|
import uvicorn |
|
import torch |
|
|
|
|
|
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


# Authenticate with the Hugging Face Hub only when a token is actually
# provided. login(token=None) falls back to an interactive prompt, which
# fails in a headless server environment; the model loaded below is public,
# so authentication is optional.
hf_token = os.getenv("HF_TOKEN")
if hf_token:
    try:
        login(token=hf_token)
        logging.info("Logged in to Hugging Face Hub successfully")
    except Exception as e:
        logging.error(f"Failed to login to Hugging Face Hub: {e}")
        raise
else:
    logging.warning("HF_TOKEN not set; skipping Hugging Face Hub login")


# Load the Vietnamese machine-reading-comprehension QA model once at startup
# so requests don't pay the load cost. device=0 selects the first CUDA GPU
# when available; -1 forces CPU.
logging.info("Loading nguyenvulebinh/vi-mrc-base...")
try:
    qa_pipeline = pipeline(
        "question-answering",
        model="nguyenvulebinh/vi-mrc-base",
        device=0 if torch.cuda.is_available() else -1
    )
    logging.info("Model loaded successfully")
except Exception as e:
    # A failed load is fatal: the endpoint below cannot serve without it.
    logging.error(f"Failed to load model: {e}")
    raise


app = FastAPI()
|
|
|
@app.post("/api/answer")
async def api_answer(request: Request):
    """Answer a question against a Vietnamese context passage.

    Expects a JSON body: {"question": str, "context": str (optional)}.
    Returns {"answer": str} on success; {"error": str} with status 400
    for bad client input or 500 for unexpected server failures.
    """
    try:
        # A malformed/non-JSON body is a client error (400), not a server
        # error — don't let it fall through to the generic 500 handler.
        try:
            data = await request.json()
        except Exception:
            logging.error("Request body is not valid JSON")
            return JSONResponse({"error": "Invalid JSON body"}, status_code=400)

        question = data.get("question")
        # Fall back to a generic Vietnamese agriculture passage when the
        # caller supplies no context.
        context = data.get("context", "Cây lúa là một loại cây trồng phổ biến ở Việt Nam, cần điều kiện đất và nước phù hợp.")

        if not question:
            logging.error("Missing question")
            return JSONResponse({"error": "Missing question"}, status_code=400)

        # Truncate the context in the log to keep lines readable.
        logging.info(f"Received request - Question: {question}, Context: {context[:200]}...")

        result = qa_pipeline(question=question, context=context)
        logging.info(f"Response - Answer: {result['answer']}")
        return JSONResponse({"answer": result["answer"]})
    except Exception as e:
        # Boundary handler: log the full traceback, surface the message.
        logging.exception("API error")
        return JSONResponse({"error": str(e)}, status_code=500)
|
|
|
if __name__ == "__main__":
    logging.info("Starting FastAPI...")
    # Honor a PORT override (common on hosting platforms); the default of
    # 8000 matches uvicorn's own default, so behavior is unchanged when
    # PORT is unset.
    uvicorn.run(app, host="0.0.0.0", port=int(os.getenv("PORT", "8000")))