ChatBot / app.py
import logging
from flask import Flask, request, jsonify
from transformers import pipeline
import gradio as gr
import os
import torch
from huggingface_hub import login
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Log in to Hugging Face
login(token=os.getenv("HF_TOKEN"))  # read the token from the environment
app = Flask(__name__)
# Load the question-answering model
logging.info("Loading nguyenvulebinh/vi-mrc-base...")
qa_pipeline = pipeline(
"question-answering",
model="nguyenvulebinh/vi-mrc-base",
device=0 if torch.cuda.is_available() else -1
)
@app.route("/api/answer", methods=["POST"])
def answer():
try:
data = request.json
question = data.get("question")
context = data.get("context")
logging.info(f"Received request - Question: {question}, Context: {context[:200]}...")
if not question or not context:
logging.error("Missing question or context")
return jsonify({"error": "Missing question or context"}), 400
result = qa_pipeline(question=question, context=context)
logging.info(f"Response - Answer: {result['answer']}")
return jsonify({"answer": result["answer"]})
except Exception as e:
logging.error(f"API error: {e}")
return jsonify({"error": str(e)}), 500
# Gradio interface for manual testing
def gradio_answer(question, context):
    result = qa_pipeline(question=question, context=context)
    return result["answer"]
iface = gr.Interface(
    fn=gradio_answer,
    inputs=["text", "text"],
    outputs="text",
    title="AgriBot: Hỏi đáp nông nghiệp",  # "AgriBot: Agriculture Q&A"
    description="Nhập câu hỏi và ngữ cảnh để nhận câu trả lời về nông nghiệp.",  # "Enter a question and context to get an answer about agriculture."
)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=7860)