# ChatBot / app.py
# (Hugging Face Space file — commit 26c5e44 "Update app.py" by dat257, 2.21 kB)
import logging
import os
import threading

import gradio as gr
import torch
from flask import Flask, jsonify, request
from huggingface_hub import login
from transformers import pipeline
# Configure root logging: timestamped INFO-level messages for the whole app.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# Log in to the Hugging Face Hub only when a token is actually provided.
# The model loaded below (nguyenvulebinh/vi-mrc-base) is public, so a missing
# HF_TOKEN should not be fatal; the original passed token=None to login()
# and re-raised on failure, killing the app when the env var was unset.
hf_token = os.getenv("HF_TOKEN")
if hf_token:
    try:
        login(token=hf_token)
        logging.info("Logged in to Hugging Face Hub successfully")
    except Exception as e:
        # A bad/expired token is a real configuration error — surface it.
        logging.error(f"Failed to login to Hugging Face Hub: {e}")
        raise
else:
    logging.warning("HF_TOKEN not set; skipping Hugging Face Hub login")
# Load the Vietnamese machine-reading-comprehension QA model once at startup.
logging.info("Loading nguyenvulebinh/vi-mrc-base...")
try:
    # Use GPU 0 when CUDA is available, otherwise fall back to CPU (-1).
    _device = 0 if torch.cuda.is_available() else -1
    qa_pipeline = pipeline(
        "question-answering",
        model="nguyenvulebinh/vi-mrc-base",
        device=_device,
    )
except Exception as e:
    # Startup cannot proceed without the model — log and abort.
    logging.error(f"Failed to load model: {e}")
    raise
else:
    logging.info("Model loaded successfully")
# Handler shared by the Gradio UI.
def gradio_answer(question, context):
    """Run extractive QA over *context* and return the answer string."""
    prediction = qa_pipeline(question=question, context=context)
    return prediction["answer"]
# Flask app exposing the QA model as a JSON API.
app = Flask(__name__)

@app.route("/api/answer", methods=["POST"])
def answer():
    """Answer a question from a given context.

    Expects a JSON body {"question": str, "context": str} and returns
    {"answer": str} on success, or {"error": ...} with HTTP 400/500.
    """
    try:
        # get_json(silent=True) returns None instead of raising on a
        # missing/invalid JSON body, so malformed requests get a clean 400.
        data = request.get_json(silent=True) or {}
        question = data.get("question")
        context = data.get("context")
        # Validate BEFORE logging: the original logged context[:200] first,
        # which raised TypeError (-> 500) when context was None instead of
        # returning the intended 400.
        if not question or not context:
            logging.error("Missing question or context")
            return jsonify({"error": "Missing question or context"}), 400
        logging.info(f"Received request - Question: {question}, Context: {context[:200]}...")
        result = qa_pipeline(question=question, context=context)
        logging.info(f"Response - Answer: {result['answer']}")
        return jsonify({"answer": result["answer"]})
    except Exception as e:
        # API boundary: log the failure and convert it to a 500 JSON error.
        logging.error(f"API error: {e}")
        return jsonify({"error": str(e)}), 500
# Build the Gradio UI: two text inputs (question, context) -> one text output.
iface = gr.Interface(
    fn=gradio_answer,
    inputs=["text", "text"],
    outputs="text",
    title="AgriBot: Hỏi đáp nông nghiệp",
    description="Nhập câu hỏi và ngữ cảnh để nhận câu trả lời về nông nghiệp."
)
# gr.Interface.launch() has no `app` parameter, so the original call raised
# TypeError at startup and the Flask API was never served. Instead, serve the
# Flask API on its own port in a daemon thread, then start Gradio as usual.
threading.Thread(
    target=lambda: app.run(host="0.0.0.0", port=5000),
    daemon=True,  # do not block process exit when Gradio shuts down
).start()
iface.launch(server_name="0.0.0.0", server_port=7860)