|
import logging |
|
from flask import Flask, request, jsonify |
|
from transformers import pipeline |
|
import gradio as gr |
|
import os |
|
import torch |
|
from huggingface_hub import login |
|
|
|
|
|
# Configure root logging so startup and request events are visible.
logging.basicConfig(level=logging.INFO)

# Authenticate with the Hugging Face Hub.
# SECURITY: the token must come from the environment, never be hard-coded in
# source — a committed token is a leaked credential and must be revoked.
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token)
else:
    # The public model below may still load without auth; warn instead of crash.
    logging.warning("HF_TOKEN not set; skipping Hugging Face Hub login.")

app = Flask(__name__)

# Load the Vietnamese machine-reading-comprehension QA model once at startup
# so every request reuses the same pipeline instance.
# device=0 selects the first GPU when available, -1 falls back to CPU.
logging.info("Loading nguyenvulebinh/vi-mrc-base...")
qa_pipeline = pipeline(
    "question-answering",
    model="nguyenvulebinh/vi-mrc-base",
    device=0 if torch.cuda.is_available() else -1,
)
|
|
|
@app.route("/api/answer", methods=["POST"])
def answer():
    """Answer a question about a given context using the QA pipeline.

    Expects a JSON body of the form ``{"question": str, "context": str}``.
    Returns ``{"answer": str}`` on success, a 400 with ``{"error": ...}``
    when the body is missing either field (or is not a JSON object), and a
    500 with ``{"error": ...}`` on unexpected failure.
    """
    try:
        # silent=True returns None instead of raising on a non-JSON body,
        # so a malformed request yields a clean 400 rather than a 500.
        data = request.get_json(silent=True)
        if not isinstance(data, dict):
            data = {}
        question = data.get("question")
        context = data.get("context")
        if not question or not context:
            return jsonify({"error": "Missing question or context"}), 400
        result = qa_pipeline(question=question, context=context)
        # Lazy %-args: the message is only formatted if the level is enabled.
        logging.info("Question: %s, Answer: %s", question, result["answer"])
        return jsonify({"answer": result["answer"]})
    except Exception as e:
        # Top-level API boundary: log the full traceback, return generic 500.
        logging.exception("API error")
        return jsonify({"error": str(e)}), 500
|
|
|
|
|
def gradio_answer(question, context):
    """Gradio callback: return the model's answer for *question* in *context*."""
    prediction = qa_pipeline(question=question, context=context)
    return prediction["answer"]
|
|
|
# Gradio UI over the same QA pipeline: two free-text inputs (question,
# context) and one text output (the extracted answer).
# NOTE(review): iface is constructed but never launched anywhere in this
# file — __main__ runs only the Flask app. Confirm whether iface.launch()
# is intentionally omitted or this UI is dead code.
iface = gr.Interface(
    fn=gradio_answer,
    inputs=["text", "text"],
    outputs="text",
    title="AgriBot: Hỏi đáp nông nghiệp",
    description="Nhập câu hỏi và ngữ cảnh để nhận câu trả lời về nông nghiệp."
)
|
|
|
if __name__ == "__main__":
    # Serve the Flask API on all interfaces; 7860 is presumably chosen to
    # match the Hugging Face Spaces convention — confirm deployment target.
    # Only the Flask app is started here; the Gradio interface is not.
    app.run(host="0.0.0.0", port=7860)