# qa_roberta / app.py
# Hugging Face Space by fsaglam2002 — extractive question answering with
# deepset/roberta-base-squad2, served through a Gradio interface.
# (Header reconstructed as comments: the original lines were webpage scrape
# residue — repo path, author, commit hash, byte count — and not valid Python.)
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
import gradio as grad
import ast
# Checkpoint name: RoBERTa-base fine-tuned on SQuAD 2.0 for extractive QA.
_model = "deepset/roberta-base-squad2"
# Shared question-answering pipeline; the same checkpoint provides both the
# model weights and the tokenizer.
_pipeline = pipeline("question-answering", model=_model, tokenizer=_model)
def answer_question(question, context):
    """Answer *question* using *context* via the RoBERTa SQuAD2 pipeline.

    Parameters:
        question: the question text entered by the user.
        context: the passage the answer should be extracted from.

    Returns:
        The pipeline's result (a dict with keys such as ``answer``,
        ``score``, ``start``, ``end``).
    """
    # Pass the inputs directly as keyword arguments. The original code built
    # a dict literal as a string and ran it through ast.literal_eval — that
    # line was a syntax error (mismatched quotes) and, even if repaired, would
    # break on any quote character appearing in the question or context.
    response = _pipeline(question=question, context=context)
    return response
# Build and launch the QA web UI: two text inputs (question, context) mapped
# onto answer_question, with the answer rendered as text.
grad.Interface(answer_question, inputs=["text", "text"], outputs="text").launch()
# NOTE(review): the original file continued with a second Interface wired to
# undefined names (`converse` and `css`) and the long-removed
# `grad.inputs.Textbox` API. That code could never run — it raised NameError
# as soon as the first launch() returned — so it has been removed.