import gradio as gr
from transformers import BertForQuestionAnswering, BertTokenizer, pipeline

# Load the fine-tuned model and tokenizer ("RahulTextLLM" is a Hub model ID or a local path)
model = BertForQuestionAnswering.from_pretrained("RahulTextLLM")
tokenizer = BertTokenizer.from_pretrained("RahulTextLLM")

# Build a question-answering pipeline around the fine-tuned model
qa_pipeline = pipeline("question-answering", model=model, tokenizer=tokenizer)

# Answer a question about the given context using the fine-tuned model
def answer_question(context, question):
    result = qa_pipeline({'context': context, 'question': question})
    return result['answer']
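
# Quick sanity check (hypothetical inputs; the exact answer depends on the model):
# answer_question("The Eiffel Tower is in Paris.", "Where is the Eiffel Tower?")
# should return a span extracted from the context, e.g. "Paris"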

# Define the Gradio interface with labeled inputs and output
interface = gr.Interface(
    fn=answer_question,
    inputs=[gr.Textbox(label="Context"), gr.Textbox(label="Question")],
    outputs=gr.Textbox(label="Answer"),
)

# Launch the interface
interface.launch()
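
# Note: on Hugging Face Spaces the app is served automatically, so plain launch() is enough;
# when running locally, launch(share=True) creates a temporary public URL instead.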