# Hugging Face Space app (scraped page header preserved below as comments)
# Spaces status: Sleeping
# File size: 813 Bytes
import gradio as gr
from transformers import BertForQuestionAnswering, BertTokenizer, pipeline
# Load your fine-tuned model and tokenizer
# NOTE(review): "RahulTextLLM" is resolved by from_pretrained as either a local
# directory or a Hub repo id — confirm the checkpoint is reachable where this runs.
model = BertForQuestionAnswering.from_pretrained("RahulTextLLM") # Path to the fine-tuned model
tokenizer = BertTokenizer.from_pretrained("RahulTextLLM") # Path to the tokenizer
# Initialize the custom pipeline with your fine-tuned model
# (task "question-answering" produces dicts with an 'answer' key, consumed below)
qa_pipeline = pipeline("question-answering", model=model, tokenizer=tokenizer)
# Function to get the answer using the fine-tuned model
def answer_question(context, question):
    """Extract an answer to *question* from *context*.

    Delegates to the module-level question-answering pipeline and returns
    only the predicted answer span (a string), discarding score/offsets.
    """
    payload = {'context': context, 'question': question}
    prediction = qa_pipeline(payload)
    return prediction['answer']
# Wire the QA function into a Gradio UI: two free-text inputs
# (context, question) mapped to a single text output (the answer).
interface = gr.Interface(
    fn=answer_question,
    inputs=["text", "text"],
    outputs="text",
)
# Start the web server for this Space.
interface.launch()