import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from peft import PeftModel

# Load the base ByT5 model, apply the LoRA fine-tuned adapter, and load the tokenizer
base_model = AutoModelForSeq2SeqLM.from_pretrained("google/byt5-small")
model = PeftModel.from_pretrained(base_model, "rihebriri/byt5_lora_finetuned")
tokenizer = AutoTokenizer.from_pretrained("google/byt5-small")

# Correct the input text: tokenize, generate with the fine-tuned model, and decode
def correct_text(text):
    inputs = tokenizer(text, return_tensors="pt").input_ids
    # max_new_tokens raises the default generation cap, which is too short
    # for ByT5's byte-level output; adjust as needed
    outputs = model.generate(inputs, max_new_tokens=256)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Create the Gradio interface
iface = gr.Interface(fn=correct_text, inputs="text", outputs="text")

# Launch the app (this also exposes the prediction API)
iface.launch()
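
# Usage sketch (run from a separate script while the app above is running):
# the launched interface can be queried programmatically with gradio_client.
# This is an illustrative assumption, not part of the Space itself; the URL
# below is Gradio's default local address and should be replaced with the
# Space ID (e.g. "username/space-name") once deployed.
#
# from gradio_client import Client
#
# client = Client("http://127.0.0.1:7860")
# # A single-function Interface exposes its endpoint as "/predict"
# result = client.predict("thsi sentnce has typso", api_name="/predict")
# print(result)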