import gradio as gr
import spaces
import torch
from transformers import AutoModel, AutoTokenizer, AutoModelForCausalLM, AutoModelForSeq2SeqLM

model_id = "textcleanlm/textclean-4B"

# Loaded lazily on the first request so the Space starts quickly
model = None
tokenizer = None

def load_model():
    global model, tokenizer
    if model is None:
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        # Add a padding token if the checkpoint does not define one
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
        # The architecture is not known up front, so try different model
        # classes until one accepts the checkpoint
        for model_class in [AutoModelForSeq2SeqLM, AutoModelForCausalLM, AutoModel]:
            try:
                model = model_class.from_pretrained(
                    model_id,
                    torch_dtype=torch.bfloat16,
                    device_map="auto"
                )
                break
            except Exception:
                continue
        if model is None:
            raise ValueError(f"Could not load model {model_id}")
    return model, tokenizer

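# Note: device_map="auto" requires the `accelerate` package to be installed;
# without it, drop the argument and move the model with .to(...) manually.
# As a rough estimate, a 4B-parameter model in bfloat16 needs about 8 GB of
# memory for the weights alone.
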
@spaces.GPU  # requests a GPU slot when this runs on a ZeroGPU Space
def clean_text(text):
    model, tokenizer = load_model()
    inputs = tokenizer(text, return_tensors="pt", max_length=512, truncation=True)
    # Move inputs to wherever device_map placed the model (also works on CPU)
    inputs = {k: v.to(model.device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_length=512,
            num_beams=4,
            early_stopping=True
        )
    cleaned_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
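    # If the checkpoint resolved to a causal LM, generate() echoes the prompt
    # tokens in its output; a sketch for decoding only the newly generated
    # part (assumes batch size 1):
    #   new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    #   cleaned_text = tokenizer.decode(new_tokens, skip_special_tokens=True)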
    return cleaned_text

iface = gr.Interface(
    fn=clean_text,
    inputs=gr.Textbox(
        lines=5,
        placeholder="Enter text to clean...",
        label="Input Text"
    ),
    outputs=gr.Textbox(
        lines=5,
        label="Cleaned Text"
    ),
    title="TextClean-4B Demo",
    description="Simple demo for text cleaning using the textcleanlm/textclean-4B model"
)

if __name__ == "__main__":
    iface.launch()
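
# When running outside Spaces, standard Gradio launch options apply, e.g.
# iface.launch(share=True) for a temporary public URL, or
# iface.launch(server_port=7860) to pin the port.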