import gradio as gr
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel

# Load the Vietnamese GPT-2 tokenizer and model from the Hugging Face Hub
tokenizer = GPT2Tokenizer.from_pretrained('NlpHUST/gpt2-vietnamese')
model = GPT2LMHeadModel.from_pretrained('NlpHUST/gpt2-vietnamese')


def run(text, intensity):
    res = ""
    # The slider value sets the generation length; cast to int because Gradio may pass a float
    max_length = int(intensity)
    input_ids = tokenizer.encode(text, return_tensors='pt')
    # Generate three candidate continuations of exactly max_length tokens
    sample_outputs = model.generate(input_ids,
                                    pad_token_id=tokenizer.eos_token_id,
                                    do_sample=True,
                                    max_length=max_length,
                                    min_length=max_length,
                                    top_k=40,
                                    num_beams=5,
                                    early_stopping=True,
                                    no_repeat_ngram_size=2,
                                    num_return_sequences=3)
    # Concatenate the decoded samples into a single output string
    for i, sample_output in enumerate(sample_outputs):
        res += "Generated text {}\n\n{}".format(i + 1, tokenizer.decode(sample_output.tolist()))
        res += '\n---'
    return res


# Simple Gradio UI: a text box for the prompt and a slider for the output length
demo = gr.Interface(
    fn=run,
    inputs=["text", "slider"],
    outputs=["text"],
)

demo.launch()
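
# Note (assumption, not from the original app): the "slider" string shorthand uses Gradio's
# default slider settings, so the generation length is not constrained to a sensible range.
# A sketch with explicit components is shown below; the range, step, and labels are illustrative.
#
# demo = gr.Interface(
#     fn=run,
#     inputs=[
#         gr.Textbox(label="Prompt"),
#         gr.Slider(minimum=10, maximum=200, step=1, value=100, label="Max length"),
#     ],
#     outputs=[gr.Textbox(label="Generated text")],
# )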