JuanCabs committed on
Commit
4e195b0
verified
1 Parent(s): aae39db

Update app.py

Files changed (1)
  1. app.py +15 -13
app.py CHANGED
@@ -1,22 +1,24 @@
 import gradio as gr
-from transformers import pipeline
+from transformers import AutoModelForCausalLM, AutoTokenizer

-# Load the model from Hugging Face
-generator = pipeline('text-generation', model='JuanCabs/lapepav0')
+# Load the model and the tokenizer from Hugging Face
+model = AutoModelForCausalLM.from_pretrained("JuanCabs/lapepav0")
+tokenizer = AutoTokenizer.from_pretrained("JuanCabs/lapepav0")

-# Define the function for text generation
+# Define the text generation function
 def generate_text(prompt):
-    results = generator(prompt, max_length=100)
-    return results[0]['generated_text']
+    inputs = tokenizer(prompt, return_tensors="pt")  # Tokenize the input
+    outputs = model.generate(**inputs, max_length=100)  # Generate the output
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)  # Decode and return the text

-# Create the Gradio interface
+# Create the Gradio interface
 interface = gr.Interface(
-    fn=generate_text,  # The function to execute
-    inputs="text",  # Input type
-    outputs="text",  # Output type
-    title="Text Generation Model",  # Title of the app
-    description="Enter a prompt to generate text using a Hugging Face model"  # App description
+    fn=generate_text,  # The function to execute
+    inputs="text",  # Input type
+    outputs="text",  # Output type
+    title="Text Generation Model",  # Title of the app
+    description="Enter a prompt to generate text using a Hugging Face model"  # Description of the app
 )

-# Launch the app
+# Launch the application
 interface.launch()
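
For reviewers who want to sanity-check the new generation path outside Gradio, here is a minimal sketch (not part of the commit) that exercises the same load-and-generate logic directly. It assumes the JuanCabs/lapepav0 checkpoint loads as a causal LM from the Hub, and it uses max_new_tokens, which bounds only the generated continuation, whereas max_length=100 in the committed code also counts the prompt tokens toward the limit.

from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the same checkpoint the app uses (assumes it is a causal LM on the Hub).
model = AutoModelForCausalLM.from_pretrained("JuanCabs/lapepav0")
tokenizer = AutoTokenizer.from_pretrained("JuanCabs/lapepav0")

def generate_text(prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    # max_new_tokens limits only the continuation; max_length=100 (as committed)
    # would also count the prompt tokens toward the 100-token budget.
    outputs = model.generate(**inputs, max_new_tokens=100)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

print(generate_text("Once upon a time"))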