WolfInk committed on
Commit
8d30265
verified
1 Parent(s): 6cb6dda

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -3
app.py CHANGED
@@ -1,8 +1,18 @@
 
 
1
  import gradio as gr
2
- from transformers import pipeline
3
 
4
- # Cargar el modelo desde Hugging Face Hub
5
- generator = pipeline("text-generation", model="WolfInk/GPT-1.5-High")
 
 
 
 
 
 
 
 
 
6
 
7
  def generate_response(prompt, max_tokens, temperature, top_p, top_k, repetition_penalty):
8
  output = generator(
 
1
+ from transformers import GPT2Config, GPT2LMHeadModel
2
+ from transformers import CONFIG_MAPPING, MODEL_MAPPING, pipeline
3
  import gradio as gr
 
4
 
5
# Step 1: Register the model
class GPT1_5HighConfig(GPT2Config):
    """GPT-2-style configuration for the WolfInk/GPT-1.5-High checkpoint.

    Subclassing GPT2Config reuses the GPT-2 architecture while letting the
    checkpoint be registered under its own model type.
    """

    # A distinct model_type is required so the Auto* machinery can map the
    # "gpt1_5high" key back to this class; without this override the
    # inherited value would still be "gpt2", breaking the registration.
    model_type = "gpt1_5high"

    def __init__(self, **kwargs):
        # No extra hyperparameters yet; forward everything to GPT2Config.
        super().__init__(**kwargs)
9
+
10
# Register the custom configuration in Transformers' mapping dictionaries.
# NOTE(review): recent transformers releases expose CONFIG_MAPPING and
# MODEL_MAPPING as lazy mappings that may not accept plain item assignment;
# the supported API is AutoConfig.register("gpt1_5high", ...) and
# AutoModelForCausalLM.register(...) — confirm against the pinned version.
CONFIG_MAPPING["gpt1_5high"] = GPT1_5HighConfig
# NOTE(review): MODEL_MAPPING is the *base*-model mapping; a
# "text-generation" pipeline resolves models through the causal-LM mapping,
# so verify this entry is the one actually consulted.
MODEL_MAPPING[GPT1_5HighConfig] = GPT2LMHeadModel  # Use the appropriate model
13
+
14
# Step 2: Create the text-generation pipeline.
# The Hub checkpoint ships its own config.json, which the pipeline loads
# automatically. Passing config="gpt1_5high" was a bug: `pipeline`'s
# `config` argument must be a repo id / local path or a PretrainedConfig
# instance, so a bare model-type string fails to resolve.
generator = pipeline("text-generation", model="WolfInk/GPT-1.5-High")
16
 
17
  def generate_response(prompt, max_tokens, temperature, top_p, top_k, repetition_penalty):
18
  output = generator(