|
import os |
|
import gradio as gr |
|
from gradio import ChatMessage |
|
import torch |
|
import torch._dynamo |
|
from transformers import AutoModelForCausalLM, AutoTokenizer |
|
from huggingface_hub import hf_hub_download |
|
import re |
|
from llama_cpp import Llama |
|
from typing import Iterator |
|
import spaces |
|
|
|
|
|
# If TorchDynamo ever attempts a graph capture, fall back to eager instead of raising.
torch._dynamo.config.suppress_errors = True

# Disable TorchDynamo entirely -- inference here goes through llama.cpp, so
# torch compilation is not needed (and has caused errors in this Space).
torch._dynamo.disable()
|
|
|
|
|
# Hub repo of the fine-tuned model (linked from the UI header).
MODEL_ID = "somosnlp-hackathon-2025/iberotales-gemma-3-1b-it-es"

# GGUF export of the same fine-tune, used for CPU inference via llama.cpp.
GGUF_MODEL_ID = "somosnlp-hackathon-2025/iberotales-gemma-3-1b-it-es-finetune-gguf"
GGUF_FILENAME = "gemma-3-finetune.Q8_0.gguf"
GGUF_REVISION = "main"

# Upper bound for the tokens slider and its default value.
MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024

# System prompt (Spanish): instructs the model to reason inside
# <think>...</think> and put the final answer inside <SOLUTION>...</SOLUTION>.
# generate_response() parses exactly these two tag pairs.
DEFAULT_SYSTEM_MESSAGE = """Resuelve el siguiente problema.
Primero, piensa en voz alta qué debes hacer, paso por paso y de forma resumida, entre <think> y </think>.
Luego, da la respuesta final entre <SOLUTION> y </SOLUTION>.
No escribas nada fuera de ese formato."""
|
|
|
|
|
# Character catalog keyed by country (the dropdown choices). Each entry holds
# the display name, a local thumbnail path, and a one-line description used to
# seed the story prompt when the user clicks the gallery.
PERSONAJES_POR_PAIS = {
    "🇦🇷 Argentina": [
        {"nombre": "La Difunta Correa", "imagen": "images/ar1.jpg", "descripcion": "Santa popular que murió de sed siguiendo a su esposo reclutado"},
        {"nombre": "El Lobizón", "imagen": "images/ar2.jpg", "descripcion": "Hombre lobo de la tradición gaucha, séptimo hijo varón maldito"}
    ],
    "🇧🇴 Bolivia": [
        {"nombre": "El Tío del Cerro Rico", "imagen": "images/bo1.webp", "descripcion": "Señor de las minas que protege y castiga a los mineros"},
        {"nombre": "El Ekeko", "imagen": "images/bo2.jpg", "descripcion": "Dios aymara de la abundancia y la fortuna con jorobas"}
    ],
    "🇵🇾 Paraguay": [
        {"nombre": "Kurupi", "imagen": "images/py1.jpg", "descripcion": "Dios guaraní de la fertilidad con poderes sobrenaturales"},
        {"nombre": "Pombero", "imagen": "images/py2.jpg", "descripcion": "Duende protector de los animales y la naturaleza"}
    ]
}

# Module-level state.
model = None  # llama_cpp.Llama instance, set by load_model()
tokenizer = None  # HF tokenizer set by load_model(); appears unused elsewhere in this file
current_personajes = []  # characters of the currently selected country (set by actualizar_personajes)
|
|
|
|
|
# Custom CSS: constrains the overall app width, turns the #galeria gallery into
# a scrollable single-column strip of square thumbnails, and styles the header
# banner with a dark gradient.
custom_css = """
.gradio-container {
    max-width: 1400px !important;
    margin: auto;
    padding-top: 1.5rem;
}

#galeria .grid-wrap {
    max-height: 350px;
    overflow-y: auto;
}

#galeria .grid-container {
    grid-template-columns: repeat(1, 1fr) !important;
    gap: 0.5rem;
}

#galeria .thumbnail-item {
    aspect-ratio: 1;
    max-height: 100px;
}

#galeria .thumbnail-item img {
    object-fit: cover;
    width: 100%;
    height: 100%;
    border-radius: 8px;
}

.header-info {
    background: linear-gradient(135deg, #2c3e50 0%, #1a1a2e 100%);
    color: white;
    padding: 1rem;
    border-radius: 12px;
    margin-bottom: 1rem;
    text-align: center;
}
"""
|
|
|
def load_model():
    """Fetch the GGUF checkpoint from the Hub and set up llama.cpp inference.

    Side effects: assigns the module-level ``model`` (llama_cpp.Llama) and
    ``tokenizer`` globals.

    Returns:
        bool: True when everything initialized, False on any failure.
    """
    global model, tokenizer

    try:
        print("Descargando modelo GGUF...")
        model_path = hf_hub_download(
            repo_id=GGUF_MODEL_ID,
            filename=GGUF_FILENAME,
            revision=GGUF_REVISION,
            local_dir="./models",
        )
        print(f"Modelo descargado en: {model_path}")

        # CPU-only llama.cpp instance (n_gpu_layers=0) using Gemma's chat template.
        llama_kwargs = {
            "model_path": model_path,
            "n_ctx": 2048,
            "n_batch": 512,
            "n_threads": 4,
            "n_gpu_layers": 0,
            "chat_format": "gemma",
        }
        model = Llama(**llama_kwargs)

        # NOTE(review): this tokenizer comes from a different (gemma-2) repo and
        # appears unused in this file -- confirm it is still needed.
        tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b-it")

        print("Modelo cargado exitosamente")
        return True

    except Exception as exc:
        print(f"Error al cargar modelo: {exc}")
        return False
|
|
|
def generate_response(
    user_message: str,
    system_message: str = DEFAULT_SYSTEM_MESSAGE,
    max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,
    temperature: float = 0.7,
    top_p: float = 0.95,
):
    """Run one chat completion and split the output into chat messages.

    Args:
        user_message: Prompt written by the user.
        system_message: System instructions (default asks for the
            <think>/<SOLUTION> format parsed below).
        max_new_tokens: Generation budget passed to llama.cpp.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling cutoff.

    Returns:
        list[ChatMessage]: an optional collapsible "thinking" message
        (flagged via ``metadata["title"]``) followed by the final answer.
        Errors are also returned as a single-element ChatMessage list so
        callers can always iterate the result uniformly.
    """
    global model

    if model is None:
        # Fix: the original returned a bare string here while every other path
        # returns a list of ChatMessage; chat_function iterates the result, so
        # a string would be appended to the history character by character.
        return [ChatMessage(role="assistant", content="Error: Modelo no disponible.")]

    try:
        # Request payload for llama.cpp's chat endpoint (distinct name from the
        # response list below; the original reused `messages` for both).
        chat_request = [
            {"role": "system", "content": system_message},
            {"role": "user", "content": user_message},
        ]

        response = model.create_chat_completion(
            messages=chat_request,
            max_tokens=max_new_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=False,
        )
        full_response = response['choices'][0]['message']['content']

        # Pull out the optional <think> and <SOLUTION> sections.
        thinking_part = ""
        solution_part = ""

        think_match = re.search(r'<think>(.*?)</think>', full_response, re.DOTALL)
        if think_match:
            thinking_part = think_match.group(1).strip()

        solution_match = re.search(r'<SOLUTION>(.*?)</SOLUTION>', full_response, re.DOTALL)
        if solution_match:
            solution_part = solution_match.group(1).strip()

        result = []

        if thinking_part:
            result.append(ChatMessage(
                role="assistant",
                content=thinking_part,
                metadata={"title": "🤔 Pensando..."}
            ))

        if solution_part:
            result.append(ChatMessage(
                role="assistant",
                content=solution_part
            ))
        else:
            # No <SOLUTION> tag: fall back to whatever text remains outside the
            # think block. (The original dropped this remainder whenever a
            # think block was present, silently losing untagged answers.)
            leftover = re.sub(r'<think>.*?</think>', '', full_response, flags=re.DOTALL)
            leftover = re.sub(r'<SOLUTION>(.*?)</SOLUTION>', r'\1', leftover, flags=re.DOTALL)
            leftover = leftover.strip()
            if leftover:
                result.append(ChatMessage(
                    role="assistant",
                    content=leftover
                ))

        return result

    except Exception as e:
        return [ChatMessage(role="assistant", content=f"Error: {str(e)}")]
|
|
|
def actualizar_personajes(pais_seleccionado):
    """Return the gallery images for the selected country.

    Also refreshes the module-level ``current_personajes`` cache so that
    ``crear_prompt_desde_personaje`` can map a clicked gallery index back to
    a character.

    Args:
        pais_seleccionado: A key of PERSONAJES_POR_PAIS (e.g. "🇵🇾 Paraguay").

    Returns:
        list: image paths/URLs for the gr.Gallery component. Fix: the original
        returned a (images, title) 2-tuple, but every wiring uses a single
        output component (``outputs=[galeria_personajes]``), so the extra
        value caused a return-arity mismatch; the title was consumed nowhere.
    """
    global current_personajes
    personajes = PERSONAJES_POR_PAIS.get(pais_seleccionado, [])
    current_personajes = personajes

    if not personajes:
        return []

    imagenes = []
    for p in personajes:
        if os.path.exists(p["imagen"]):
            imagenes.append(p["imagen"])
        else:
            # Fallback placeholder when the local asset is missing.
            imagenes.append("https://via.placeholder.com/100x100.png?text=No+Image")

    return imagenes
|
|
|
def crear_prompt_desde_personaje(evt: gr.SelectData):
    """Build a story prompt for the gallery character the user clicked.

    Falls back to a generic prompt when the index is missing, out of range,
    or anything else goes wrong.
    """
    global current_personajes

    fallback = "Crea una historia sobre un personaje mítico"
    try:
        idx = evt.index
        if idx is None or idx >= len(current_personajes):
            return fallback
        elegido = current_personajes[idx]
        return f"Crea una historia sobre {elegido['nombre']}, {elegido['descripcion']}"
    except Exception as e:
        print(f"Error al crear prompt: {e}")
        return fallback
|
|
|
def chat_function(message, history, max_tokens, temperature):
    """Handle one chat turn.

    Appends the user's message and the model's reply messages to the chat
    history and clears the input box. Blank submissions leave the history
    untouched.
    """
    if not message.strip():
        return history, ""

    # Record the user's turn.
    updated = history + [ChatMessage(role="user", content=message)]

    # The model may reply with several messages (thinking + solution).
    replies = generate_response(message, DEFAULT_SYSTEM_MESSAGE, max_tokens, temperature)
    updated = updated + list(replies)

    return updated, ""
|
|
|
|
|
# Load the model once at import time so the Space is ready before the UI starts;
# the main guard below refuses to launch if this failed.
print("Iniciando carga del modelo...")
model_loaded = load_model()
|
|
|
|
|
# Build the UI: character "Pokédex" on the left, chat on the right, sampling
# controls below, followed by the event wiring that connects them.
with gr.Blocks(title="Iberotales", css=custom_css) as demo:

    # Header banner with project credits and model links.
    gr.HTML("""
    <div class="header-info">
        <h1>📚 Iberotales</h1>
        <p><strong>Autor:</strong> David Quispe | <a href="https://github.com/mcdaqc/Iberotales" target="_blank" style="text-decoration: none;">GitHub</a> | <a href="https://huggingface.co/somosnlp-hackathon-2025/iberotales-gemma-3-1b-it-es" target="_blank" style="text-decoration: none;">Modelo</a> | <a href="https://huggingface.co/somosnlp-hackathon-2025/iberotales-gemma-3-1b-it-es-finetune-gguf" target="_blank" style="text-decoration: none;">GGUF</a></p>
        <p><em>Alineando modelos de lenguaje con la narrativa de mitos y leyendas de Iberoamérica.</em></p>
        <p><em>Hackathon SomosNLP 2025</em></p>
    </div>
    """)

    with gr.Row():

        # Left column: country picker plus the scrollable character gallery.
        with gr.Column(scale=1, min_width=320):
            gr.Markdown("### 🗃️ Pokédex de Personajes")

            pais_dropdown = gr.Dropdown(
                choices=list(PERSONAJES_POR_PAIS.keys()),
                value="🇵🇾 Paraguay",
                label="País",
                container=False
            )

            galeria_personajes = gr.Gallery(
                value=[],
                label="Personajes",
                show_label=False,
                elem_id="galeria",  # styled by the #galeria rules in custom_css
                columns=1,
                rows=4,
                height=350
            )

        # Right column: chat history, input row, and generation controls.
        with gr.Column(scale=2):
            chatbot = gr.Chatbot(
                type="messages",  # consumes ChatMessage objects, not (user, bot) tuples
                show_label=False,
                height=400,
                avatar_images=(None, "🏛️"),
                value=[]
            )

            with gr.Row():
                input_box = gr.Textbox(
                    placeholder="Escribe tu historia o selecciona un personaje...",
                    show_label=False,
                    scale=4,
                    container=False
                )
                send_button = gr.Button("📤", scale=1, variant="primary")

            with gr.Row():
                clear_button = gr.Button("🗑️ Limpiar", scale=1, size="sm")

                with gr.Column(scale=3):
                    with gr.Row():
                        max_tokens = gr.Slider(100, MAX_MAX_NEW_TOKENS, DEFAULT_MAX_NEW_TOKENS, label="Tokens", container=False)
                        temperature = gr.Slider(0.1, 2.0, 0.7, label="Temp", container=False)

    # Refresh the gallery when the selected country changes.
    # NOTE(review): this is a single-output wiring -- verify that
    # actualizar_personajes returns exactly one value (the gallery list).
    pais_dropdown.change(
        fn=actualizar_personajes,
        inputs=[pais_dropdown],
        outputs=[galeria_personajes]
    )

    # Populate the gallery for the default country when the page loads.
    demo.load(
        fn=actualizar_personajes,
        inputs=[pais_dropdown],
        outputs=[galeria_personajes]
    )

    # Clicking a character pre-fills the input box with a story prompt.
    galeria_personajes.select(
        fn=crear_prompt_desde_personaje,
        outputs=[input_box]
    )

    # Enter in the textbox and the send button run the same chat handler;
    # both update the history and clear the input box.
    input_box.submit(
        fn=chat_function,
        inputs=[input_box, chatbot, max_tokens, temperature],
        outputs=[chatbot, input_box]
    )

    send_button.click(
        fn=chat_function,
        inputs=[input_box, chatbot, max_tokens, temperature],
        outputs=[chatbot, input_box]
    )

    # Reset both the chat history and the input box.
    clear_button.click(
        fn=lambda: ([], ""),
        outputs=[chatbot, input_box],
        queue=False
    )
|
|
|
|
|
if __name__ == "__main__":
    # Launch only when the import-time model load succeeded.
    if not model_loaded:
        print("Error: No se pudo cargar el modelo. Revisa la configuración.")
    else:
        print("Lanzando aplicación...")
        demo.launch(share=False, show_error=True)