gnosticdev committed on
Commit b0e62d9 · verified · 1 Parent(s): 18e4b7b

Update app.py

Files changed (1)
  1. app.py +100 -103
app.py CHANGED
@@ -13,35 +13,28 @@ from moviepy.editor import (
13
  )
14
 
15
  # ------------------- Configuración & Globals -------------------
16
- logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s")
17
  logger = logging.getLogger(__name__)
18
 
19
  PEXELS_API_KEY = os.getenv("PEXELS_API_KEY")
20
  if not PEXELS_API_KEY:
21
  raise RuntimeError("Debes definir PEXELS_API_KEY en 'Settings' -> 'Variables & secrets'")
22
 
23
- # --- Modelos inicializados como None para Carga Perezosa (Lazy Loading) ---
24
- tokenizer = None
25
- gpt2_model = None
26
- kw_model = None
27
- # ---
28
-
29
  RESULTS_DIR = "video_results"
30
  os.makedirs(RESULTS_DIR, exist_ok=True)
31
- TASKS = {}
32
 
33
- # --- Lista de Voces Fija para un Arranque Instantáneo ---
34
  SPANISH_VOICES = [
35
  "es-ES-ElviraNeural", "es-ES-AlvaroNeural", "es-MX-DaliaNeural", "es-MX-JorgeNeural",
36
- "es-AR-ElenaNeural", "es-AR-TomasNeural", "es-CO-SalomeNeural", "es-CO-GonzaloNeural",
37
- "es-US-PalomaNeural", "es-US-AlonsoNeural"
38
  ]
39
 
40
- # ------------------- Funciones para cargar modelos bajo demanda -------------------
41
  def get_tokenizer():
42
  global tokenizer
43
  if tokenizer is None:
44
- logger.info("Cargando tokenizer por primera vez...")
45
  tokenizer = GPT2Tokenizer.from_pretrained("datificate/gpt2-small-spanish")
46
  if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token
47
  return tokenizer
@@ -49,30 +42,36 @@ def get_tokenizer():
49
  def get_gpt2_model():
50
  global gpt2_model
51
  if gpt2_model is None:
52
- logger.info("Cargando modelo GPT-2 por primera vez...")
53
  gpt2_model = GPT2LMHeadModel.from_pretrained("datificate/gpt2-small-spanish").eval()
54
  return gpt2_model
55
 
56
  def get_kw_model():
57
  global kw_model
58
  if kw_model is None:
59
- logger.info("Cargando modelo KeyBERT por primera vez...")
60
  kw_model = KeyBERT("distilbert-base-multilingual-cased")
61
  return kw_model
62
 
63
  # ------------------- Funciones del Pipeline de Vídeo -------------------
64
- def gpt2_script(prompt: str, max_len: int = 160) -> str:
65
  local_tokenizer = get_tokenizer()
66
  local_gpt2_model = get_gpt2_model()
67
  instruction = f"Escribe un guion corto y coherente sobre: {prompt}"
68
  inputs = local_tokenizer(instruction, return_tensors="pt", truncation=True, max_length=512)
69
  outputs = local_gpt2_model.generate(
70
- **inputs, max_length=max_len + inputs["input_ids"].shape[1], do_sample=True,
71
  top_p=0.9, top_k=40, temperature=0.7, no_repeat_ngram_size=3,
72
  pad_token_id=local_tokenizer.pad_token_id, eos_token_id=local_tokenizer.eos_token_id,
73
  )
74
  text = local_tokenizer.decode(outputs[0], skip_special_tokens=True)
75
- return text.split("sobre:")[-1].strip()[:max_len]
76
 
77
  async def edge_tts_synth(text: str, voice: str, path: str):
78
  communicate = edge_tts.Communicate(text, voice)
@@ -81,21 +80,12 @@ async def edge_tts_synth(text: str, voice: str, path: str):
81
  def keywords(text: str) -> list[str]:
82
  local_kw_model = get_kw_model()
83
  clean_text = re.sub(r"[^\w\sáéíóúñÁÉÍÓÚÑ]", "", text.lower())
84
- try:
85
- kws = local_kw_model.extract_keywords(clean_text, stop_words="spanish", top_n=5)
86
- return [k.replace(" ", "+") for k, _ in kws if k]
87
- except Exception as e:
88
- logger.warning(f"KeyBERT falló, usando método simple. Error: {e}")
89
- words = [w for w in clean_text.split() if len(w) > 4]
90
- return [w for w, _ in Counter(words).most_common(5)] or ["naturaleza"]
91
 
92
  def pexels_search(query: str, count: int) -> list[dict]:
93
- res = requests.get(
94
- "https://api.pexels.com/videos/search",
95
- headers={"Authorization": PEXELS_API_KEY},
96
- params={"query": query, "per_page": count, "orientation": "landscape"},
97
- timeout=20,
98
- )
99
  res.raise_for_status()
100
  return res.json().get("videos", [])
101
 
@@ -127,14 +117,10 @@ def make_subtitle_clips(script: str, video_w: int, video_h: int, duration: float
127
  num_words = len(sentence.split())
128
  sentence_duration = num_words * time_per_word
129
  if sentence_duration < 0.1: continue
130
- txt_clip = (
131
- TextClip(sentence, fontsize=int(video_h * 0.05), color="white",
132
- stroke_color="black", stroke_width=1.5, method="caption",
133
- size=(int(video_w * 0.9), None), font="Arial-Bold")
134
- .set_start(current_time)
135
- .set_duration(sentence_duration)
136
- .set_position(("center", "bottom"))
137
- )
138
  clips.append(txt_clip)
139
  current_time += sentence_duration
140
  return clips
@@ -146,17 +132,24 @@ def make_grain_clip(size: tuple[int, int], duration: float):
146
  return np.repeat(noise, 3, axis=2)
147
  return VideoClip(make_frame, duration=duration).set_opacity(0.15)
148
 
149
- def build_video(script_text: str, generate_script_flag: bool, voice: str, music_path: str | None) -> str:
150
  tmp_dir = tempfile.mkdtemp()
151
  try:
 
152
  script = gpt2_script(script_text) if generate_script_flag else script_text.strip()
 
 
153
  voice_path = os.path.join(tmp_dir, "voice.mp3")
154
  asyncio.run(edge_tts_synth(script, voice, voice_path))
155
  voice_clip = AudioFileClip(voice_path)
156
  video_duration = voice_clip.duration
157
  if video_duration < 1: raise ValueError("El audio generado es demasiado corto.")
 
 
158
  video_paths = []
159
- for kw in keywords(script):
 
 
160
  if len(video_paths) >= 8: break
161
  for video_data in pexels_search(kw, 2):
162
  best_file = max(video_data.get("video_files", []), key=lambda f: f.get("width", 0))
@@ -165,29 +158,31 @@ def build_video(script_text: str, generate_script_flag: bool, voice: str, music_
165
  if path: video_paths.append(path)
166
  if len(video_paths) >= 8: break
167
  if not video_paths: raise RuntimeError("No se encontraron vídeos en Pexels.")
168
- segments = []
169
- for path in video_paths:
170
- try: segments.append(VideoFileClip(path))
171
- except Exception as e: logger.warning(f"No se pudo cargar el clip {path}: {e}")
172
- if not segments: raise RuntimeError("Los clips descargados no son válidos.")
173
- final_segments = [s.subclip(0, min(8, s.duration)) for s in segments]
174
- base_video = concatenate_videoclips(final_segments, method="chain")
175
  if base_video.duration < video_duration:
176
- num_loops = math.ceil(video_duration / base_video.duration)
177
- base_video = concatenate_videoclips([base_video] * num_loops, method="chain")
178
  base_video = base_video.subclip(0, video_duration)
 
 
179
  if music_path:
180
  music_clip = loop_audio(AudioFileClip(music_path), video_duration).volumex(0.20)
181
  final_audio = CompositeAudioClip([music_clip, voice_clip])
182
  else: final_audio = voice_clip
 
 
183
  subtitles = make_subtitle_clips(script, base_video.w, base_video.h, video_duration)
184
  grain_effect = make_grain_clip(base_video.size, video_duration)
 
 
185
  final_video = CompositeVideoClip([base_video, grain_effect, *subtitles]).set_audio(final_audio)
186
  output_path = os.path.join(tmp_dir, "final_video.mp4")
187
- final_video.write_videofile(output_path, fps=24, codec="libx264", audio_codec="aac", threads=2, logger=None)
 
188
  return output_path
189
  finally:
190
- # Intenta cerrar todos los clips de MoviePy para liberar memoria
191
  if 'voice_clip' in locals(): voice_clip.close()
192
  if 'music_clip' in locals(): music_clip.close()
193
  if 'base_video' in locals(): base_video.close()
@@ -198,80 +193,82 @@ def build_video(script_text: str, generate_script_flag: bool, voice: str, music_
198
  def worker(task_id: str, mode: str, topic: str, user_script: str, voice: str, music: str | None):
199
  try:
200
  text = topic if mode == "Generar Guion con IA" else user_script
201
- result_tmp_path = build_video(text, mode == "Generar Guion con IA", voice, music)
202
  final_path = os.path.join(RESULTS_DIR, f"{task_id}.mp4")
203
  shutil.copy2(result_tmp_path, final_path)
204
- TASKS[task_id] = {"status": "done", "result": final_path, "timestamp": datetime.utcnow()}
205
  shutil.rmtree(os.path.dirname(result_tmp_path))
206
  except Exception as e:
207
- logger.error(f"Error en la tarea {task_id}: {e}", exc_info=True)
208
- TASKS[task_id] = {"status": "error", "error": str(e), "timestamp": datetime.utcnow()}
209
-
210
- def submit_task(mode, topic, user_script, voice, music):
211
- content = topic if mode == "Generar Guion con IA" else user_script
212
- if not content.strip(): return "", "Por favor, ingresa un tema o guion."
213
- task_id = uuid.uuid4().hex[:8]
214
- TASKS[task_id] = {"status": "processing", "timestamp": datetime.utcnow()}
215
- threading.Thread(target=worker, args=(task_id, mode, topic, user_script, voice, music), daemon=True).start()
216
- return task_id, f"✅ Tarea creada con ID: {task_id}. Comprueba el estado en unos minutos."
217
-
218
- def check_task_status(task_id):
219
- if not task_id or task_id not in TASKS: return None, None, "ID de tarea no válido o no encontrado."
220
- task_info = TASKS[task_id]
221
- status = task_info["status"]
222
- if status == "processing": return None, None, "⏳ La tarea se está procesando..."
223
- if status == "error": return None, None, f"❌ Error: {task_info['error']}"
224
- if status == "done": return task_info["result"], task_info["result"], "✅ ¡Vídeo listo!"
225
- return None, None, "Estado desconocido."
226
 
227
  def janitor_thread():
228
  while True:
229
  time.sleep(3600)
230
  now = datetime.utcnow()
 
231
  for task_id, info in list(TASKS.items()):
232
- if now - info["timestamp"] > timedelta(hours=24):
233
- if info.get("result") and os.path.exists(info["result"]):
234
  try:
235
  os.remove(info["result"])
236
- logger.info(f"Limpiado vídeo antiguo: {info['result']}")
237
  except Exception as e:
238
- logger.error(f"Error al limpiar {info['result']}: {e}")
239
  del TASKS[task_id]
240
 
241
  threading.Thread(target=janitor_thread, daemon=True).start()
242

243
  with gr.Blocks(title="Generador de Vídeos IA", theme=gr.themes.Soft()) as demo:
244
  gr.Markdown("# 🎬 Generador de Vídeos con IA")
245
- gr.Markdown("Crea vídeos a partir de texto, con voz, música y efectos visuales.")
246
- with gr.Tabs():
247
- with gr.TabItem("1. Crear Vídeo"):
248
- with gr.Row():
249
- with gr.Column(scale=2):
250
- mode_radio = gr.Radio(["Generar Guion con IA", "Usar Mi Guion"], value="Generar Guion con IA", label="Elige el método")
251
- topic_textbox = gr.Textbox(label="Tema para la IA", placeholder="Ej: La historia de la Vía Láctea")
252
- script_textbox = gr.Textbox(label="Tu Guion Completo", lines=5, visible=False, placeholder="Pega aquí tu guion...")
253
- voice_dropdown = gr.Dropdown(SPANISH_VOICES, value=SPANISH_VOICES[0], label="Elige una voz")
254
- music_upload = gr.Audio(type="filepath", label="Música de fondo (opcional)")
255
- submit_button = gr.Button("✨ Generar Vídeo", variant="primary")
256
- with gr.Column(scale=1):
257
- task_id_output = gr.Textbox(label="ID de tu Tarea (Guárdalo)", interactive=False)
258
- status_output = gr.Textbox(label="Estado", interactive=False)
259
- gr.Markdown("---")
260
- gr.Markdown("### ¿Cómo funciona?\n1. Elige un método y rellena el texto.\n2. **Copia el ID** que aparecerá.\n3. Ve a la pestaña **'2. Revisar Estado'**.")
261
- with gr.TabItem("2. Revisar Estado"):
262
- gr.Markdown("### Consulta el estado de tu vídeo")
263
- with gr.Row():
264
- task_id_input = gr.Textbox(label="Pega aquí el ID de tu tarea", scale=3)
265
- check_button = gr.Button("🔍 Verificar", scale=1)
266
- status_check_output = gr.Textbox(label="Estado Actual", interactive=False)
267
  video_output = gr.Video(label="Resultado del Vídeo")
268
  download_file_output = gr.File(label="Descargar Fichero")
 
269
  def toggle_textboxes(mode):
270
- is_ai_mode = mode == "Generar Guion con IA"
271
- return gr.update(visible=is_ai_mode), gr.update(visible=not is_ai_mode)
272
  mode_radio.change(toggle_textboxes, inputs=mode_radio, outputs=[topic_textbox, script_textbox])
273
- submit_button.click(submit_task, inputs=[mode_radio, topic_textbox, script_textbox, voice_dropdown, music_upload], outputs=[task_id_output, status_output])
274
- check_button.click(check_task_status, inputs=task_id_input, outputs=[video_output, download_file_output, status_check_output])
275
 
276
  if __name__ == "__main__":
277
  demo.launch()
 
13
  )
14
 
15
  # ------------------- Configuración & Globals -------------------
16
+ logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
17
  logger = logging.getLogger(__name__)
18
 
19
  PEXELS_API_KEY = os.getenv("PEXELS_API_KEY")
20
  if not PEXELS_API_KEY:
21
  raise RuntimeError("Debes definir PEXELS_API_KEY en 'Settings' -> 'Variables & secrets'")
22
 
23
+ tokenizer, gpt2_model, kw_model = None, None, None

24
  RESULTS_DIR = "video_results"
25
  os.makedirs(RESULTS_DIR, exist_ok=True)
26
+ TASKS = {} # Diccionario para almacenar estado y progreso de tareas
27
 
 
28
  SPANISH_VOICES = [
29
  "es-ES-ElviraNeural", "es-ES-AlvaroNeural", "es-MX-DaliaNeural", "es-MX-JorgeNeural",
30
+ "es-AR-ElenaNeural", "es-AR-TomasNeural", "es-CO-SalomeNeural", "es-CO-GonzaloNeural"
 
31
  ]
32
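The fixed voice list trades freshness for instant startup. If it ever needs regenerating offline, a small script along these lines could rebuild it, assuming edge-tts's list_voices() coroutine and the ShortName/Locale fields it returns:

    import asyncio
    import edge_tts

    async def spanish_voice_names() -> list[str]:
        # Pull the full voice catalogue once and keep only Spanish (es-*) locales.
        voices = await edge_tts.list_voices()
        return sorted(v["ShortName"] for v in voices if v["Locale"].startswith("es-"))

    if __name__ == "__main__":
        print(asyncio.run(spanish_voice_names()))

Running this once and pasting the output into SPANISH_VOICES keeps the instant-startup behaviour.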
 
33
+ # ------------------- Carga Perezosa de Modelos -------------------
34
  def get_tokenizer():
35
  global tokenizer
36
  if tokenizer is None:
37
+ logger.info("Cargando tokenizer (primera vez)...")
38
  tokenizer = GPT2Tokenizer.from_pretrained("datificate/gpt2-small-spanish")
39
  if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token
40
  return tokenizer
 
42
  def get_gpt2_model():
43
  global gpt2_model
44
  if gpt2_model is None:
45
+ logger.info("Cargando modelo GPT-2 (primera vez)...")
46
  gpt2_model = GPT2LMHeadModel.from_pretrained("datificate/gpt2-small-spanish").eval()
47
  return gpt2_model
48
 
49
  def get_kw_model():
50
  global kw_model
51
  if kw_model is None:
52
+ logger.info("Cargando modelo KeyBERT (primera vez)...")
53
  kw_model = KeyBERT("distilbert-base-multilingual-cased")
54
  return kw_model
55
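The getters above are only safe as long as one thread calls them at a time. Because each video build runs in its own background thread, two simultaneous submissions could both see None and load the same model twice. A minimal thread-safe sketch of the same lazy-loading pattern (the lock is an addition for illustration, not part of the app):

    import threading
    from keybert import KeyBERT

    _kw_lock = threading.Lock()  # illustrative guard, not in the original code
    kw_model = None

    def get_kw_model():
        global kw_model
        if kw_model is None:          # fast path once the model is loaded
            with _kw_lock:
                if kw_model is None:  # re-check inside the lock (double-checked init)
                    kw_model = KeyBERT("distilbert-base-multilingual-cased")
        return kw_model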
 
56
  # ------------------- Funciones del Pipeline de Vídeo -------------------
57
+ def update_task_progress(task_id, message):
58
+ """Actualiza el log de progreso para una tarea."""
59
+ if task_id in TASKS:
60
+ TASKS[task_id]['progress_log'] = message
61
+ logger.info(f"[{task_id}] {message}")
62
+
63
+ def gpt2_script(prompt: str) -> str:
64
  local_tokenizer = get_tokenizer()
65
  local_gpt2_model = get_gpt2_model()
66
  instruction = f"Escribe un guion corto y coherente sobre: {prompt}"
67
  inputs = local_tokenizer(instruction, return_tensors="pt", truncation=True, max_length=512)
68
  outputs = local_gpt2_model.generate(
69
+ **inputs, max_length=160 + inputs["input_ids"].shape[1], do_sample=True,
70
  top_p=0.9, top_k=40, temperature=0.7, no_repeat_ngram_size=3,
71
  pad_token_id=local_tokenizer.pad_token_id, eos_token_id=local_tokenizer.eos_token_id,
72
  )
73
  text = local_tokenizer.decode(outputs[0], skip_special_tokens=True)
74
+ return text.split("sobre:")[-1].strip()
75
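Adding the prompt length to max_length works, but transformers' generate() also accepts max_new_tokens, which expresses the same budget more directly. A sketch of the equivalent call inside gpt2_script, reusing the names defined above:

    outputs = local_gpt2_model.generate(
        **inputs,
        max_new_tokens=160,  # cap on generated tokens, independent of prompt length
        do_sample=True, top_p=0.9, top_k=40, temperature=0.7,
        no_repeat_ngram_size=3,
        pad_token_id=local_tokenizer.pad_token_id,
        eos_token_id=local_tokenizer.eos_token_id,
    )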
 
76
  async def edge_tts_synth(text: str, voice: str, path: str):
77
  communicate = edge_tts.Communicate(text, voice)
 
80
  def keywords(text: str) -> list[str]:
81
  local_kw_model = get_kw_model()
82
  clean_text = re.sub(r"[^\w\sáéíóúñÁÉÍÓÚÑ]", "", text.lower())
83
+ kws = local_kw_model.extract_keywords(clean_text, stop_words="spanish", top_n=5)
84
+ return [k.replace(" ", "+") for k, _ in kws if k] or ["naturaleza"]
85
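One thing to watch in keywords(): scikit-learn, which KeyBERT uses for candidate extraction, only ships an English built-in stop-word list, so stop_words="spanish" is not a recognised value; depending on the versions involved it either raises a ValueError or silently yields no keywords (the removed try/except used to absorb exactly this). A sketch that passes an explicit Spanish list instead (the word list here is illustrative, not exhaustive):

    from keybert import KeyBERT

    # Illustrative subset of Spanish stop words; a fuller list (e.g. NLTK's) could be swapped in.
    SPANISH_STOP_WORDS = ["de", "la", "que", "el", "en", "y", "a", "los", "se", "del",
                          "las", "un", "por", "con", "una", "su", "para", "es", "al", "lo"]

    kw_model = KeyBERT("distilbert-base-multilingual-cased")
    kws = kw_model.extract_keywords("la exploración espacial y sus desafíos",
                                    stop_words=SPANISH_STOP_WORDS, top_n=5)
    print([k for k, _ in kws])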
 
86
  def pexels_search(query: str, count: int) -> list[dict]:
87
+ res = requests.get("https://api.pexels.com/videos/search", headers={"Authorization": PEXELS_API_KEY},
88
+ params={"query": query, "per_page": count, "orientation": "landscape"}, timeout=20)
89
  res.raise_for_status()
90
  return res.json().get("videos", [])
91
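pexels_search aborts on the first non-2xx response, and the Pexels API rate-limits clients, so a single 429 would fail the whole build. A retry-with-backoff variant, sketched under the assumption that a short wait is acceptable inside a background task (the helper name and retry counts are illustrative):

    import time
    import requests

    def pexels_search_with_retry(query: str, count: int, api_key: str, retries: int = 3) -> list[dict]:
        for attempt in range(retries):
            res = requests.get(
                "https://api.pexels.com/videos/search",
                headers={"Authorization": api_key},
                params={"query": query, "per_page": count, "orientation": "landscape"},
                timeout=20,
            )
            if res.status_code == 429 and attempt < retries - 1:
                time.sleep(2 ** attempt)  # back off 1s, 2s, 4s, ...
                continue
            res.raise_for_status()
            return res.json().get("videos", [])
        return []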
 
 
117
  num_words = len(sentence.split())
118
  sentence_duration = num_words * time_per_word
119
  if sentence_duration < 0.1: continue
120
+ txt_clip = (TextClip(sentence, fontsize=int(video_h * 0.05), color="white",
121
+ stroke_color="black", stroke_width=1.5, method="caption",
122
+ size=(int(video_w * 0.9), None), font="Arial-Bold")
123
+ .set_start(current_time).set_duration(sentence_duration).set_position(("center", "bottom")))
124
  clips.append(txt_clip)
125
  current_time += sentence_duration
126
  return clips
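TextClip with method="caption" goes through ImageMagick, and "Arial-Bold" must be a font ImageMagick actually knows; on a bare container it often is not, which makes every subtitle clip fail. A quick way to check which names are usable, assuming MoviePy 1.x's TextClip.list helper:

    from moviepy.editor import TextClip

    # List the font names ImageMagick exposes and look for usable candidates.
    fonts = TextClip.list("font")
    print([f for f in fonts if "Arial" in f or "DejaVu" in f])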
 
132
  return np.repeat(noise, 3, axis=2)
133
  return VideoClip(make_frame, duration=duration).set_opacity(0.15)
134
 
135
+ def build_video(script_text: str, generate_script_flag: bool, voice: str, music_path: str | None, task_id: str) -> str:
136
  tmp_dir = tempfile.mkdtemp()
137
  try:
138
+ update_task_progress(task_id, "Paso 1/7: Generando guion...")
139
  script = gpt2_script(script_text) if generate_script_flag else script_text.strip()
140
+
141
+ update_task_progress(task_id, f"Paso 2/7: Creando audio con voz '{voice}'...")
142
  voice_path = os.path.join(tmp_dir, "voice.mp3")
143
  asyncio.run(edge_tts_synth(script, voice, voice_path))
144
  voice_clip = AudioFileClip(voice_path)
145
  video_duration = voice_clip.duration
146
  if video_duration < 1: raise ValueError("El audio generado es demasiado corto.")
147
+
148
+ update_task_progress(task_id, "Paso 3/7: Buscando clips de vídeo en Pexels...")
149
  video_paths = []
150
+ kws = keywords(script)
151
+ for i, kw in enumerate(kws):
152
+ update_task_progress(task_id, f"Paso 3/7: Buscando clips... (keyword {i+1}/{len(kws)}: '{kw}')")
153
  if len(video_paths) >= 8: break
154
  for video_data in pexels_search(kw, 2):
155
  best_file = max(video_data.get("video_files", []), key=lambda f: f.get("width", 0))
 
158
  if path: video_paths.append(path)
159
  if len(video_paths) >= 8: break
160
  if not video_paths: raise RuntimeError("No se encontraron vídeos en Pexels.")
161
+
162
+ update_task_progress(task_id, f"Paso 4/7: Ensamblando {len(video_paths)} clips de vídeo...")
163
+ segments = [VideoFileClip(p).subclip(0, min(8, VideoFileClip(p).duration)) for p in video_paths]
164
+ base_video = concatenate_videoclips(segments, method="chain")
165
  if base_video.duration < video_duration:
166
+ base_video = concatenate_videoclips([base_video] * math.ceil(video_duration / base_video.duration))
 
167
  base_video = base_video.subclip(0, video_duration)
168
+
169
+ update_task_progress(task_id, "Paso 5/7: Componiendo audio final...")
170
  if music_path:
171
  music_clip = loop_audio(AudioFileClip(music_path), video_duration).volumex(0.20)
172
  final_audio = CompositeAudioClip([music_clip, voice_clip])
173
  else: final_audio = voice_clip
174
+
175
+ update_task_progress(task_id, "Paso 6/7: Añadiendo subtítulos y efectos...")
176
  subtitles = make_subtitle_clips(script, base_video.w, base_video.h, video_duration)
177
  grain_effect = make_grain_clip(base_video.size, video_duration)
178
+
179
+ update_task_progress(task_id, "Paso 7/7: Renderizando vídeo final (esto puede tardar varios minutos)...")
180
  final_video = CompositeVideoClip([base_video, grain_effect, *subtitles]).set_audio(final_audio)
181
  output_path = os.path.join(tmp_dir, "final_video.mp4")
182
+ final_video.write_videofile(output_path, fps=24, codec="libx264", audio_codec="aac", threads=2, logger=None)
183
+
184
  return output_path
185
  finally:
 
186
  if 'voice_clip' in locals(): voice_clip.close()
187
  if 'music_clip' in locals(): music_clip.close()
188
  if 'base_video' in locals(): base_video.close()
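The segments list comprehension in build_video opens every downloaded file twice (once just to read its duration, once for the subclip), and neither handle is closed in the finally block. A sketch that opens each clip exactly once and hands the handles back so they can be closed alongside the other clips; load_segments is a hypothetical helper, not part of the app:

    from moviepy.editor import VideoFileClip, concatenate_videoclips

    def load_segments(video_paths: list[str], max_len: float = 8.0):
        # Open each downloaded file exactly once; return the handles so the caller
        # can close them in its finally block.
        clips = [VideoFileClip(p) for p in video_paths]
        segments = [c.subclip(0, min(max_len, c.duration)) for c in clips]
        return clips, concatenate_videoclips(segments, method="chain")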
 
193
  def worker(task_id: str, mode: str, topic: str, user_script: str, voice: str, music: str | None):
194
  try:
195
  text = topic if mode == "Generar Guion con IA" else user_script
196
+ result_tmp_path = build_video(text, mode == "Generar Guion con IA", voice, music, task_id)
197
  final_path = os.path.join(RESULTS_DIR, f"{task_id}.mp4")
198
  shutil.copy2(result_tmp_path, final_path)
199
+ TASKS[task_id].update({"status": "done", "result": final_path})
200
  shutil.rmtree(os.path.dirname(result_tmp_path))
201
  except Exception as e:
202
+ logger.error(f"Error en el worker para la tarea {task_id}: {e}", exc_info=True)
203
+ TASKS[task_id].update({"status": "error", "error": str(e)})
204
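worker removes the temporary directory only on the success path, so a failure inside build_video leaves its mkdtemp directory behind. One way to make the cleanup unconditional is to let the caller own the directory; the sketch below assumes a hypothetical tmp_dir parameter that build_video would have to accept instead of calling tempfile.mkdtemp itself:

    import tempfile

    def worker(task_id, mode, topic, user_script, voice, music):
        try:
            text = topic if mode == "Generar Guion con IA" else user_script
            # TemporaryDirectory removes itself even when build_video raises.
            with tempfile.TemporaryDirectory() as tmp_dir:
                result_tmp_path = build_video(text, mode == "Generar Guion con IA",
                                              voice, music, task_id, tmp_dir=tmp_dir)
                final_path = os.path.join(RESULTS_DIR, f"{task_id}.mp4")
                shutil.copy2(result_tmp_path, final_path)
            TASKS[task_id].update({"status": "done", "result": final_path})
        except Exception as e:
            logger.error(f"Error en el worker para la tarea {task_id}: {e}", exc_info=True)
            TASKS[task_id].update({"status": "error", "error": str(e)})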
 
205
  def janitor_thread():
206
  while True:
207
  time.sleep(3600)
208
  now = datetime.utcnow()
209
+ logger.info("[JANITOR] Realizando limpieza de vídeos antiguos...")
210
  for task_id, info in list(TASKS.items()):
211
+ if "timestamp" in info and now - info["timestamp"] > timedelta(hours=24):
212
+ if info.get("result") and os.path.exists(info.get("result")):
213
  try:
214
  os.remove(info["result"])
215
+ logger.info(f"[JANITOR] Eliminado: {info['result']}")
216
  except Exception as e:
217
+ logger.error(f"[JANITOR] Error al eliminar {info['result']}: {e}")
218
  del TASKS[task_id]
219
 
220
  threading.Thread(target=janitor_thread, daemon=True).start()
221
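The janitor only sees entries still present in TASKS, so a video copied into RESULTS_DIR before a Space restart is never cleaned up. A complementary sweep by file age, using only the filesystem (the 24-hour cutoff mirrors the in-memory janitor):

    import os
    import time

    def sweep_results_dir(directory: str, max_age_hours: float = 24.0) -> None:
        # Remove rendered videos older than the cutoff, regardless of what TASKS remembers.
        cutoff = time.time() - max_age_hours * 3600
        for entry in os.scandir(directory):
            if entry.is_file() and entry.stat().st_mtime < cutoff:
                try:
                    os.remove(entry.path)
                except OSError:
                    pass

Calling sweep_results_dir(RESULTS_DIR) from the same hourly loop would cover files orphaned by a restart.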
 
222
+ def generate_and_monitor(mode, topic, user_script, voice, music):
223
+ content = topic if mode == "Generar Guion con IA" else user_script
224
+ if not content.strip():
225
+ yield "Por favor, ingresa un tema o guion.", None, None
226
+ return
227
+
228
+ task_id = uuid.uuid4().hex[:8]
229
+ TASKS[task_id] = {"status": "processing", "progress_log": "Iniciando tarea...", "timestamp": datetime.utcnow()}
230
+
231
+ worker_thread = threading.Thread(target=worker, args=(task_id, mode, topic, user_script, voice, music), daemon=True)
232
+ worker_thread.start()
233
+
234
+ while TASKS[task_id]["status"] == "processing":
235
+ yield TASKS[task_id]['progress_log'], None, None
236
+ time.sleep(1)
237
+
238
+ if TASKS[task_id]["status"] == "error":
239
+ yield f"❌ Error: {TASKS[task_id]['error']}", None, None
240
+ elif TASKS[task_id]["status"] == "done":
241
+ yield "✅ ¡Vídeo completado!", TASKS[task_id]['result'], TASKS[task_id]['result']
242
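generate_and_monitor streams progress because it is a generator: each yield pushes the latest log line to the progress_log textbox roughly once per second. Gradio's built-in progress indicator could complement this; a minimal, self-contained sketch assuming the standard gr.Progress API (the function and step names are illustrative):

    import time
    import gradio as gr

    def long_task(topic: str, progress=gr.Progress()):
        steps = ["guion", "voz", "clips", "render"]
        for i, step in enumerate(steps):
            progress((i + 1) / len(steps), desc=f"Procesando: {step}")
            time.sleep(1)  # placeholder for the real pipeline step
        return f"Listo: {topic}"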
+
243
  with gr.Blocks(title="Generador de Vídeos IA", theme=gr.themes.Soft()) as demo:
244
  gr.Markdown("# 🎬 Generador de Vídeos con IA")
245
+ gr.Markdown("Crea vídeos a partir de texto con voz, música y efectos visuales. El progreso se mostrará en tiempo real.")
246
+
247
+ with gr.Row():
248
+ with gr.Column(scale=2):
249
+ mode_radio = gr.Radio(["Generar Guion con IA", "Usar Mi Guion"], value="Generar Guion con IA", label="Elige el método")
250
+ topic_textbox = gr.Textbox(label="Tema para la IA", placeholder="Ej: La exploración espacial y sus desafíos")
251
+ script_textbox = gr.Textbox(label="Tu Guion Completo", lines=5, visible=False, placeholder="Pega aquí tu guion...")
252
+ voice_dropdown = gr.Dropdown(SPANISH_VOICES, value=SPANISH_VOICES[0], label="Elige una voz")
253
+ music_upload = gr.Audio(type="filepath", label="Música de fondo (opcional)")
254
+ submit_button = gr.Button("✨ Generar Vídeo", variant="primary")
255
+
256
+ with gr.Column(scale=2):
257
+ gr.Markdown("## Progreso y Resultados")
258
+ progress_log = gr.Textbox(label="Log de Progreso en Tiempo Real", lines=10, interactive=False)
259
  video_output = gr.Video(label="Resultado del Vídeo")
260
  download_file_output = gr.File(label="Descargar Fichero")
261
+
262
  def toggle_textboxes(mode):
263
+ return gr.update(visible=mode == "Generar Guion con IA"), gr.update(visible=mode != "Generar Guion con IA")
264
+
265
  mode_radio.change(toggle_textboxes, inputs=mode_radio, outputs=[topic_textbox, script_textbox])
266
+
267
+ submit_button.click(
268
+ fn=generate_and_monitor,
269
+ inputs=[mode_radio, topic_textbox, script_textbox, voice_dropdown, music_upload],
270
+ outputs=[progress_log, video_output, download_file_output]
271
+ )
272
 
273
  if __name__ == "__main__":
274
  demo.launch()