gnosticdev committed
Commit 18e4b7b · verified · 1 Parent(s): d5732bd

Update app.py

Files changed (1)
  1. app.py +115 -127
app.py CHANGED
@@ -20,52 +20,72 @@ PEXELS_API_KEY = os.getenv("PEXELS_API_KEY")
  if not PEXELS_API_KEY:
      raise RuntimeError("Debes definir PEXELS_API_KEY en 'Settings' -> 'Variables & secrets'")

- # Carga de modelos (se hace una sola vez al iniciar el Space)
- tokenizer = GPT2Tokenizer.from_pretrained("datificate/gpt2-small-spanish")
- gpt2_model = GPT2LMHeadModel.from_pretrained("datificate/gpt2-small-spanish").eval()
- if tokenizer.pad_token is None:
-     tokenizer.pad_token = tokenizer.eos_token
- kw_model = KeyBERT("distilbert-base-multilingual-cased")
+ # --- Modelos inicializados como None para Carga Perezosa (Lazy Loading) ---
+ tokenizer = None
+ gpt2_model = None
+ kw_model = None
+ # ---

  RESULTS_DIR = "video_results"
  os.makedirs(RESULTS_DIR, exist_ok=True)
- TASKS = {} # Diccionario para almacenar el estado de las tareas
+ TASKS = {}

- # ------------------- Funciones del Pipeline de Vídeo -------------------
- def get_edge_voices_es():
-     """Obtiene y cachea la lista de voces en español de edge-tts."""
-     try:
-         voices = asyncio.run(edge_tts.list_voices())
-         es_voices = [v['ShortName'] for v in voices if v['Locale'].startswith('es-')]
-         return sorted(es_voices)
-     except Exception as e:
-         logger.error(f"No se pudieron cargar las voces de Edge TTS: {e}")
-         return ["es-ES-ElviraNeural"] # Fallback
+ # --- Lista de Voces Fija para un Arranque Instantáneo ---
+ SPANISH_VOICES = [
+     "es-ES-ElviraNeural", "es-ES-AlvaroNeural", "es-MX-DaliaNeural", "es-MX-JorgeNeural",
+     "es-AR-ElenaNeural", "es-AR-TomasNeural", "es-CO-SalomeNeural", "es-CO-GonzaloNeural",
+     "es-US-PalomaNeural", "es-US-AlonsoNeural"
+ ]
+
+ # ------------------- Funciones para cargar modelos bajo demanda -------------------
+ def get_tokenizer():
+     global tokenizer
+     if tokenizer is None:
+         logger.info("Cargando tokenizer por primera vez...")
+         tokenizer = GPT2Tokenizer.from_pretrained("datificate/gpt2-small-spanish")
+         if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token
+     return tokenizer
+
+ def get_gpt2_model():
+     global gpt2_model
+     if gpt2_model is None:
+         logger.info("Cargando modelo GPT-2 por primera vez...")
+         gpt2_model = GPT2LMHeadModel.from_pretrained("datificate/gpt2-small-spanish").eval()
+     return gpt2_model

- SPANISH_VOICES = get_edge_voices_es()
+ def get_kw_model():
+     global kw_model
+     if kw_model is None:
+         logger.info("Cargando modelo KeyBERT por primera vez...")
+         kw_model = KeyBERT("distilbert-base-multilingual-cased")
+     return kw_model

+ # ------------------- Funciones del Pipeline de Vídeo -------------------
  def gpt2_script(prompt: str, max_len: int = 160) -> str:
-     instruction = f"Escribe un guion corto, interesante y coherente sobre: {prompt}"
-     inputs = tokenizer(instruction, return_tensors="pt", truncation=True, max_length=512)
-     outputs = gpt2_model.generate(
+     local_tokenizer = get_tokenizer()
+     local_gpt2_model = get_gpt2_model()
+     instruction = f"Escribe un guion corto y coherente sobre: {prompt}"
+     inputs = local_tokenizer(instruction, return_tensors="pt", truncation=True, max_length=512)
+     outputs = local_gpt2_model.generate(
          **inputs, max_length=max_len + inputs["input_ids"].shape[1], do_sample=True,
          top_p=0.9, top_k=40, temperature=0.7, no_repeat_ngram_size=3,
-         pad_token_id=tokenizer.pad_token_id, eos_token_id=tokenizer.eos_token_id,
+         pad_token_id=local_tokenizer.pad_token_id, eos_token_id=local_tokenizer.eos_token_id,
      )
-     text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     text = local_tokenizer.decode(outputs[0], skip_special_tokens=True)
      return text.split("sobre:")[-1].strip()[:max_len]

  async def edge_tts_synth(text: str, voice: str, path: str):
-     """Sintetiza audio usando edge-tts de forma asíncrona."""
      communicate = edge_tts.Communicate(text, voice)
      await communicate.save(path)

  def keywords(text: str) -> list[str]:
+     local_kw_model = get_kw_model()
      clean_text = re.sub(r"[^\w\sáéíóúñÁÉÍÓÚÑ]", "", text.lower())
      try:
-         kws = kw_model.extract_keywords(clean_text, stop_words="spanish", top_n=5)
+         kws = local_kw_model.extract_keywords(clean_text, stop_words="spanish", top_n=5)
          return [k.replace(" ", "+") for k, _ in kws if k]
-     except Exception:
+     except Exception as e:
+         logger.warning(f"KeyBERT falló, usando método simple. Error: {e}")
          words = [w for w in clean_text.split() if len(w) > 4]
          return [w for w, _ in Counter(words).most_common(5)] or ["naturaleza"]

@@ -80,35 +100,33 @@ def pexels_search(query: str, count: int) -> list[dict]:
      return res.json().get("videos", [])

  def download_file(url: str, folder: str) -> str | None:
-     name = uuid.uuid4().hex + ".mp4"
-     path = os.path.join(folder, name)
-     with requests.get(url, stream=True, timeout=60) as r:
-         r.raise_for_status()
-         with open(path, "wb") as f:
-             for chunk in r.iter_content(1024 * 1024):
-                 f.write(chunk)
-     return path if os.path.exists(path) and os.path.getsize(path) > 1000 else None
+     try:
+         name = uuid.uuid4().hex + ".mp4"
+         path = os.path.join(folder, name)
+         with requests.get(url, stream=True, timeout=60) as r:
+             r.raise_for_status()
+             with open(path, "wb") as f:
+                 for chunk in r.iter_content(1024 * 1024): f.write(chunk)
+         return path if os.path.exists(path) and os.path.getsize(path) > 1000 else None
+     except Exception as e:
+         logger.error(f"Fallo al descargar {url}: {e}")
+         return None

  def loop_audio(audio_clip: AudioFileClip, duration: float) -> AudioFileClip:
-     if audio_clip.duration >= duration:
-         return audio_clip.subclip(0, duration)
+     if audio_clip.duration >= duration: return audio_clip.subclip(0, duration)
      loops = math.ceil(duration / audio_clip.duration)
      return concatenate_audioclips([audio_clip] * loops).subclip(0, duration)

  def make_subtitle_clips(script: str, video_w: int, video_h: int, duration: float):
      sentences = [s.strip() for s in re.split(r"[.!?¿¡]", script) if s.strip()]
      if not sentences: return []
-
-     total_words = sum(len(s.split()) for s in sentences)
-     if total_words == 0: return []
-
+     total_words = sum(len(s.split()) for s in sentences) or 1
      time_per_word = duration / total_words
      clips, current_time = [], 0.0
-
      for sentence in sentences:
          num_words = len(sentence.split())
          sentence_duration = num_words * time_per_word
-
+         if sentence_duration < 0.1: continue
          txt_clip = (
              TextClip(sentence, fontsize=int(video_h * 0.05), color="white",
                       stroke_color="black", stroke_width=1.5, method="caption",
@@ -119,7 +137,6 @@ def make_subtitle_clips(script: str, video_w: int, video_h: int, duration: float
          )
          clips.append(txt_clip)
          current_time += sentence_duration
-
      return clips

  def make_grain_clip(size: tuple[int, int], duration: float):
@@ -129,109 +146,87 @@ def make_grain_clip(size: tuple[int, int], duration: float):
          return np.repeat(noise, 3, axis=2)
      return VideoClip(make_frame, duration=duration).set_opacity(0.15)

- # ------------------- Función Principal de Creación de Vídeo -------------------
  def build_video(script_text: str, generate_script_flag: bool, voice: str, music_path: str | None) -> str:
      tmp_dir = tempfile.mkdtemp()
-
-     # 1. Guion
-     script = gpt2_script(script_text) if generate_script_flag else script_text.strip()
-
-     # 2. Voz (TTS)
-     voice_path = os.path.join(tmp_dir, "voice.mp3")
-     asyncio.run(edge_tts_synth(script, voice, voice_path))
-     voice_clip = AudioFileClip(voice_path)
-     video_duration = voice_clip.duration
-
-     # 3. Clips de Pexels
-     video_paths = []
-     for kw in keywords(script):
-         if len(video_paths) >= 8: break
-         for video_data in pexels_search(kw, 2):
-             best_file = max(video_data["video_files"], key=lambda f: f.get("width", 0) * f.get("height", 0))
-             path = download_file(best_file['link'], tmp_dir)
-             if path:
-                 video_paths.append(path)
+     try:
+         script = gpt2_script(script_text) if generate_script_flag else script_text.strip()
+         voice_path = os.path.join(tmp_dir, "voice.mp3")
+         asyncio.run(edge_tts_synth(script, voice, voice_path))
+         voice_clip = AudioFileClip(voice_path)
+         video_duration = voice_clip.duration
+         if video_duration < 1: raise ValueError("El audio generado es demasiado corto.")
+         video_paths = []
+         for kw in keywords(script):
              if len(video_paths) >= 8: break
-     if not video_paths:
-         raise RuntimeError("No se encontraron vídeos en Pexels para este guion.")
-
-     # 4. Ensamblado de vídeo base
-     segments, total_duration = [], 0
-     for path in video_paths:
-         if total_duration >= video_duration + 5: break
-         clip = VideoFileClip(path)
-         segment = clip.subclip(0, min(8, clip.duration))
-         segments.append(segment)
-         total_duration += segment.duration
-     base_video = concatenate_videoclips(segments, method="chain")
-     if base_video.duration < video_duration:
-         base_video = loop_audio(base_video, video_duration) # Reutiliza loop_audio para vídeo si es necesario
-     base_video = base_video.subclip(0, video_duration)
-
-     # 5. Audio de fondo
-     if music_path:
-         music_clip = loop_audio(AudioFileClip(music_path), video_duration).volumex(0.20)
-         final_audio = CompositeAudioClip([music_clip, voice_clip])
-     else:
-         final_audio = voice_clip
-
-     # 6. Efectos y subtítulos
-     subtitles = make_subtitle_clips(script, base_video.w, base_video.h, video_duration)
-     grain_effect = make_grain_clip(base_video.size, video_duration)
-
-     # 7. Composición final y renderizado
-     final_video = CompositeVideoClip([base_video, grain_effect, *subtitles]).set_audio(final_audio)
-     output_path = os.path.join(tmp_dir, "final_video.mp4")
-     final_video.write_videofile(output_path, fps=24, codec="libx264", audio_codec="aac", logger=None)
-
-     return output_path
+             for video_data in pexels_search(kw, 2):
+                 best_file = max(video_data.get("video_files", []), key=lambda f: f.get("width", 0))
+                 if best_file:
+                     path = download_file(best_file.get('link'), tmp_dir)
+                     if path: video_paths.append(path)
+                     if len(video_paths) >= 8: break
+         if not video_paths: raise RuntimeError("No se encontraron vídeos en Pexels.")
+         segments = []
+         for path in video_paths:
+             try: segments.append(VideoFileClip(path))
+             except Exception as e: logger.warning(f"No se pudo cargar el clip {path}: {e}")
+         if not segments: raise RuntimeError("Los clips descargados no son válidos.")
+         final_segments = [s.subclip(0, min(8, s.duration)) for s in segments]
+         base_video = concatenate_videoclips(final_segments, method="chain")
+         if base_video.duration < video_duration:
+             num_loops = math.ceil(video_duration / base_video.duration)
+             base_video = concatenate_videoclips([base_video] * num_loops, method="chain")
+         base_video = base_video.subclip(0, video_duration)
+         if music_path:
+             music_clip = loop_audio(AudioFileClip(music_path), video_duration).volumex(0.20)
+             final_audio = CompositeAudioClip([music_clip, voice_clip])
+         else: final_audio = voice_clip
+         subtitles = make_subtitle_clips(script, base_video.w, base_video.h, video_duration)
+         grain_effect = make_grain_clip(base_video.size, video_duration)
+         final_video = CompositeVideoClip([base_video, grain_effect, *subtitles]).set_audio(final_audio)
+         output_path = os.path.join(tmp_dir, "final_video.mp4")
+         final_video.write_videofile(output_path, fps=24, codec="libx264", audio_codec="aac", threads=2, logger=None)
+         return output_path
+     finally:
+         # Intenta cerrar todos los clips de MoviePy para liberar memoria
+         if 'voice_clip' in locals(): voice_clip.close()
+         if 'music_clip' in locals(): music_clip.close()
+         if 'base_video' in locals(): base_video.close()
+         if 'final_video' in locals(): final_video.close()
+         if 'segments' in locals():
+             for seg in segments: seg.close()

- # ------------------- Sistema de Tareas Asíncronas y Limpieza -------------------
  def worker(task_id: str, mode: str, topic: str, user_script: str, voice: str, music: str | None):
      try:
          text = topic if mode == "Generar Guion con IA" else user_script
          result_tmp_path = build_video(text, mode == "Generar Guion con IA", voice, music)
-
          final_path = os.path.join(RESULTS_DIR, f"{task_id}.mp4")
          shutil.copy2(result_tmp_path, final_path)
-
          TASKS[task_id] = {"status": "done", "result": final_path, "timestamp": datetime.utcnow()}
-         shutil.rmtree(os.path.dirname(result_tmp_path)) # Limpia el directorio temporal
+         shutil.rmtree(os.path.dirname(result_tmp_path))
      except Exception as e:
          logger.error(f"Error en la tarea {task_id}: {e}", exc_info=True)
          TASKS[task_id] = {"status": "error", "error": str(e), "timestamp": datetime.utcnow()}

  def submit_task(mode, topic, user_script, voice, music):
      content = topic if mode == "Generar Guion con IA" else user_script
-     if not content.strip():
-         return "", "Por favor, ingresa un tema o guion."
-
+     if not content.strip(): return "", "Por favor, ingresa un tema o guion."
      task_id = uuid.uuid4().hex[:8]
      TASKS[task_id] = {"status": "processing", "timestamp": datetime.utcnow()}
-
      threading.Thread(target=worker, args=(task_id, mode, topic, user_script, voice, music), daemon=True).start()
-
      return task_id, f"✅ Tarea creada con ID: {task_id}. Comprueba el estado en unos minutos."

  def check_task_status(task_id):
-     if not task_id or task_id not in TASKS:
-         return None, None, "ID de tarea no válido o no encontrado."
-
+     if not task_id or task_id not in TASKS: return None, None, "ID de tarea no válido o no encontrado."
      task_info = TASKS[task_id]
      status = task_info["status"]
-
-     if status == "processing":
-         return None, None, " La tarea se está procesando..."
-     if status == "error":
-         return None, None, f"❌ Error en la tarea: {task_info['error']}"
-     if status == "done":
-         return task_info["result"], task_info["result"], "✅ ¡Vídeo listo para descargar!"
+     if status == "processing": return None, None, "⏳ La tarea se está procesando..."
+     if status == "error": return None, None, f"❌ Error: {task_info['error']}"
+     if status == "done": return task_info["result"], task_info["result"], " ¡Vídeo listo!"
      return None, None, "Estado desconocido."

  def janitor_thread():
-     """Hilo que se ejecuta periódicamente para limpiar vídeos antiguos."""
      while True:
-         time.sleep(3600) # Cada hora
+         time.sleep(3600)
          now = datetime.utcnow()
          for task_id, info in list(TASKS.items()):
              if now - info["timestamp"] > timedelta(hours=24):
@@ -245,11 +240,9 @@ def janitor_thread():

  threading.Thread(target=janitor_thread, daemon=True).start()

- # ------------------- Interfaz de Gradio -------------------
  with gr.Blocks(title="Generador de Vídeos IA", theme=gr.themes.Soft()) as demo:
      gr.Markdown("# 🎬 Generador de Vídeos con IA")
-     gr.Markdown("Crea vídeos a partir de texto, con voz, música, subtítulos y efectos visuales.")
-
+     gr.Markdown("Crea vídeos a partir de texto, con voz, música y efectos visuales.")
      with gr.Tabs():
          with gr.TabItem("1. Crear Vídeo"):
              with gr.Row():
@@ -257,30 +250,25 @@ with gr.Blocks(title="Generador de Vídeos IA", theme=gr.themes.Soft()) as demo:
                  mode_radio = gr.Radio(["Generar Guion con IA", "Usar Mi Guion"], value="Generar Guion con IA", label="Elige el método")
                  topic_textbox = gr.Textbox(label="Tema para la IA", placeholder="Ej: La historia de la Vía Láctea")
                  script_textbox = gr.Textbox(label="Tu Guion Completo", lines=5, visible=False, placeholder="Pega aquí tu guion...")
-                 voice_dropdown = gr.Dropdown(SPANISH_VOICES, value=SPANISH_VOICES[0] if SPANISH_VOICES else None, label="Elige una voz")
+                 voice_dropdown = gr.Dropdown(SPANISH_VOICES, value=SPANISH_VOICES[0], label="Elige una voz")
                  music_upload = gr.Audio(type="filepath", label="Música de fondo (opcional)")
                  submit_button = gr.Button("✨ Generar Vídeo", variant="primary")
              with gr.Column(scale=1):
                  task_id_output = gr.Textbox(label="ID de tu Tarea (Guárdalo)", interactive=False)
                  status_output = gr.Textbox(label="Estado", interactive=False)
                  gr.Markdown("---")
-                 gr.Markdown("### ¿Cómo funciona?\n1. Elige un método y rellena el texto.\n2. (Opcional) Sube música de fondo.\n3. Pulsa **Generar Vídeo**.\n4. **Copia el ID** que aparecerá.\n5. Ve a la pestaña **'2. Revisar Estado'** para ver tu vídeo.")
-
+                 gr.Markdown("### ¿Cómo funciona?\n1. Elige un método y rellena el texto.\n2. **Copia el ID** que aparecerá.\n3. Ve a la pestaña **'2. Revisar Estado'**.")
          with gr.TabItem("2. Revisar Estado"):
              gr.Markdown("### Consulta el estado de tu vídeo")
              with gr.Row():
                  task_id_input = gr.Textbox(label="Pega aquí el ID de tu tarea", scale=3)
                  check_button = gr.Button("🔍 Verificar", scale=1)
-
              status_check_output = gr.Textbox(label="Estado Actual", interactive=False)
              video_output = gr.Video(label="Resultado del Vídeo")
              download_file_output = gr.File(label="Descargar Fichero")
-
-     # Lógica de la interfaz
      def toggle_textboxes(mode):
          is_ai_mode = mode == "Generar Guion con IA"
          return gr.update(visible=is_ai_mode), gr.update(visible=not is_ai_mode)
-
      mode_radio.change(toggle_textboxes, inputs=mode_radio, outputs=[topic_textbox, script_textbox])
      submit_button.click(submit_task, inputs=[mode_radio, topic_textbox, script_textbox, voice_dropdown, music_upload], outputs=[task_id_output, status_output])
      check_button.click(check_task_status, inputs=task_id_input, outputs=[video_output, download_file_output, status_check_output])
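Note on the lazy loaders introduced in this commit: get_tokenizer(), get_gpt2_model() and get_kw_model() mutate module-level globals, and build_video() runs inside daemon threads started by submit_task(), so two tasks submitted before the first load finishes could both trigger a load. A minimal thread-safe variant of the same pattern (a sketch only, not part of this commit; the _model_lock name is illustrative):

import threading

from transformers import GPT2Tokenizer

_model_lock = threading.Lock()  # hypothetical guard, not present in app.py
tokenizer = None

def get_tokenizer():
    # Same lazy-loading idea as the commit, but only one thread may initialize.
    global tokenizer
    if tokenizer is None:
        with _model_lock:
            if tokenizer is None:  # re-check after acquiring the lock
                tok = GPT2Tokenizer.from_pretrained("datificate/gpt2-small-spanish")
                if tok.pad_token is None:
                    tok.pad_token = tok.eos_token
                tokenizer = tok
    return tokenizer

The same guard would apply to get_gpt2_model() and get_kw_model(); without it the worst case under CPython is duplicated loading work and transient extra memory rather than corruption, so this is optional hardening.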