Update app.py
app.py (CHANGED)
Removed lines from the previous version of app.py include:

@@ -1,4 +1,4 @@
- import os, re, math, uuid, time, shutil, logging, tempfile, threading, requests, numpy as np

@@ -6,66 +6,80 @@ import gradio as gr
- raise RuntimeError("Debes definir PEXELS_API_KEY en Variables & secrets")
- tts_engine = TTS(model_name="tts_models/es/css10/vits", progress_bar=False, gpu=False)
- TASKS = {}

@@ -73,165 +87,203 @@ def download(url: str, folder: str) -> str | None:
- return path if os.path.getsize(path) > 1000 else None
- if acc >= adur + 2:
-     break
- base = base.subclip(0, adur)
- if music_fp:
-     mclip = loop_audio(AudioFileClip(music_fp), adur).volumex(0.2)
-     audio = CompositeAudioClip([mclip, voice_clip])
- else:
-     audio = voice_clip
- with gr.TabItem("Crear Vídeo"):
New version of app.py:

import os, re, math, uuid, time, shutil, logging, tempfile, threading, requests, asyncio, numpy as np
from datetime import datetime, timedelta
from collections import Counter

import gradio as gr
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from keybert import KeyBERT
import edge_tts
from moviepy.editor import (
    VideoFileClip, AudioFileClip, concatenate_videoclips, concatenate_audioclips,
    CompositeAudioClip, AudioClip, TextClip, CompositeVideoClip, VideoClip
)

# ------------------- Configuración & Globals -------------------
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s")
logger = logging.getLogger(__name__)

PEXELS_API_KEY = os.getenv("PEXELS_API_KEY")
if not PEXELS_API_KEY:
    raise RuntimeError("Debes definir PEXELS_API_KEY en 'Settings' -> 'Variables & secrets'")

# Carga de modelos (se hace una sola vez al iniciar el Space)
tokenizer = GPT2Tokenizer.from_pretrained("datificate/gpt2-small-spanish")
gpt2_model = GPT2LMHeadModel.from_pretrained("datificate/gpt2-small-spanish").eval()
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
kw_model = KeyBERT("distilbert-base-multilingual-cased")

RESULTS_DIR = "video_results"
os.makedirs(RESULTS_DIR, exist_ok=True)
TASKS = {}  # Diccionario para almacenar el estado de las tareas

# ------------------- Funciones del Pipeline de Vídeo -------------------
def get_edge_voices_es():
    """Obtiene y cachea la lista de voces en español de edge-tts."""
    try:
        voices = asyncio.run(edge_tts.list_voices())
        es_voices = [v['ShortName'] for v in voices if v['Locale'].startswith('es-')]
        return sorted(es_voices)
    except Exception as e:
        logger.error(f"No se pudieron cargar las voces de Edge TTS: {e}")
        return ["es-ES-ElviraNeural"]  # Fallback

SPANISH_VOICES = get_edge_voices_es()

def gpt2_script(prompt: str, max_len: int = 160) -> str:
    instruction = f"Escribe un guion corto, interesante y coherente sobre: {prompt}"
    inputs = tokenizer(instruction, return_tensors="pt", truncation=True, max_length=512)
    outputs = gpt2_model.generate(
        **inputs, max_length=max_len + inputs["input_ids"].shape[1], do_sample=True,
        top_p=0.9, top_k=40, temperature=0.7, no_repeat_ngram_size=3,
        pad_token_id=tokenizer.pad_token_id, eos_token_id=tokenizer.eos_token_id,
    )
    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return text.split("sobre:")[-1].strip()[:max_len]

async def edge_tts_synth(text: str, voice: str, path: str):
    """Sintetiza audio usando edge-tts de forma asíncrona."""
    communicate = edge_tts.Communicate(text, voice)
    await communicate.save(path)

def keywords(text: str) -> list[str]:
    clean_text = re.sub(r"[^\w\sáéíóúñÁÉÍÓÚÑ]", "", text.lower())
    try:
        kws = kw_model.extract_keywords(clean_text, stop_words="spanish", top_n=5)
        return [k.replace(" ", "+") for k, _ in kws if k]
    except Exception:
        words = [w for w in clean_text.split() if len(w) > 4]
        return [w for w, _ in Counter(words).most_common(5)] or ["naturaleza"]

def pexels_search(query: str, count: int) -> list[dict]:
    res = requests.get(
        "https://api.pexels.com/videos/search",
        headers={"Authorization": PEXELS_API_KEY},
        params={"query": query, "per_page": count, "orientation": "landscape"},
        timeout=20,
    )
    res.raise_for_status()
    return res.json().get("videos", [])

def download_file(url: str, folder: str) -> str | None:
    name = uuid.uuid4().hex + ".mp4"
    path = os.path.join(folder, name)
    with requests.get(url, stream=True, timeout=60) as r:
        with open(path, "wb") as f:
            for chunk in r.iter_content(1024 * 1024):
                f.write(chunk)
    return path if os.path.exists(path) and os.path.getsize(path) > 1000 else None

def loop_audio(audio_clip: AudioFileClip, duration: float) -> AudioFileClip:
    if audio_clip.duration >= duration:
        return audio_clip.subclip(0, duration)
    loops = math.ceil(duration / audio_clip.duration)
    return concatenate_audioclips([audio_clip] * loops).subclip(0, duration)

def make_subtitle_clips(script: str, video_w: int, video_h: int, duration: float):
    sentences = [s.strip() for s in re.split(r"[.!?¿¡]", script) if s.strip()]
    if not sentences: return []

    total_words = sum(len(s.split()) for s in sentences)
    if total_words == 0: return []

    time_per_word = duration / total_words
    clips, current_time = [], 0.0

    for sentence in sentences:
        num_words = len(sentence.split())
        sentence_duration = num_words * time_per_word

        txt_clip = (
            TextClip(sentence, fontsize=int(video_h * 0.05), color="white",
                     stroke_color="black", stroke_width=1.5, method="caption",
                     size=(int(video_w * 0.9), None), font="Arial-Bold")
            .set_start(current_time)
            .set_duration(sentence_duration)
            .set_position(("center", "bottom"))
        )
        clips.append(txt_clip)
        current_time += sentence_duration

    return clips

def make_grain_clip(size: tuple[int, int], duration: float):
    w, h = size
    def make_frame(t):
        noise = np.random.randint(0, 40, (h, w, 1), dtype=np.uint8)
        return np.repeat(noise, 3, axis=2)
    return VideoClip(make_frame, duration=duration).set_opacity(0.15)

# ------------------- Función Principal de Creación de Vídeo -------------------
def build_video(script_text: str, generate_script_flag: bool, voice: str, music_path: str | None) -> str:
    tmp_dir = tempfile.mkdtemp()

    # 1. Guion
    script = gpt2_script(script_text) if generate_script_flag else script_text.strip()

    # 2. Voz (TTS)
    voice_path = os.path.join(tmp_dir, "voice.mp3")
    asyncio.run(edge_tts_synth(script, voice, voice_path))
    voice_clip = AudioFileClip(voice_path)
    video_duration = voice_clip.duration

    # 3. Clips de Pexels
    video_paths = []
    for kw in keywords(script):
        if len(video_paths) >= 8: break
        for video_data in pexels_search(kw, 2):
            best_file = max(video_data["video_files"], key=lambda f: f.get("width", 0) * f.get("height", 0))
            path = download_file(best_file['link'], tmp_dir)
            if path:
                video_paths.append(path)
            if len(video_paths) >= 8: break
    if not video_paths:
        raise RuntimeError("No se encontraron vídeos en Pexels para este guion.")

    # 4. Ensamblado de vídeo base
    segments, total_duration = [], 0
    for path in video_paths:
        if total_duration >= video_duration + 5: break
        clip = VideoFileClip(path)
        segment = clip.subclip(0, min(8, clip.duration))
        segments.append(segment)
        total_duration += segment.duration
    base_video = concatenate_videoclips(segments, method="chain")
    if base_video.duration < video_duration:
        # Repite el vídeo base hasta cubrir la duración de la voz
        n_loops = math.ceil(video_duration / base_video.duration)
        base_video = concatenate_videoclips([base_video] * n_loops)
    base_video = base_video.subclip(0, video_duration)

    # 5. Audio de fondo
    if music_path:
        music_clip = loop_audio(AudioFileClip(music_path), video_duration).volumex(0.20)
        final_audio = CompositeAudioClip([music_clip, voice_clip])
    else:
        final_audio = voice_clip

    # 6. Efectos y subtítulos
    subtitles = make_subtitle_clips(script, base_video.w, base_video.h, video_duration)
    grain_effect = make_grain_clip(base_video.size, video_duration)

    # 7. Composición final y renderizado
    final_video = CompositeVideoClip([base_video, grain_effect, *subtitles]).set_audio(final_audio)
    output_path = os.path.join(tmp_dir, "final_video.mp4")
    final_video.write_videofile(output_path, fps=24, codec="libx264", audio_codec="aac", logger=None)

    return output_path

# ------------------- Sistema de Tareas Asíncronas y Limpieza -------------------
def worker(task_id: str, mode: str, topic: str, user_script: str, voice: str, music: str | None):
    try:
        text = topic if mode == "Generar Guion con IA" else user_script
        result_tmp_path = build_video(text, mode == "Generar Guion con IA", voice, music)

        final_path = os.path.join(RESULTS_DIR, f"{task_id}.mp4")
        shutil.copy2(result_tmp_path, final_path)

        TASKS[task_id] = {"status": "done", "result": final_path, "timestamp": datetime.utcnow()}
        shutil.rmtree(os.path.dirname(result_tmp_path))  # Limpia el directorio temporal
    except Exception as e:
        logger.error(f"Error en la tarea {task_id}: {e}", exc_info=True)
        TASKS[task_id] = {"status": "error", "error": str(e), "timestamp": datetime.utcnow()}

def submit_task(mode, topic, user_script, voice, music):
    content = topic if mode == "Generar Guion con IA" else user_script
    if not content.strip():
        return "", "Por favor, ingresa un tema o guion."

    task_id = uuid.uuid4().hex[:8]
    TASKS[task_id] = {"status": "processing", "timestamp": datetime.utcnow()}

    threading.Thread(target=worker, args=(task_id, mode, topic, user_script, voice, music), daemon=True).start()

    return task_id, f"✅ Tarea creada con ID: {task_id}. Comprueba el estado en unos minutos."

def check_task_status(task_id):
    if not task_id or task_id not in TASKS:
        return None, None, "ID de tarea no válido o no encontrado."

    task_info = TASKS[task_id]
    status = task_info["status"]

    if status == "processing":
        return None, None, "⏳ La tarea se está procesando..."
    if status == "error":
        return None, None, f"❌ Error en la tarea: {task_info['error']}"
    if status == "done":
        return task_info["result"], task_info["result"], "✅ ¡Vídeo listo para descargar!"
    return None, None, "Estado desconocido."

def janitor_thread():
    """Hilo que se ejecuta periódicamente para limpiar vídeos antiguos."""
    while True:
        time.sleep(3600)  # Cada hora
        now = datetime.utcnow()
        for task_id, info in list(TASKS.items()):
            if now - info["timestamp"] > timedelta(hours=24):
                if info.get("result") and os.path.exists(info["result"]):
                    try:
                        os.remove(info["result"])
                        logger.info(f"Limpiado vídeo antiguo: {info['result']}")
                    except Exception as e:
                        logger.error(f"Error al limpiar {info['result']}: {e}")
                del TASKS[task_id]

threading.Thread(target=janitor_thread, daemon=True).start()

# ------------------- Interfaz de Gradio -------------------
with gr.Blocks(title="Generador de Vídeos IA", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🎬 Generador de Vídeos con IA")
    gr.Markdown("Crea vídeos a partir de texto, con voz, música, subtítulos y efectos visuales.")

    with gr.Tabs():
        with gr.TabItem("1. Crear Vídeo"):
            with gr.Row():
                with gr.Column(scale=2):
                    mode_radio = gr.Radio(["Generar Guion con IA", "Usar Mi Guion"], value="Generar Guion con IA", label="Elige el método")
                    topic_textbox = gr.Textbox(label="Tema para la IA", placeholder="Ej: La historia de la Vía Láctea")
                    script_textbox = gr.Textbox(label="Tu Guion Completo", lines=5, visible=False, placeholder="Pega aquí tu guion...")
                    voice_dropdown = gr.Dropdown(SPANISH_VOICES, value=SPANISH_VOICES[0] if SPANISH_VOICES else None, label="Elige una voz")
                    music_upload = gr.Audio(type="filepath", label="Música de fondo (opcional)")
                    submit_button = gr.Button("✨ Generar Vídeo", variant="primary")
                with gr.Column(scale=1):
                    task_id_output = gr.Textbox(label="ID de tu Tarea (Guárdalo)", interactive=False)
                    status_output = gr.Textbox(label="Estado", interactive=False)
                    gr.Markdown("---")
                    gr.Markdown("### ¿Cómo funciona?\n1. Elige un método y rellena el texto.\n2. (Opcional) Sube música de fondo.\n3. Pulsa **Generar Vídeo**.\n4. **Copia el ID** que aparecerá.\n5. Ve a la pestaña **'2. Revisar Estado'** para ver tu vídeo.")

        with gr.TabItem("2. Revisar Estado"):
            gr.Markdown("### Consulta el estado de tu vídeo")
            with gr.Row():
                task_id_input = gr.Textbox(label="Pega aquí el ID de tu tarea", scale=3)
                check_button = gr.Button("🔍 Verificar", scale=1)

            status_check_output = gr.Textbox(label="Estado Actual", interactive=False)
            video_output = gr.Video(label="Resultado del Vídeo")
            download_file_output = gr.File(label="Descargar Fichero")

    # Lógica de la interfaz
    def toggle_textboxes(mode):
        is_ai_mode = mode == "Generar Guion con IA"
        return gr.update(visible=is_ai_mode), gr.update(visible=not is_ai_mode)

    mode_radio.change(toggle_textboxes, inputs=mode_radio, outputs=[topic_textbox, script_textbox])
    submit_button.click(submit_task, inputs=[mode_radio, topic_textbox, script_textbox, voice_dropdown, music_upload], outputs=[task_id_output, status_output])
    check_button.click(check_task_status, inputs=task_id_input, outputs=[video_output, download_file_output, status_check_output])

if __name__ == "__main__":
    demo.launch()
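
The updated pipeline drives speech synthesis through edge-tts (edge_tts.list_voices, edge_tts.Communicate, communicate.save) instead of a local TTS model. A minimal standalone sketch of that path, reusing only the edge_tts calls that already appear in app.py above, with a hypothetical test phrase and output filename (test_voice.mp3):

import asyncio
import edge_tts

async def demo_synthesis() -> str:
    # Same call app.py uses to enumerate voices; keep only the Spanish ones.
    voices = await edge_tts.list_voices()
    spanish = sorted(v["ShortName"] for v in voices if v["Locale"].startswith("es-"))
    voice = spanish[0] if spanish else "es-ES-ElviraNeural"

    # Synthesize a short test phrase to a local mp3 (hypothetical filename).
    communicate = edge_tts.Communicate("Hola, esto es una prueba de voz.", voice)
    await communicate.save("test_voice.mp3")
    return voice

if __name__ == "__main__":
    print("Voice used:", asyncio.run(demo_synthesis()))

Running a sketch like this once on the Space is a quick way to confirm that the voice list and synthesis work before invoking the full video pipeline.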
|