seawolf2357 committed on
Commit 4e94f64 · verified · 1 Parent(s): bcb1aaa

Update app.py

Files changed (1)
  1. app.py +75 -27
app.py CHANGED
@@ -49,6 +49,16 @@ class VideoGenerationConfig:
     enable_model_cpu_offload: bool = True
     enable_vae_slicing: bool = True
     enable_vae_tiling: bool = True
+
+    @property
+    def max_duration(self):
+        """Maximum allowed duration in seconds"""
+        return self.max_frames / self.fixed_fps
+
+    @property
+    def min_duration(self):
+        """Minimum allowed duration in seconds"""
+        return self.min_frames / self.fixed_fps
 
 config = VideoGenerationConfig()
 MAX_SEED = np.iinfo(np.int32).max
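Note: the new properties simply divide the frame bounds by the fixed frame rate. A minimal sketch of the behaviour, using placeholder values for fixed_fps, min_frames, and max_frames (the actual values are defined elsewhere in app.py):

from dataclasses import dataclass

@dataclass
class DurationConfigSketch:
    fixed_fps: int = 24    # placeholder value, not the app's real setting
    min_frames: int = 8    # placeholder value
    max_frames: int = 81   # placeholder value

    @property
    def max_duration(self):
        """Maximum allowed duration in seconds"""
        return self.max_frames / self.fixed_fps

    @property
    def min_duration(self):
        """Minimum allowed duration in seconds"""
        return self.min_frames / self.fixed_fps

cfg = DurationConfigSketch()
print(f"{cfg.min_duration:.1f}s - {cfg.max_duration:.1f}s")  # 0.3s - 3.4s with these placeholders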
@@ -230,19 +240,33 @@ class VideoGenerator:
         if len(prompt) > 500:
             return False, "⚠️ Prompt is too long (max 500 characters)"
 
-        if duration < self.config.min_frames / self.config.fixed_fps:
-            return False, f"⏱️ Duration too short (min {self.config.min_frames/self.config.fixed_fps:.1f}s)"
+        # Exact duration range check
+        min_duration = self.config.min_duration
+        max_duration = self.config.max_duration
 
-        if duration > self.config.max_frames / self.config.fixed_fps:
-            return False, f"⏱️ Duration too long (max {self.config.max_frames/self.config.fixed_fps:.1f}s)"
+        if duration < min_duration:
+            return False, f"⏱️ Duration too short (min {min_duration:.1f}s)"
+
+        if duration > max_duration:
+            return False, f"⏱️ Duration too long (max {max_duration:.1f}s)"
+
+        # Apply more conservative limits in the Zero GPU environment
+        if hasattr(spaces, 'GPU'):  # Spaces environment check
+            if duration > 2.5:  # capped at 2.5s on Zero GPU
+                return False, "⏱️ In Zero GPU environment, duration is limited to 2.5s for stability"
+            if height > 640 or width > 640:  # resolution is limited as well
+                return False, "📏 In Zero GPU environment, resolution is limited to 640x640"
 
         # GPU memory check
         if torch.cuda.is_available():
-            free_memory = torch.cuda.get_device_properties(0).total_memory - torch.cuda.memory_allocated()
-            required_memory = (height * width * 3 * 8 * duration * config.fixed_fps) / (1024**3)  # rough estimate
-            if free_memory < required_memory * 2:  # keep 2x headroom
-                clear_gpu_memory()
-                return False, "⚠️ Not enough GPU memory. Try smaller dimensions or shorter duration."
+            try:
+                free_memory = torch.cuda.get_device_properties(0).total_memory - torch.cuda.memory_allocated()
+                required_memory = (height * width * 3 * 8 * duration * self.config.fixed_fps) / (1024**3)
+                if free_memory < required_memory * 2:
+                    clear_gpu_memory()
+                    return False, "⚠️ Not enough GPU memory. Try smaller dimensions or shorter duration."
+            except:
+                pass  # continue if the GPU check fails
 
         return True, None
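Note: the memory gate above compares free VRAM against a rough estimate, height x width x 3 x 8 x duration x fps bytes converted to GiB, and requires 2x headroom. A standalone sketch of that arithmetic; the 640x640, 2.5 s, and 24 fps inputs below are example values, not the app's defaults:

def estimate_required_gib(height, width, duration_s, fps):
    # Same rough heuristic as validate_inputs, expressed in GiB
    return (height * width * 3 * 8 * duration_s * fps) / (1024 ** 3)

required = estimate_required_gib(640, 640, 2.5, 24)   # about 0.55 GiB
print(f"estimated need: {required:.2f} GiB, free VRAM wanted (2x headroom): {2 * required:.2f} GiB")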
 
@@ -273,25 +297,36 @@ def handle_image_upload(image):
 
 def get_duration(input_image, prompt, height, width, negative_prompt,
                  duration_seconds, guidance_scale, steps, seed, randomize_seed, progress):
-    # Dynamically adjust the duration based on GPU usage
+    # Allocate time more conservatively in the Zero GPU environment
     base_duration = 60
-    if steps > 4:
+
+    # Extra time per step count
+    if steps > 8:
+        base_duration += 30
+    elif steps > 4:
         base_duration += 15
+
+    # Extra time per requested duration
     if duration_seconds > 2:
-        base_duration += 15
+        base_duration += 20
+    elif duration_seconds > 1.5:
+        base_duration += 10
 
-    # Extra time for higher resolutions
+    # Extra time per resolution
     pixels = height * width
-    if pixels > 500000:
+    if pixels > 400000:  # roughly 640x640
        base_duration += 20
+    elif pixels > 250000:  # roughly 512x512
+        base_duration += 10
 
-    return min(base_duration, 120)  # max 120 seconds
+    # Cap at 90 seconds in the Zero GPU environment
+    return min(base_duration, 90)
 
 @spaces.GPU(duration=get_duration)
 @measure_time
 def generate_video(input_image, prompt, height, width,
                    negative_prompt=config.default_negative_prompt,
-                   duration_seconds=2, guidance_scale=1, steps=4,
+                   duration_seconds=1.5, guidance_scale=1, steps=4,
                    seed=42, randomize_seed=False,
                    progress=gr.Progress(track_tqdm=True)):
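Note: @spaces.GPU accepts a callable for duration, so the GPU time budget is recomputed per request from the same arguments the handler receives. A simplified sketch of the budgeting logic above (reduced signature; the real get_duration takes the full argument list):

def estimate_gpu_budget(steps, duration_seconds, height, width):
    # Mirrors get_duration: start from 60 s and add time for heavier settings
    budget = 60
    if steps > 8:
        budget += 30
    elif steps > 4:
        budget += 15
    if duration_seconds > 2:
        budget += 20
    elif duration_seconds > 1.5:
        budget += 10
    pixels = height * width
    if pixels > 400000:      # roughly 640x640
        budget += 20
    elif pixels > 250000:    # roughly 512x512
        budget += 10
    return min(budget, 90)   # Zero GPU hard cap

print(estimate_gpu_budget(steps=4, duration_seconds=1.5, height=512, width=512))  # 70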
 
@@ -302,6 +337,10 @@ def generate_video(input_image, prompt, height, width,
     try:
         progress(0.1, desc="🔍 Validating inputs...")
 
+        # Additional validation in the Zero GPU environment
+        if hasattr(spaces, 'GPU'):
+            logger.info(f"Zero GPU environment detected. Duration: {duration_seconds}s, Resolution: {height}x{width}")
+
         # Input validation
         is_valid, error_msg = video_generator.validate_inputs(
             input_image, prompt, height, width, duration_seconds, steps
@@ -315,8 +354,15 @@ def generate_video(input_image, prompt, height, width,
         progress(0.2, desc="🎯 Preparing image...")
         target_h = max(config.mod_value, (int(height) // config.mod_value) * config.mod_value)
         target_w = max(config.mod_value, (int(width) // config.mod_value) * config.mod_value)
-        num_frames = np.clip(int(round(duration_seconds * config.fixed_fps)),
-                             config.min_frames, config.max_frames)
+
+        # Frame count (additional cap in the Zero GPU environment)
+        max_allowed_frames = int(2.5 * config.fixed_fps) if hasattr(spaces, 'GPU') else config.max_frames
+        num_frames = min(
+            int(round(duration_seconds * config.fixed_fps)),
+            max_allowed_frames
+        )
+        num_frames = np.clip(num_frames, config.min_frames, max_allowed_frames)
+
         current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
 
         # Image resize (memory-efficient)
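Note: the frame clamp ties the 2.5 s Zero GPU limit back to a frame budget of int(2.5 * config.fixed_fps). A quick check of the arithmetic, assuming fixed_fps = 24 and min_frames = 8 purely for illustration:

import numpy as np

fixed_fps = 24          # assumed for illustration
min_frames = 8          # assumed for illustration
duration_seconds = 2.5

max_allowed_frames = int(2.5 * fixed_fps)                                        # 60
num_frames = min(int(round(duration_seconds * fixed_fps)), max_allowed_frames)   # 60
num_frames = int(np.clip(num_frames, min_frames, max_allowed_frames))
print(num_frames)  # 60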
@@ -357,6 +403,7 @@ def generate_video(input_image, prompt, height, width,
         export_to_video(output_frames_list, video_path, fps=config.fixed_fps)
 
         progress(1.0, desc="✨ Complete!")
+        logger.info(f"Video generated successfully: {num_frames} frames, {target_h}x{target_w}")
         return video_path, current_seed
 
     except Exception as e:
@@ -544,11 +591,12 @@ with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
     # GPU memory warning
     gr.HTML("""
     <div class="warning-box">
-        <strong>💡 Performance Tips:</strong>
+        <strong>💡 Zero GPU Performance Tips:</strong>
         <ul style="margin: 5px 0; padding-left: 20px;">
-            <li>Start with lower resolution (512x512) for testing</li>
-            <li>Keep duration under 2 seconds for stable generation</li>
-            <li>Use 4-8 steps for optimal speed/quality balance</li>
+            <li>Maximum duration: 2.5 seconds (limited by Zero GPU)</li>
+            <li>Recommended resolution: 512x512 for stable generation</li>
+            <li>Use 4-6 steps for optimal speed/quality balance</li>
+            <li>Wait between generations to avoid queue errors</li>
         </ul>
     </div>
     """)
@@ -572,11 +620,11 @@ with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
             )
 
             duration_input = gr.Slider(
-                minimum=round(config.min_frames/config.fixed_fps, 1),
-                maximum=round(config.max_frames/config.fixed_fps, 1),
+                minimum=round(config.min_duration, 1),
+                maximum=2.5 if hasattr(spaces, 'GPU') else round(config.max_duration, 1),  # Zero GPU environment limit
                 step=0.1,
-                value=2,
-                label="⏱️ Video Duration (seconds)",
+                value=1.5,  # safe default
+                label="⏱️ Video Duration (seconds) - Limited to 2.5s in Zero GPU",
                 elem_classes="slider-container"
             )
 
@@ -699,4 +747,4 @@ with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
     )
 
 if __name__ == "__main__":
-    demo.queue(max_size=2).launch()  # limit queue size to manage memory
+    demo.queue(max_size=1).launch()  # limit queue size to manage memory
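Note: dropping the queue from max_size=2 to max_size=1 means the queue holds at most one pending request; further requests are rejected with a queue-full error instead of piling up in memory. A minimal Gradio sketch of the same launch pattern (toy UI, not the app's actual layout):

import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("Queue-limited demo")

if __name__ == "__main__":
    # max_size=1: only one request may wait in the queue; extras get a queue-full error
    demo.queue(max_size=1).launch()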
 