beginner-unskilled2025 committed · verified · Commit 1ff3f41 · 1 Parent(s): 710d036

Update app.py

Files changed (1):
  1. app.py +36 -15
app.py CHANGED
@@ -5,28 +5,49 @@ from PIL import Image
 import tempfile
 import imageio
 
-# Use a public AnimateDiff-compatible model (Realistic Vision base)
+# Load the model
 model_id = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
 pipe = AnimateDiffPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
 pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
 pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
 
-def generate_video(image, prompt):
+def estimate_time(num_frames):
+    est_seconds = int(num_frames * 2.5) # rough multiplier
+    return f"Estimated time: ~{est_seconds} seconds"
+
+def generate_video(image, prompt, num_frames):
+    status = "Generating..."
     image = image.convert("RGB").resize((512, 512))
-    result = pipe(prompt=prompt, image=image, num_frames=16, guidance_scale=7.5)
+
+    result = pipe(prompt=prompt, image=image, num_frames=num_frames, guidance_scale=7.5)
     frames = result.frames
 
     video_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
     imageio.mimsave(video_path, frames, fps=8)
-    return video_path
-
-gr.Interface(
-    fn=generate_video,
-    inputs=[
-        gr.Image(type="pil", label="Upload Image"),
-        gr.Textbox(label="Describe Motion (Prompt)")
-    ],
-    outputs=gr.Video(label="Generated Video"),
-    title="🌀 Image + Prompt to Animation",
-    description="Upload a still image and describe how you want it animated!"
-).launch()
+
+    status = f"Done! ({num_frames} frames)"
+    return video_path, status
+
+with gr.Blocks() as demo:
+    gr.Markdown("# 🌀 Image + Prompt to Video Generator")
+    with gr.Row():
+        image_input = gr.Image(type="pil", label="Upload Image")
+        prompt_input = gr.Textbox(label="Describe Motion (Prompt)")
+    with gr.Row():
+        num_frames_slider = gr.Slider(8, 32, value=16, step=8, label="Number of Frames")
+        time_output = gr.Textbox(label="⏱️ Estimated Time", interactive=False)
+    with gr.Row():
+        generate_btn = gr.Button("🎬 Generate Video")
+        status_output = gr.Textbox(label="🔄 Status", interactive=False)
+    video_output = gr.Video(label="🎥 Output Video")
+
+    # Events
+    num_frames_slider.change(fn=estimate_time, inputs=num_frames_slider, outputs=time_output)
+
+    generate_btn.click(
+        fn=generate_video,
+        inputs=[image_input, prompt_input, num_frames_slider],
+        outputs=[video_output, status_output]
+    )
+
+demo.launch()
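
Review note (not part of the commit): as committed, the generation call may still fail at runtime. In current diffusers, AnimateDiffPipeline is normally constructed with a MotionAdapter, its call signature has no plain image argument (image conditioning typically goes through an IP-Adapter), and result.frames is batched, so result.frames[0] holds the actual frame list. The following is a minimal sketch of how the model setup and generate_video could look under those assumptions; the motion-adapter and IP-Adapter checkpoints named here are illustrative choices, not something taken from this commit.

# Sketch only -- assumes diffusers' MotionAdapter / IP-Adapter support;
# the two extra checkpoints below are illustrative, not from the commit.
import tempfile

import torch
from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
from diffusers.utils import export_to_video
from PIL import Image

model_id = "SG161222/Realistic_Vision_V6.0_B1_noVAE"

# AnimateDiff animates an SD 1.5 base model through a separate motion module.
adapter = MotionAdapter.from_pretrained(
    "guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16
)
pipe = AnimateDiffPipeline.from_pretrained(
    model_id, motion_adapter=adapter, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)

# The pipeline has no `image` kwarg; condition on the uploaded still via IP-Adapter.
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")


def generate_video(image: Image.Image, prompt: str, num_frames: int):
    image = image.convert("RGB").resize((512, 512))
    result = pipe(
        prompt=prompt,
        ip_adapter_image=image,   # the uploaded still guides the animation
        num_frames=int(num_frames),
        guidance_scale=7.5,
    )
    frames = result.frames[0]     # .frames is batched: one list of frames per prompt

    video_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
    export_to_video(frames, video_path, fps=8)  # recent diffusers accepts PIL frames here
    return video_path, f"Done! ({num_frames} frames)"

Under this sketch the rest of the committed Blocks UI (the frames slider, estimate_time, and the generate_btn.click wiring) would work unchanged, since the function keeps the same inputs and still returns a video path plus a status string.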