faxnoprinter committed on
Commit a1f43d3 · verified · 1 Parent(s): 7d35214

Create animate/animate.py

Files changed (1)
  1. animate/animate.py +51 -0
animate/animate.py ADDED
@@ -0,0 +1,51 @@
+ import gradio as gr
+ import os
+ from PIL import Image
+ from pathlib import Path
+ import tempfile
+ import uuid
+
+ # Hugging Face video generation inference
+ from diffusers import AnimateDiffPipeline
+ import torch
+
+ # Optional: Replace with your model of choice from HF
+ ANIMATEDIFF_MODEL = "guoyww/animatediff-light"
+
+ # Use half precision only when a GPU is available; fp16 is not supported
+ # for many CPU ops in PyTorch.
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ dtype = torch.float16 if device == "cuda" else torch.float32
+
+ pipe = AnimateDiffPipeline.from_pretrained(
+     ANIMATEDIFF_MODEL,
+     torch_dtype=dtype,
+     variant="fp16"
+ ).to(device)
+
+
+ def generate_motion_video(concept, motion_prompt, sketch_img):
+     if sketch_img is None:
+         return "Please upload or generate a sketch first.", None
+
+     # Write each run's output into its own temporary directory
+     unique_id = str(uuid.uuid4())
+     out_dir = Path(tempfile.gettempdir()) / f"motion_{unique_id}"
+     out_dir.mkdir(exist_ok=True)
+
+     # Build the text prompt from the sketch concept and the requested motion
+     prompt = (
+         f"You are given a sketch of a {concept} composed of strokes over a grid. "
+         f"Imagine this sketch being brought to life in a short animation.\n\n"
+         f"The motion task is: '{motion_prompt}'\n\n"
+         f"Generate a short animation (5–8 seconds) that shows this action applied naturally "
+         f"to the sketch, while keeping the line-drawn aesthetic."
+     )
+
+     # AnimateDiff generation
+     video = pipe(prompt, num_frames=16).frames[0]  # frames of the first (only) sample
+
+     # Save the frames as an animated GIF (100 ms per frame, looping forever)
+     gif_path = out_dir / f"{concept.replace(' ', '_')}_motion.gif"
+     video[0].save(
+         gif_path,
+         save_all=True,
+         append_images=video[1:],
+         duration=100,
+         loop=0
+     )
+
+     return "GIF created successfully!", str(gif_path)
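The file imports gradio but defines no UI, so generate_motion_video is presumably meant to be wired into a demo elsewhere in the repo. Below is a minimal sketch of how that wiring could look; the file name demo_app.py, the component choices, and the labels are illustrative assumptions, not part of this commit.

# demo_app.py (hypothetical) -- exposes generate_motion_video through a Gradio UI
import gradio as gr

# Assumes the repo root is on sys.path so the animate package is importable
from animate.animate import generate_motion_video

demo = gr.Interface(
    fn=generate_motion_video,
    inputs=[
        gr.Textbox(label="Sketch concept, e.g. 'cat'"),
        gr.Textbox(label="Motion prompt, e.g. 'jumping over a fence'"),
        gr.Image(type="pil", label="Sketch image"),
    ],
    # generate_motion_video returns (status_message, gif_path)
    outputs=[
        gr.Textbox(label="Status"),
        gr.Image(type="filepath", label="Generated GIF"),
    ],
)

if __name__ == "__main__":
    demo.launch()

The two outputs mirror the function's return tuple: a status string and the path of the saved GIF.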