# SketchAgent: animate/animate.py
import gradio as gr
import os
from PIL import Image
from pathlib import Path
import tempfile
import uuid
# Hugging Face video generation inference
from diffusers import AnimateDiffPipeline, MotionAdapter
import torch
# Optional: replace these with your models of choice from HF.
# AnimateDiff needs a motion adapter on top of a Stable Diffusion 1.5 base checkpoint.
MOTION_ADAPTER = "guoyww/animatediff-motion-adapter-v1-5-2"
BASE_MODEL = "emilianJR/epiCRealism"

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32  # fp16 is poorly supported on CPU

adapter = MotionAdapter.from_pretrained(MOTION_ADAPTER, torch_dtype=dtype)
pipe = AnimateDiffPipeline.from_pretrained(
    BASE_MODEL,
    motion_adapter=adapter,
    torch_dtype=dtype,
).to(device)
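# Optional (not in the original file): on smaller GPUs these diffusers memory helpers
# can keep the 16-frame UNet pass from running out of VRAM.
# pipe.enable_vae_slicing()
# pipe.enable_model_cpu_offload()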
def generate_motion_video(concept, motion_prompt, sketch_img):
    if sketch_img is None:
        return "Please upload or generate a sketch first.", None

    unique_id = str(uuid.uuid4())
    out_dir = Path(tempfile.gettempdir()) / f"motion_{unique_id}"
    out_dir.mkdir(exist_ok=True)
    # Build the text prompt from the sketch concept and requested motion.
    # Note: this is a text-to-video call; the sketch image itself is not fed to the pipeline.
    prompt = (
        f"You are given a sketch of a {concept} composed of strokes over a grid. "
        f"Imagine this sketch being brought to life in a short animation.\n\n"
        f"The motion task is: '{motion_prompt}'\n\n"
        f"Generate a short animation (5–8 seconds) that shows this action applied naturally "
        f"to the sketch, while keeping the line-drawn aesthetic."
    )
    # AnimateDiff generation
    video = pipe(prompt, num_frames=16).frames[0]  # frames of the first (and only) sample, as PIL images

    # Assemble the frames into a GIF (duration is ms per frame: 16 frames at 100 ms is ~1.6 s)
    gif_path = out_dir / f"{concept.replace(' ', '_')}_motion.gif"
    video[0].save(
        gif_path,
        save_all=True,
        append_images=video[1:],
        duration=100,
        loop=0,
    )
    return "GIF created successfully!", str(gif_path)