File size: 1,592 Bytes
a1f43d3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import gradio as gr
import os
from PIL import Image
from pathlib import Path
import tempfile
import uuid

# Hugging Face video generation inference
from diffusers import AnimateDiffPipeline
import torch

# Optional: Replace with your model of choice from HF
ANIMATEDIFF_MODEL = "guoyww/animatediff-light"

# Pick the device first so the dtype can match it: fp16 weights are only
# usable on GPU — half-precision inference on CPU is unsupported/extremely
# slow for most ops, so CPU must load the default fp32 weights.
_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

if _DEVICE == "cuda":
    pipe = AnimateDiffPipeline.from_pretrained(
        ANIMATEDIFF_MODEL,
        torch_dtype=torch.float16,
        variant="fp16",
    ).to(_DEVICE)
else:
    # No variant="fp16" here: request the standard float32 checkpoint.
    pipe = AnimateDiffPipeline.from_pretrained(
        ANIMATEDIFF_MODEL,
        torch_dtype=torch.float32,
    ).to(_DEVICE)


def generate_motion_video(concept, motion_prompt, sketch_img):
    """Render a short line-drawn animation GIF of *concept* doing *motion_prompt*.

    Args:
        concept: Subject of the sketch (e.g. "cat"); also used to name the
            output file.
        motion_prompt: Description of the motion to animate
            (e.g. "jumping over a fence").
        sketch_img: The uploaded/generated sketch. Currently only used as a
            presence check — the text-to-video pipeline does not condition
            on it.
            TODO(review): wire the sketch into generation via an
            image-conditioned pipeline (img2img / ControlNet) if visual
            fidelity to the uploaded sketch is required.

    Returns:
        A ``(status_message, gif_path_or_None)`` tuple; the path is a string
        on success and ``None`` on failure.
    """
    if sketch_img is None:
        return "Please upload or generate a sketch first.", None

    unique_id = str(uuid.uuid4())
    out_dir = Path(tempfile.gettempdir()) / f"motion_{unique_id}"
    out_dir.mkdir(parents=True, exist_ok=True)

    # AnimateDiff is a diffusion model: it conditions on a *scene
    # description*, not chat-style instructions, so describe the desired
    # clip directly instead of addressing the model in second person.
    prompt = (
        f"a line-drawn sketch of a {concept}, {motion_prompt}, "
        f"simple black strokes on a plain background, "
        f"hand-drawn animation style"
    )

    # .frames is a batch of videos; take the first sample's frame list
    # (PIL images by default).
    frames = pipe(prompt, num_frames=16).frames[0]
    if not frames:
        # Guard against an empty generation instead of crashing on frames[0].
        return "Video generation produced no frames.", None

    gif_path = out_dir / f"{concept.replace(' ', '_')}_motion.gif"
    # 16 frames at 100 ms each -> a ~1.6 s looping GIF (loop=0 = forever).
    frames[0].save(
        gif_path,
        save_all=True,
        append_images=frames[1:],
        duration=100,
        loop=0,
    )

    return "GIF created successfully!", str(gif_path)