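# Gradio demo: one text prompt drives both Stable Audio Open Small (audio) and
# Kandinsky 3 (image); the two outputs are muxed into a single mp4 with moviepy.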
import os
from huggingface_hub import login
import torch
import torchaudio
from einops import rearrange
import gradio as gr
from stable_audio_tools import get_pretrained_model
from stable_audio_tools.inference.generation import generate_diffusion_cond
from diffusers import DiffusionPipeline
from PIL import Image
from moviepy.editor import AudioFileClip, ImageClip

# Authenticate
token = os.getenv("HUGGINGFACE_TOKEN")
if not token:
    raise RuntimeError("HUGGINGFACE_TOKEN not set")
login(token=token, add_to_git_credential=False)

# Load audio model
device = "cuda" if torch.cuda.is_available() else "cpu"
audio_model, audio_config = get_pretrained_model("stabilityai/stable-audio-open-small")
audio_model = audio_model.to(device)
sample_rate = audio_config["sample_rate"]
sample_size = audio_config["sample_size"]

# Load image model (Kandinsky)
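# fp16 halves VRAM use on GPU; CPU execution needs fp32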
image_pipe = DiffusionPipeline.from_pretrained(
    "kandinsky-community/kandinsky-3",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
).to(device)

# Generate audio
def generate_audio(prompt):
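    # stable-audio-open-small generates clips up to ~11 s, per its model card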
    conditioning = [{"prompt": prompt, "seconds_total": 11}]
    with torch.no_grad():
        output = generate_diffusion_cond(
            audio_model,
            steps=8,
            conditioning=conditioning,
            sample_size=sample_size,
            sampler_type="pingpong",  # the model card pairs few-step generation with the ping-pong sampler
            device=device
        )
    output = rearrange(output, "b d n -> d (b n)")
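    # Peak-normalize to [-1, 1], then scale to 16-bit PCM for torchaudio.save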
    output = output.to(torch.float32).div(torch.max(torch.abs(output))).clamp(-1, 1).mul(32767).to(torch.int16).cpu()
    path = "output.wav"
    torchaudio.save(path, output, sample_rate)
    return path

# Generate image
def generate_image(prompt):
    # Kandinsky 3 is a latent-diffusion pipeline, so keep dimensions divisible by 8
    image = image_pipe(prompt=prompt, height=512, width=512).images[0]
    image_path = "output.png"
    image.save(image_path)
    return image_path

# Combine audio + image into mp4
def combine_to_video(image_path, audio_path, output_path="output.mp4"):
    audio_clip = AudioFileClip(audio_path)
    # Hold the still image for exactly as long as the generated audio runs
    clip = ImageClip(image_path).set_duration(audio_clip.duration).set_audio(audio_clip)
    clip.write_videofile(output_path, codec="libx264", audio_codec="aac", fps=1)
    return output_path

# Unified generation
def generate_av(prompt):
    audio_path = generate_audio(prompt)
    image_path = generate_image(prompt)
    video_path = combine_to_video(image_path, audio_path)
    return video_path

# UI
interface = gr.Interface(
    fn=generate_av,
    inputs=gr.Textbox(
        label="🎀 Prompt your sonic art here",
        placeholder="e.g. 'drunk driving with mario and yung lean'"
    ),
    outputs=gr.Video(
        label="🧠 Generated Audiovisual Clip"
    ),
    title='🌐 Hot Prompts in Your Area: "My Husband Is Dead"',
    description="Type a fun sound idea and get back an mp4 that pairs generated audio with a generated image.",
    examples=[
        "ghosts peeing in a server room",
        "tech startup boss villain entrance music",
        "AI doing acid in a technofeudalist dystopia"
    ],
    css="style.css"
)

interface.launch()