baggy2797 committed on
Commit
abb2034
·
verified ·
1 Parent(s): 49d0d1a
Files changed (1) hide show
  1. app.py +0 -160
app.py DELETED
@@ -1,160 +0,0 @@
1
- import gradio as gr
2
- import torch
3
- from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
4
- from diffusers.utils import export_to_video
5
- import os
6
- import time
7
-
8
- # Create output directory if it doesn't exist
9
- os.makedirs("outputs", exist_ok=True)
10
-
11
- # Function to load model (only once)
12
- @torch.inference_mode()
13
- def load_model():
14
- print("Loading SkyReels V2 model...")
15
- pipe = DiffusionPipeline.from_pretrained(
16
- "Skywork/SkyReels-V2-DF-1.3B-540P",
17
- torch_dtype=torch.float16,
18
- variant="fp16"
19
- )
20
-
21
- # Move to GPU and optimize
22
- device = "cuda" if torch.cuda.is_available() else "cpu"
23
- pipe.to(device)
24
-
25
- # Apply memory optimizations
26
- pipe.enable_model_cpu_offload()
27
- pipe.enable_vae_slicing()
28
-
29
- # Use DPMSolver for faster generation
30
- pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
31
-
32
- print(f"Model loaded successfully on {device}!")
33
- return pipe
34
-
35
- # Generate video function
36
- @torch.inference_mode()
37
- def generate_video(
38
- prompt,
39
- negative_prompt,
40
- num_frames,
41
- steps,
42
- guidance_scale,
43
- width,
44
- height,
45
- progress=gr.Progress()
46
- ):
47
- # Load the model if not already loaded
48
- if "pipe" not in generate_video.__dict__:
49
- generate_video.pipe = load_model()
50
-
51
- # Update progress
52
- progress(0, desc="Starting generation...")
53
-
54
- # Generate video frames
55
- video_frames = generate_video.pipe(
56
- prompt=prompt,
57
- negative_prompt=negative_prompt,
58
- num_inference_steps=steps,
59
- num_frames=num_frames,
60
- height=height,
61
- width=width,
62
- guidance_scale=guidance_scale,
63
- callback=lambda i, t, latents: progress((i + 1) / steps)
64
- ).frames
65
-
66
- # Save the video
67
- timestamp = int(time.time())
68
- output_path = f"outputs/generated_video_{timestamp}.mp4"
69
- export_to_video(video_frames, output_path)
70
-
71
- return output_path
72
-
73
- # Create Gradio interface
74
- with gr.Blocks() as demo:
75
- gr.Markdown("# SkyReels V2 Video Generation")
76
- gr.Markdown("Generate videos using the Skywork/SkyReels-V2-DF-1.3B-540P model")
77
-
78
- with gr.Row():
79
- with gr.Column():
80
- prompt = gr.Textbox(
81
- label="Prompt",
82
- placeholder="A panda playing guitar on a mountain top, cinematic lighting",
83
- value="A panda playing guitar on a mountain top, cinematic lighting"
84
- )
85
-
86
- negative_prompt = gr.Textbox(
87
- label="Negative Prompt",
88
- placeholder="poor quality, blurry, distorted",
89
- value="poor quality, blurry, distorted"
90
- )
91
-
92
- with gr.Row():
93
- num_frames = gr.Slider(
94
- minimum=8,
95
- maximum=24,
96
- value=16,
97
- step=1,
98
- label="Number of Frames"
99
- )
100
-
101
- steps = gr.Slider(
102
- minimum=15,
103
- maximum=50,
104
- value=25,
105
- step=1,
106
- label="Inference Steps"
107
- )
108
-
109
- with gr.Row():
110
- guidance_scale = gr.Slider(
111
- minimum=1.0,
112
- maximum=15.0,
113
- value=7.5,
114
- step=0.5,
115
- label="Guidance Scale"
116
- )
117
-
118
- with gr.Row():
119
- width = gr.Dropdown(
120
- choices=[576, 640, 704],
121
- value=576,
122
- label="Width"
123
- )
124
-
125
- height = gr.Dropdown(
126
- choices=[320, 384, 448],
127
- value=320,
128
- label="Height"
129
- )
130
-
131
- generate_btn = gr.Button("Generate Video")
132
-
133
- with gr.Column():
134
- output_video = gr.Video(label="Generated Video")
135
-
136
- generate_btn.click(
137
- fn=generate_video,
138
- inputs=[
139
- prompt,
140
- negative_prompt,
141
- num_frames,
142
- steps,
143
- guidance_scale,
144
- width,
145
- height
146
- ],
147
- outputs=output_video
148
- )
149
-
150
- gr.Markdown("""
151
- ## Usage Tips
152
- - Use detailed prompts for better results
153
- - Higher guidance scale = more prompt adherence
154
- - More inference steps = better quality but slower
155
- - First generation will take longer as it loads the model
156
- - GPU with 10GB+ VRAM recommended
157
- """)
158
-
159
- # Run the app
160
- demo.queue().launch()