Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -128,14 +128,11 @@ def inference(source_images,
               text_prompt, negative_prompt,
               pipe, vae, guidance_scale,
               h, w, random_seed)->List[PIL.Image.Image]:
-    print("Entered inference")
     torch.manual_seed(random_seed)
-    print("Seed set, about to move pipe components to the GPU")
 
     pipe.vae.to(DEVICE)
     pipe.transformer.to(DEVICE)
     pipe.controlnet_transformer.to(DEVICE)
-    print("Moved pipe.vae/transformer/controlnet to the GPU")
 
     source_pixel_values = source_images/127.5 - 1.0
     source_pixel_values = source_pixel_values.to(torch.float16).to(DEVICE)
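Aside from dropping the debug prints, this hunk is where 8-bit frames are rescaled from [0, 255] to the [-1, 1] range that diffusion VAEs typically expect (source_images/127.5 - 1.0). A minimal sketch of that normalization and its inverse; the tensor names are illustrative, not taken from app.py:

import torch

frames_uint8 = torch.randint(0, 256, (1, 3, 64, 64), dtype=torch.uint8)

# uint8 [0, 255] -> float [-1, 1]: 0 maps to -1.0, 127.5 to 0.0, 255 to 1.0
pixel_values = frames_uint8.float() / 127.5 - 1.0

# inverse mapping, e.g. for saving generated frames back as images
restored = ((pixel_values + 1.0) * 127.5).round().clamp(0, 255).to(torch.uint8)
assert torch.equal(frames_uint8, restored)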
@@ -164,7 +161,6 @@ def inference(source_images,
     image_latents = None
     latents = source_latents
 
-    print("About to call pipe()")
     video = pipe(
         prompt = text_prompt,
         negative_prompt = negative_prompt,
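Starting the sampler from latents = source_latents (with image_latents left as None) means denoising begins from the encoded source frames rather than pure noise. A hedged sketch of how such latents are commonly produced with a diffusers-style VAE; the exact encode path in this Space may differ:

import torch
from diffusers import AutoencoderKL

# randomly initialized stand-in; the Space presumably loads a pretrained (video) VAE
vae = AutoencoderKL()
pixel_values = torch.randn(1, 3, 64, 64)  # assumed already normalized to roughly [-1, 1]

with torch.no_grad():
    posterior = vae.encode(pixel_values).latent_dist
    latents = posterior.sample() * vae.config.scaling_factor  # scaled latents fed to the sampler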
@@ -173,7 +169,7 @@ def inference(source_images,
         height = h,
         width = w,
         num_frames = f,
-        num_inference_steps =
+        num_inference_steps = 30,
         interval = 6,
         guidance_scale = guidance_scale,
         generator = torch.Generator(device=DEVICE).manual_seed(random_seed)
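The only functional change in the commit lands here: num_inference_steps is pinned to 30 and the generator is derived from the user-supplied random_seed, so a given seed reproduces the same video while fewer steps trade some quality for latency. A small sketch of the device-local seeding pattern; the pipe call is shown only in shape, not as this Space's exact signature:

import torch

DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
seed = 42

# a per-call Generator keeps sampling reproducible without mutating global RNG state
generator = torch.Generator(device=DEVICE).manual_seed(seed)

# illustrative call shape:
# video = pipe(prompt=..., num_inference_steps=30, guidance_scale=6.0, generator=generator)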
@@ -220,7 +216,6 @@ def process_video(video_file, image_file, positive_prompt, negative_prompt, guid
     source_images=source_images
     target_images=first_frame[None,None,...]
 
-    print("All setup done, starting inference.")
     video+=inference(source_images, \
                      target_images, positive_prompt, \
                      negative_prompt, pipe, pipe.vae, \
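Taken together, the commit strips the ad-hoc print() tracing from inference and process_video. If the same breadcrumbs are ever wanted back, Python's standard logging module provides them behind a switchable level; a sketch, not code from this repo:

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("app")

log.info("entered inference")              # visible at the default INFO level
log.debug("pipe components moved to GPU")  # silenced unless the level is DEBUG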