PengWeixuanSZU committed
Commit 7eddf49 · verified · Parent(s): 49b23e6

Update app.py

Files changed (1):
  1. app.py +3 -11
app.py CHANGED
@@ -138,10 +138,10 @@ def inference(source_images,
     pipe.controlnet_transformer.to(DEVICE)
 
     source_pixel_values = source_images/127.5 - 1.0
-    source_pixel_values = source_pixel_values.to(torch.float16).to("cuda:0")
+    source_pixel_values = source_pixel_values.to(torch.float16).to(DEVICE)
     if target_images is not None:
         target_pixel_values = target_images/127.5 - 1.0
-        target_pixel_values = target_pixel_values.to(torch.float16).to("cuda:0")
+        target_pixel_values = target_pixel_values.to(torch.float16).to(DEVICE)
     bsz,f,h,w,c = source_pixel_values.shape
 
     with torch.no_grad():
@@ -172,7 +172,7 @@ def inference(source_images,
         height = h,
         width = w,
         num_frames = f,
-        num_inference_steps = 50,
+        num_inference_steps = 5,#!!!
         interval = 6,
         guidance_scale = guidance_scale,
         generator = torch.Generator(device=device).manual_seed(random_seed)
@@ -237,14 +237,6 @@ def process_video(video_file, image_file, positive_prompt, negative_prompt, guid
 PIPE=init_pipe()
 
 
-def process_video(video_file, image_file, positive_prompt, negative_prompt, guidance, random_seed, choice, progress=gr.Progress(track_tqdm=True))->str:
-    return "123456"
-
-def get_prompt(file:str):
-    with open(file,'r') as f:
-        a=f.readlines()
-    return a #a[0]:positive prompt, a[1] negative prompt
-
 with gr.Blocks() as demo:
     gr.Markdown(
         """