Meaowangxi committed on
Commit
8f14f01
·
verified ·
1 Parent(s): de61b06

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -40,17 +40,17 @@ noise_scheduler = DDIMScheduler(
40
  steps_offset=1,
41
  )
42
  # 1.2 vae
43
- vae = AutoencoderKL.from_pretrained(vae_model_path).to(dtype=torch.float16)
44
  # 1.3 ControlNet
45
  ## 1.3.1 load controlnet_softEdge
46
- controlnet_softEdge = ControlNetModel.from_pretrained(controlnet_softEdge_model_path, torch_dtype=torch.float16)
47
  ## 1.3.2 load controlnet_depth
48
- controlnet_depth = ControlNetModel.from_pretrained(controlnet_depth_model_path, torch_dtype=torch.float16)
49
  # 1.4 load SD pipeline
50
  pipe_softEdge = StableDiffusionControlNetPipeline.from_pretrained(
51
  base_model_path,
52
  controlnet=controlnet_softEdge,
53
- torch_dtype=torch.float16,
54
  scheduler=noise_scheduler,
55
  vae=vae,
56
  feature_extractor=None,
@@ -59,7 +59,7 @@ pipe_softEdge = StableDiffusionControlNetPipeline.from_pretrained(
59
  pipe_depth = StableDiffusionControlNetPipeline.from_pretrained(
60
  base_model_path,
61
  controlnet=controlnet_depth,
62
- torch_dtype=torch.float16,
63
  scheduler=noise_scheduler,
64
  vae=vae,
65
  feature_extractor=None,
@@ -88,7 +88,7 @@ def task1_StyleTransfer(photo, blur_radius, sketch):
88
 
89
  Control_factor = 1.2
90
  IP_factor = 0.6
91
- ip_model = IPAdapter(pipe_depth, image_encoder_path, ip_ckpt, device, Control_factor=Control_factor, IP_factor=IP_factor,dtype=torch.float32)
92
 
93
  depth_image= Image.open(sketch)
94
  img_array = np.array(depth_image)
 
40
  steps_offset=1,
41
  )
42
  # 1.2 vae
43
+ vae = AutoencoderKL.from_pretrained(vae_model_path).to(dtype=torch.float32)
44
  # 1.3 ControlNet
45
  ## 1.3.1 load controlnet_softEdge
46
+ controlnet_softEdge = ControlNetModel.from_pretrained(controlnet_softEdge_model_path, torch_dtype=torch.float32)
47
  ## 1.3.2 load controlnet_depth
48
+ controlnet_depth = ControlNetModel.from_pretrained(controlnet_depth_model_path, torch_dtype=torch.float32)
49
  # 1.4 load SD pipeline
50
  pipe_softEdge = StableDiffusionControlNetPipeline.from_pretrained(
51
  base_model_path,
52
  controlnet=controlnet_softEdge,
53
+ torch_dtype=torch.float32,
54
  scheduler=noise_scheduler,
55
  vae=vae,
56
  feature_extractor=None,
 
59
  pipe_depth = StableDiffusionControlNetPipeline.from_pretrained(
60
  base_model_path,
61
  controlnet=controlnet_depth,
62
+ torch_dtype=torch.float32,
63
  scheduler=noise_scheduler,
64
  vae=vae,
65
  feature_extractor=None,
 
88
 
89
  Control_factor = 1.2
90
  IP_factor = 0.6
91
+ ip_model = IPAdapter(pipe_depth, image_encoder_path, ip_ckpt, device, Control_factor=Control_factor, IP_factor=IP_factor)
92
 
93
  depth_image= Image.open(sketch)
94
  img_array = np.array(depth_image)