Manjushri committed on
Commit 134fe05 · verified · 1 Parent(s): 8c42d1b

Update app.py

Files changed (1)
  1. app.py +7 -6
app.py CHANGED
@@ -113,12 +113,15 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, up
     animagine = animagine.to(device)
     torch.cuda.empty_cache()
     if upscale == "Yes":
-        refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
-        refiner.enable_xformers_memory_efficient_attention()
-        refiner = refiner.to(device)
         torch.cuda.empty_cache()
+        torch.cuda.max_memory_allocated(device=device)
         int_image = animagine(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, output_type="latent").images
-        image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
+        torch.cuda.empty_cache()
+        animagine = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
+        animagine.enable_xformers_memory_efficient_attention()
+        animagine = animagine.to(device)
+        torch.cuda.empty_cache()
+        image = animagine(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
         torch.cuda.empty_cache()
         return image
     else:
@@ -139,12 +142,10 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, up
         torch.cuda.empty_cache()
         image = sdxl(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, output_type="latent").images
         torch.cuda.empty_cache()
-
         sdxl = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
         sdxl.enable_xformers_memory_efficient_attention()
         sdxl = sdxl.to(device)
         torch.cuda.empty_cache()
-
         refined = sdxl(Prompt, negative_prompt=negative_prompt, image=image, denoising_start=high_noise_frac).images[0]
         torch.cuda.empty_cache()
         return refined
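For context, the updated Animagine branch now follows the usual SDXL base-then-refiner hand-off: the base pipeline produces latents (output_type="latent"), memory is released, and only then is the refiner loaded to resume denoising from high_noise_frac. Below is a minimal standalone sketch of that flow, not the app's exact code: the Animagine checkpoint id, prompt, and high_noise_frac value are placeholders, since this hunk does not show where app.py defines them, and the sketch assumes a CUDA GPU with the diffusers package installed.

```python
import torch
from diffusers import DiffusionPipeline

device = "cuda"          # sketch assumes a CUDA GPU is available
high_noise_frac = 0.8    # assumed value; defined elsewhere in app.py
prompt = "a watercolor landscape"  # placeholder prompt

# 1) Base pass: keep the result as latents instead of decoding to an image.
base = DiffusionPipeline.from_pretrained(
    "cagliostrolab/animagine-xl-3.0",  # assumed Animagine checkpoint; not shown in this hunk
    torch_dtype=torch.float16,
).to(device)
latents = base(
    prompt,
    num_inference_steps=25,
    guidance_scale=7,
    output_type="latent",
).images

# 2) Drop the base pipeline before loading the refiner, so only one model
#    occupies GPU memory at a time -- the pattern this commit moves to.
del base
torch.cuda.empty_cache()

# 3) Refiner pass: resume denoising of the latents from high_noise_frac.
refiner = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    use_safetensors=True,
    torch_dtype=torch.float16,
    variant="fp16",
).to(device)
image = refiner(
    prompt,
    image=latents,
    denoising_start=high_noise_frac,
).images[0]
image.save("refined.png")
```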