Manjushri committed on
Commit
2d7fd47
·
verified ·
1 Parent(s): d77ecd6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -0
app.py CHANGED
@@ -109,6 +109,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, up
109
  return image
110
 
111
  if Model == "SDXL 1.0":
 
112
  torch.cuda.max_memory_allocated(device=device)
113
  sdxl = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
114
  sdxl.enable_xformers_memory_efficient_attention()
@@ -116,6 +117,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, up
116
  torch.cuda.empty_cache()
117
 
118
  if upscale == "Yes":
 
119
  torch.cuda.max_memory_allocated(device=device)
120
  image = sdxl(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, output_type="latent").images
121
  sdxl = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
 
109
  return image
110
 
111
  if Model == "SDXL 1.0":
112
+ torch.cuda.empty_cache()
113
  torch.cuda.max_memory_allocated(device=device)
114
  sdxl = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
115
  sdxl.enable_xformers_memory_efficient_attention()
 
117
  torch.cuda.empty_cache()
118
 
119
  if upscale == "Yes":
120
+ torch.cuda.empty_cache()
121
  torch.cuda.max_memory_allocated(device=device)
122
  image = sdxl(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, output_type="latent").images
123
  sdxl = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")