Spaces:
Running
on
T4
Running
on
T4
Update app.py
Browse files
Commit message: Fixed mysterious indent issue
app.py
CHANGED
@@ -102,7 +102,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
|
|
102 |
image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
|
103 |
torch.cuda.empty_cache()
|
104 |
|
105 |
-
|
106 |
refiner = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
|
107 |
refiner.enable_xformers_memory_efficient_attention()
|
108 |
refiner = refiner.to(device)
|
@@ -141,7 +141,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
|
|
141 |
image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
|
142 |
torch.cuda.empty_cache()
|
143 |
|
144 |
-
|
145 |
refiner = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
|
146 |
refiner.enable_xformers_memory_efficient_attention()
|
147 |
refiner = refiner.to(device)
|
@@ -182,7 +182,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
|
|
182 |
image = refiner(Prompt, negative_prompt=negative_prompt, image=image, denoising_start=high_noise_frac).images[0]
|
183 |
torch.cuda.empty_cache()
|
184 |
|
185 |
-
|
186 |
refiner = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
|
187 |
refiner.enable_xformers_memory_efficient_attention()
|
188 |
refiner = refiner.to(device)
|
@@ -205,9 +205,9 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
|
|
205 |
return upscaled
|
206 |
else:
|
207 |
|
208 |
-
|
209 |
-
|
210 |
-
|
211 |
|
212 |
if Model == "Animagine XL 3.0":
|
213 |
animagine = DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-3.0", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-3.0")
|
@@ -226,7 +226,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
|
|
226 |
image = animagine(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
|
227 |
torch.cuda.empty_cache()
|
228 |
|
229 |
-
|
230 |
animagine = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
|
231 |
animagine.enable_xformers_memory_efficient_attention()
|
232 |
animagine = animagine.to(device)
|
@@ -273,7 +273,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
|
|
273 |
refined = sdxl(Prompt, negative_prompt=negative_prompt, image=image, denoising_start=high_noise_frac).images[0]
|
274 |
torch.cuda.empty_cache()
|
275 |
|
276 |
-
|
277 |
sdxl = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
|
278 |
sdxl.enable_xformers_memory_efficient_attention()
|
279 |
sdxl = sdxl.to(device)
|
|
|
102 |
image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
|
103 |
torch.cuda.empty_cache()
|
104 |
|
105 |
+
if upscale == "Yes":
|
106 |
refiner = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
|
107 |
refiner.enable_xformers_memory_efficient_attention()
|
108 |
refiner = refiner.to(device)
|
|
|
141 |
image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
|
142 |
torch.cuda.empty_cache()
|
143 |
|
144 |
+
if upscale == "Yes":
|
145 |
refiner = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
|
146 |
refiner.enable_xformers_memory_efficient_attention()
|
147 |
refiner = refiner.to(device)
|
|
|
182 |
image = refiner(Prompt, negative_prompt=negative_prompt, image=image, denoising_start=high_noise_frac).images[0]
|
183 |
torch.cuda.empty_cache()
|
184 |
|
185 |
+
if upscale == "Yes":
|
186 |
refiner = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
|
187 |
refiner.enable_xformers_memory_efficient_attention()
|
188 |
refiner = refiner.to(device)
|
|
|
205 |
return upscaled
|
206 |
else:
|
207 |
|
208 |
+
image = semi(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
|
209 |
+
torch.cuda.empty_cache()
|
210 |
+
return image
|
211 |
|
212 |
if Model == "Animagine XL 3.0":
|
213 |
animagine = DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-3.0", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-3.0")
|
|
|
226 |
image = animagine(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
|
227 |
torch.cuda.empty_cache()
|
228 |
|
229 |
+
if upscale == "Yes":
|
230 |
animagine = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
|
231 |
animagine.enable_xformers_memory_efficient_attention()
|
232 |
animagine = animagine.to(device)
|
|
|
273 |
refined = sdxl(Prompt, negative_prompt=negative_prompt, image=image, denoising_start=high_noise_frac).images[0]
|
274 |
torch.cuda.empty_cache()
|
275 |
|
276 |
+
if upscale == "Yes":
|
277 |
sdxl = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
|
278 |
sdxl.enable_xformers_memory_efficient_attention()
|
279 |
sdxl = sdxl.to(device)
|