Manjushri committed on
Commit
6312a59
·
verified ·
1 Parent(s): b66ce33

Update app.py

Browse files

Testing upscaler

Files changed (1) hide show
  1. app.py +24 -12
app.py CHANGED
@@ -9,7 +9,7 @@ device = 'cuda' if torch.cuda.is_available() else 'cpu'
9
  torch.cuda.max_memory_allocated(device=device)
10
  torch.cuda.empty_cache()
11
 
12
- def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, upscale, high_noise_frac):
13
  generator = np.random.seed(0) if seed == 0 else torch.manual_seed(seed)
14
 
15
  if Model == "PhotoReal":
@@ -17,7 +17,8 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, up
17
  pipe.enable_xformers_memory_efficient_attention()
18
  pipe = pipe.to(device)
19
  torch.cuda.empty_cache()
20
- if upscale == "Yes":
 
21
  refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
22
  refiner.enable_xformers_memory_efficient_attention()
23
  refiner = refiner.to(device)
@@ -25,18 +26,28 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, up
25
  int_image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
26
  image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
27
  torch.cuda.empty_cache()
28
- return image
29
- else:
30
- image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
 
 
 
 
31
  torch.cuda.empty_cache()
 
 
32
  return image
 
 
 
 
33
 
34
  if Model == "Anime":
35
  anime = DiffusionPipeline.from_pretrained("circulus/canvers-anime-v3.8.1", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("circulus/canvers-anime-v3.8.1")
36
  anime.enable_xformers_memory_efficient_attention()
37
  anime = anime.to(device)
38
  torch.cuda.empty_cache()
39
- if upscale == "Yes":
40
  refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
41
  refiner.enable_xformers_memory_efficient_attention()
42
  refiner = refiner.to(device)
@@ -55,7 +66,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, up
55
  disney.enable_xformers_memory_efficient_attention()
56
  disney = disney.to(device)
57
  torch.cuda.empty_cache()
58
- if upscale == "Yes":
59
  refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
60
  refiner.enable_xformers_memory_efficient_attention()
61
  refiner = refiner.to(device)
@@ -74,7 +85,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, up
74
  story.enable_xformers_memory_efficient_attention()
75
  story = story.to(device)
76
  torch.cuda.empty_cache()
77
- if upscale == "Yes":
78
  refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
79
  refiner.enable_xformers_memory_efficient_attention()
80
  refiner = refiner.to(device)
@@ -93,7 +104,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, up
93
  semi.enable_xformers_memory_efficient_attention()
94
  semi = semi.to(device)
95
  torch.cuda.empty_cache()
96
- if upscale == "Yes":
97
  refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
98
  refiner.enable_xformers_memory_efficient_attention()
99
  refiner = refiner.to(device)
@@ -112,7 +123,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, up
112
  animagine.enable_xformers_memory_efficient_attention()
113
  animagine = animagine.to(device)
114
  torch.cuda.empty_cache()
115
- if upscale == "Yes":
116
  torch.cuda.empty_cache()
117
  torch.cuda.max_memory_allocated(device=device)
118
  int_image = animagine(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, output_type="latent").images
@@ -137,7 +148,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, up
137
  sdxl = sdxl.to(device)
138
  torch.cuda.empty_cache()
139
 
140
- if upscale == "Yes":
141
  torch.cuda.max_memory_allocated(device=device)
142
  torch.cuda.empty_cache()
143
  image = sdxl(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, output_type="latent").images
@@ -164,7 +175,8 @@ gr.Interface(fn=genie, inputs=[gr.Radio(['PhotoReal', 'Anime', 'Disney', 'StoryB
164
  gr.Slider(25, maximum=100, value=50, step=25, label='Number of Iterations'),
165
  gr.Slider(minimum=0, step=1, maximum=9999999999999999, randomize=True, label='Seed: 0 is Random'),
166
  gr.Radio(["Yes", "No"], label='SDXL 1.0 Refiner: Use if the Image has too much Noise', value='No'),
167
- gr.Slider(minimum=.9, maximum=.99, value=.95, step=.01, label='Refiner Denoise Start %')],
 
168
  outputs=gr.Image(label='Generated Image'),
169
  title="Manju Dream Booth V1.6 with SDXL 1.0 Refiner - GPU",
170
  description="<br><br><b/>Warning: This Demo is capable of producing NSFW content.",
 
9
  torch.cuda.max_memory_allocated(device=device)
10
  torch.cuda.empty_cache()
11
 
12
+ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, refine, high_noise_frac, upscale):
13
  generator = np.random.seed(0) if seed == 0 else torch.manual_seed(seed)
14
 
15
  if Model == "PhotoReal":
 
17
  pipe.enable_xformers_memory_efficient_attention()
18
  pipe = pipe.to(device)
19
  torch.cuda.empty_cache()
20
+
21
+ if refine == "Yes":
22
  refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
23
  refiner.enable_xformers_memory_efficient_attention()
24
  refiner = refiner.to(device)
 
26
  int_image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
27
  image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
28
  torch.cuda.empty_cache()
29
+
30
+ if upscale == "Yes":
31
+ upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
32
+ upscaler.enable_xformers_memory_efficient_attention()
33
+ upscaler = upscaler.to(device)
34
+ torch.cuda.empty_cache()
35
+ upscaled = upscaler(prompt=prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
36
  torch.cuda.empty_cache()
37
+ return upscaled
38
+ else:
39
  return image
40
+ else:
41
+ image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
42
+ torch.cuda.empty_cache()
43
+ return image
44
 
45
  if Model == "Anime":
46
  anime = DiffusionPipeline.from_pretrained("circulus/canvers-anime-v3.8.1", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("circulus/canvers-anime-v3.8.1")
47
  anime.enable_xformers_memory_efficient_attention()
48
  anime = anime.to(device)
49
  torch.cuda.empty_cache()
50
+ if refine == "Yes":
51
  refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
52
  refiner.enable_xformers_memory_efficient_attention()
53
  refiner = refiner.to(device)
 
66
  disney.enable_xformers_memory_efficient_attention()
67
  disney = disney.to(device)
68
  torch.cuda.empty_cache()
69
+ if refine == "Yes":
70
  refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
71
  refiner.enable_xformers_memory_efficient_attention()
72
  refiner = refiner.to(device)
 
85
  story.enable_xformers_memory_efficient_attention()
86
  story = story.to(device)
87
  torch.cuda.empty_cache()
88
+ if refine == "Yes":
89
  refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
90
  refiner.enable_xformers_memory_efficient_attention()
91
  refiner = refiner.to(device)
 
104
  semi.enable_xformers_memory_efficient_attention()
105
  semi = semi.to(device)
106
  torch.cuda.empty_cache()
107
+ if refine == "Yes":
108
  refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
109
  refiner.enable_xformers_memory_efficient_attention()
110
  refiner = refiner.to(device)
 
123
  animagine.enable_xformers_memory_efficient_attention()
124
  animagine = animagine.to(device)
125
  torch.cuda.empty_cache()
126
+ if refine == "Yes":
127
  torch.cuda.empty_cache()
128
  torch.cuda.max_memory_allocated(device=device)
129
  int_image = animagine(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, output_type="latent").images
 
148
  sdxl = sdxl.to(device)
149
  torch.cuda.empty_cache()
150
 
151
+ if refine == "Yes":
152
  torch.cuda.max_memory_allocated(device=device)
153
  torch.cuda.empty_cache()
154
  image = sdxl(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, output_type="latent").images
 
175
  gr.Slider(25, maximum=100, value=50, step=25, label='Number of Iterations'),
176
  gr.Slider(minimum=0, step=1, maximum=9999999999999999, randomize=True, label='Seed: 0 is Random'),
177
  gr.Radio(["Yes", "No"], label='SDXL 1.0 Refiner: Use if the Image has too much Noise', value='No'),
178
+ gr.Slider(minimum=.9, maximum=.99, value=.95, step=.01, label='Refiner Denoise Start %'),
179
+ gr.Radio(["Yes", "No"], label = 'SD 2.0 X2 Latent Upscaler?', value="No")],
180
  outputs=gr.Image(label='Generated Image'),
181
  title="Manju Dream Booth V1.6 with SDXL 1.0 Refiner - GPU",
182
  description="<br><br><b/>Warning: This Demo is capable of producing NSFW content.",