ddosxd committed on
Commit
c9991df
·
verified ·
1 Parent(s): 0a7de84

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -37
app.py CHANGED
@@ -47,14 +47,24 @@ if torch.cuda.is_available():
47
  add_watermarker=False,
48
  variant="fp16"
49
  )
 
 
 
 
 
 
 
50
  if ENABLE_CPU_OFFLOAD:
51
  pipe.enable_model_cpu_offload()
 
52
  else:
53
  pipe.to(device)
 
54
  print("Loaded on Device!")
55
 
56
  if USE_TORCH_COMPILE:
57
  pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
 
58
  print("Model Compiled!")
59
 
60
 
@@ -70,33 +80,9 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
70
  return seed
71
 
72
 
73
- def generate(prompt: str,
74
- negative_prompt: str = "",
75
- use_negative_prompt: bool = False,
76
- seed: int = 0,
77
- width: int = 1024,
78
- height: int = 1024,
79
- guidance_scale: float = 3,
80
- randomize_seed: bool = False,
81
- use_resolution_binning: bool = True,
82
- progress=gr.Progress(track_tqdm=True)):
83
- if check_text(prompt, negative_prompt):
84
- return 'NSFW Detection'
85
- return _generate(
86
- prompt,
87
- negative_prompt,
88
- use_negative_prompt,
89
- seed,
90
- width,
91
- height,
92
- guidance_scale,
93
- randomize_seed,
94
- use_resolution_binning,
95
- progress
96
- )
97
 
98
  @spaces.GPU(enable_queue=True)
99
- def _generate(
100
  prompt: str,
101
  negative_prompt: str = "",
102
  use_negative_prompt: bool = False,
@@ -115,18 +101,22 @@ def _generate(
115
  if not use_negative_prompt:
116
  negative_prompt = "" # type: ignore
117
  negative_prompt += default_negative
118
- images = pipe(
119
- prompt=prompt,
120
- negative_prompt=negative_prompt,
121
- width=width,
122
- height=height,
123
- guidance_scale=guidance_scale,
124
- num_inference_steps=25,
125
- generator=generator,
126
- num_images_per_prompt=NUM_IMAGES_PER_PROMPT,
127
- use_resolution_binning=use_resolution_binning,
128
- output_type="pil",
129
- ).images
 
 
 
 
130
 
131
  image_paths = [save_image(img) for img in images]
132
  return image_paths, seed
 
47
  add_watermarker=False,
48
  variant="fp16"
49
  )
50
+ pipe2 = DiffusionPipeline.from_pretrained(
51
+ "SG161222/RealVisXL_V4.0",
52
+ torch_dtype=torch.float16,
53
+ use_safetensors=True,
54
+ add_watermarker=False,
55
+ variant="fp16"
56
+ )
57
  if ENABLE_CPU_OFFLOAD:
58
  pipe.enable_model_cpu_offload()
59
+ pipe2.enable_model_cpu_offload()
60
  else:
61
  pipe.to(device)
62
+ pipe2.to(device)
63
  print("Loaded on Device!")
64
 
65
  if USE_TORCH_COMPILE:
66
  pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
67
+ pipe2.unet = torch.compile(pipe2.unet, mode="reduce-overhead", fullgraph=True)
68
  print("Model Compiled!")
69
 
70
 
 
80
  return seed
81
 
82
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
 
84
  @spaces.GPU(enable_queue=True)
85
+ def generate(
86
  prompt: str,
87
  negative_prompt: str = "",
88
  use_negative_prompt: bool = False,
 
101
  if not use_negative_prompt:
102
  negative_prompt = "" # type: ignore
103
  negative_prompt += default_negative
104
+
105
+ options = {
106
+ "prompt":prompt,
107
+ "negative_prompt":negative_prompt,
108
+ "width":width,
109
+ "height":height,
110
+ "guidance_scale":guidance_scale,
111
+ "num_inference_steps":25,
112
+ "generator":generator,
113
+ "num_images_per_prompt":NUM_IMAGES_PER_PROMPT,
114
+ "use_resolution_binning":use_resolution_binning,
115
+ "output_type":"pil",
116
+
117
+ }
118
+
119
+ images = pipe(**options).images+pipe2(**options).images
120
 
121
  image_paths = [save_image(img) for img in images]
122
  return image_paths, seed