Inmental committed
Commit d97b2a0 · verified · 1 Parent(s): 86e3a32

Upload folder using huggingface_hub

Files changed (1):
  1. gradio_sketch2imagehd.py +12 -42
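The commit message indicates the file was pushed with the huggingface_hub folder-upload API rather than edited in the browser. A minimal sketch of that kind of push is below; the repo id and token are placeholders for illustration, not values taken from this commit.

from huggingface_hub import HfApi

api = HfApi(token="hf_...")  # placeholder token, not part of this commit
api.upload_folder(
    folder_path=".",                      # local folder containing gradio_sketch2imagehd.py
    repo_id="Inmental/sketch2imagehd",    # hypothetical Space id, for illustration only
    repo_type="space",
    commit_message="Upload folder using huggingface_hub",
)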
gradio_sketch2imagehd.py CHANGED
@@ -112,42 +112,6 @@ def normalize_image(image, range_from=(-1, 1)):
 
     return image_t
 
-
-def run(image, prompt, prompt_template, style_name, seed, val_r):
-    print(f"prompt: {prompt}")
-    print("sketch updated")
-    print(image)
-    if image is None:
-        ones = Image.new("L", (1024, 1024), 255)
-        temp_uri = pil_image_to_data_uri(ones)
-        return ones, gr.update(link=temp_uri), gr.update(link=temp_uri)
-    print(f"Input Image Size: {image.size}")
-    prompt = prompt_template.replace("{prompt}", prompt)
-    image = image.convert("RGB")
-    #image_t = normalize_image(image, range_from=(-1, 1))
-
-    print(f"r_val={val_r}, seed={seed}")
-    image_t = F.to_tensor(image) < 0.5
-    with torch.no_grad():
-
-        c_t = image_t.unsqueeze(0).cuda().float()
-        torch.manual_seed(seed)
-        B, C, H, W = c_t.shape
-        noise = torch.randn((1, 4, H // 8, W // 8), device=c_t.device)
-
-        print("Calling Pix2Pix model... ct: {}, prompt: {}, deterministic: False, r: {}, noise_map: {}".format(c_t.shape, prompt, val_r, noise.shape))
-        output_image = pix2pix_model(c_t, prompt, deterministic=False, r=val_r, noise_map=noise)
-    output_pil = F.to_pil_image(output_image[0].cpu() * 0.5 + 0.5)
-    input_sketch_uri = pil_image_to_data_uri(Image.fromarray(255 - np.array(image)))
-    output_image_uri = pil_image_to_data_uri(output_pil)
-
-    print(f"Output Image Size: {output_pil.size}")
-    return (
-        output_pil,
-        gr.update(link=input_sketch_uri),
-        gr.update(link=output_image_uri),
-    )
-
 def run(image, prompt, prompt_template, style_name, seed, val_r):
     """Runs the main image processing pipeline."""
     logging.debug("Running model inference...")
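The block removed in this hunk was an older, duplicate definition of run(); because the logging-based definition that follows it binds the same module-level name, the later one always shadowed it and the removed code was never reached. For reference, the conditioning it built can be sketched on its own roughly as follows. This is a standalone approximation: the .cuda() call and the pix2pix_model invocation from the removed code are left out, since they depend on the repo's model setup.

import torch
import torchvision.transforms.functional as F
from PIL import Image

def preprocess_sketch(image: Image.Image, seed: int):
    """Rough sketch of the conditioning the removed run() assembled."""
    image = image.convert("RGB")
    # Dark strokes become 1.0, white background becomes 0.0
    image_t = (F.to_tensor(image) < 0.5).float()
    c_t = image_t.unsqueeze(0)                    # (1, 3, H, W) conditioning tensor
    torch.manual_seed(seed)                       # reproducible noise for a given seed
    _, _, H, W = c_t.shape
    noise = torch.randn((1, 4, H // 8, W // 8))   # latent-resolution noise map
    return c_t, noise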
@@ -207,6 +171,12 @@ def run(image, prompt, prompt_template, style_name, seed, val_r):
 
 def gradio_interface(image, prompt, style_name, seed, val_r):
     """Gradio interface function to handle inputs and generate outputs."""
+    # Endpoint: `image` - Input image from user (Sketch Image)
+    # Endpoint: `prompt` - Text prompt (optional)
+    # Endpoint: `style_name` - Selected style from dropdown
+    # Endpoint: `seed` - Seed for reproducibility
+    # Endpoint: `val_r` - Sketch guidance value
+
     prompt_template = STYLES.get(style_name, STYLES[DEFAULT_STYLE_NAME])
     result_image = run(image, prompt, prompt_template, style_name, seed, val_r)
     return result_image
@@ -215,13 +185,13 @@ def gradio_interface(image, prompt, style_name, seed, val_r):
 interface = gr.Interface(
     fn=gradio_interface,
     inputs=[
-        gr.Image(source="upload", type="pil", label="Sketch Image"),
-        gr.Textbox(lines=2, placeholder="Enter a text prompt (optional)", label="Prompt"),
-        gr.Dropdown(choices=list(STYLES.keys()), value=DEFAULT_STYLE_NAME, label="Style"),
-        gr.Slider(minimum=0, maximum=MAX_SEED, step=1, default=DEFAULT_SEED, label="Seed"),
-        gr.Slider(minimum=0.0, maximum=1.0, step=0.01, default=VAL_R_DEFAULT, label="Sketch Guidance")
+        gr.Image(source="upload", type="pil", label="Sketch Image"),  # Endpoint: `image`
+        gr.Textbox(lines=2, placeholder="Enter a text prompt (optional)", label="Prompt"),  # Endpoint: `prompt`
+        gr.Dropdown(choices=list(STYLES.keys()), value=DEFAULT_STYLE_NAME, label="Style"),  # Endpoint: `style_name`
+        gr.Slider(minimum=0, maximum=MAX_SEED, step=1, default=DEFAULT_SEED, label="Seed"),  # Endpoint: `seed`
+        gr.Slider(minimum=0.0, maximum=1.0, step=0.01, default=VAL_R_DEFAULT, label="Sketch Guidance")  # Endpoint: `val_r`
     ],
-    outputs=gr.Image(label="Generated Image"),
+    outputs=gr.Image(label="Generated Image"),  # Output endpoint: `result_image`
     title="Sketch to Image Generation",
     description="Upload a sketch and generate an image based on a prompt and style."
 )
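The added comments label each gr.Interface component with the endpoint parameter it feeds. A rough usage sketch of how those labeled endpoints line up with a programmatic call through gradio_client is below; the Space id, file name, and example values are placeholders, it assumes a gradio 3.x-era client matching the source="upload" API used here, and it assumes the interface is exposed under the default /predict api_name.

from gradio_client import Client

client = Client("Inmental/sketch2imagehd")   # hypothetical Space id, for illustration
result = client.predict(
    "sketch.png",            # endpoint `image`: path to the sketch file
    "a lighthouse at dusk",  # endpoint `prompt`: optional text prompt
    "Fantasy art",           # endpoint `style_name`: placeholder, must be a key of STYLES
    42,                      # endpoint `seed`
    0.4,                     # endpoint `val_r`: sketch guidance
    api_name="/predict",
)
print(result)                # path to the generated image returned by the Space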
 