Spaces:
Runtime error
Runtime error
Commit
·
f129e08
1
Parent(s):
547f21c
better mask matching
Browse files
app.py
CHANGED
@@ -125,11 +125,11 @@ def inpaint(input_img, input_img_with_mask, prompt):
|
|
125 |
input_img_256 = input_img.convert('RGB').resize((256, 256), resample=Image.BICUBIC)
|
126 |
input_img_64 = input_img.convert('RGB').resize((64, 64), resample=Image.BICUBIC)
|
127 |
|
128 |
-
input_img_with_mask_converted = input_img.convert('RGBA')
|
129 |
-
input_img_with_mask_64 = input_img_with_mask_converted.getchannel( 'A' ) # Mode 'L'
|
130 |
|
131 |
-
input_img_with_mask_64 =
|
132 |
-
|
|
|
133 |
# Source image we are inpainting
|
134 |
source_image_256 = pil_to_numpy(input_img_256)
|
135 |
source_image_64 = pil_to_numpy(input_img_64)
|
@@ -137,14 +137,19 @@ def inpaint(input_img, input_img_with_mask, prompt):
|
|
137 |
# Since gradio doesn't supply which pixels were drawn, we need to find it ourselves!
|
138 |
# Assuming that all black pixels are meant for inpainting.
|
139 |
# input_img_with_mask_64 = input_img_with_mask.convert('L').resize((64, 64), resample=Image.BICUBIC)
|
140 |
-
|
141 |
-
|
|
|
|
|
|
|
|
|
142 |
source_mask_64_img = tensor_to_image(source_mask_64)
|
143 |
|
144 |
# The mask should always be a boolean 64x64 mask, and then we
|
145 |
# can upsample it for the second stage.
|
146 |
source_mask_64 = source_mask_64.unsqueeze(0)
|
147 |
-
source_mask_256 = F.interpolate(source_mask_64, (256, 256), mode='nearest')
|
|
|
148 |
|
149 |
|
150 |
##############################
|
|
|
125 |
input_img_256 = input_img.convert('RGB').resize((256, 256), resample=Image.BICUBIC)
|
126 |
input_img_64 = input_img.convert('RGB').resize((64, 64), resample=Image.BICUBIC)
|
127 |
|
128 |
+
input_img_with_mask_converted = input_img_with_mask.convert('RGBA').getchannel( 'A' ) # Mode 'L'
|
|
|
129 |
|
130 |
+
input_img_with_mask_64 = input_img_with_mask_converted.resize((64, 64), resample=Image.BICUBIC)
|
131 |
+
input_img_with_mask_256 = input_img_with_mask_converted.resize((256, 256), resample=Image.BICUBIC)
|
132 |
+
# return input_img, input_img_with_mask_64
|
133 |
# Source image we are inpainting
|
134 |
source_image_256 = pil_to_numpy(input_img_256)
|
135 |
source_image_64 = pil_to_numpy(input_img_64)
|
|
|
137 |
# Since gradio doesn't supply which pixels were drawn, we need to find it ourselves!
|
138 |
# Assuming that all black pixels are meant for inpainting.
|
139 |
# input_img_with_mask_64 = input_img_with_mask.convert('L').resize((64, 64), resample=Image.BICUBIC)
|
140 |
+
gray_scale_source_image_64 = image_to_tensor(input_img_with_mask_64)
|
141 |
+
gray_scale_source_image_256 = image_to_tensor(input_img_with_mask_256)
|
142 |
+
|
143 |
+
source_mask_64 = (gray_scale_source_image_64!=0).float()
|
144 |
+
source_mask_256 = (gray_scale_source_image_256!=0).float()
|
145 |
+
|
146 |
source_mask_64_img = tensor_to_image(source_mask_64)
|
147 |
|
148 |
# The mask should always be a boolean 64x64 mask, and then we
|
149 |
# can upsample it for the second stage.
|
150 |
source_mask_64 = source_mask_64.unsqueeze(0)
|
151 |
+
source_mask_256 = source_mask_256.unsqueeze(0)
|
152 |
+
# source_mask_256 = F.interpolate(source_mask_64, (256, 256), mode='nearest')
|
153 |
|
154 |
|
155 |
##############################
|