Commit 8663a23 · more accurate masking
Parent(s): eb80da0

app.py CHANGED
@@ -129,7 +129,7 @@ def inpaint(input_img, prompt):
 
     input_img_with_mask_64 = input_img_with_mask_converted.resize((64, 64), resample=Image.BICUBIC)
     # TODO: make 256x256 mask more accurate when upscaling?
-
+    input_img_with_mask_256 = input_img_with_mask_converted.resize((256, 256), resample=Image.BICUBIC)
     # return input_img, input_img_with_mask_64
     # Source image we are inpainting
     source_image_256 = pil_to_numpy(input_img_256)
@@ -139,18 +139,18 @@ def inpaint(input_img, prompt):
     # Assuming that all black pixels are meant for inpainting.
     # input_img_with_mask_64 = input_img_with_mask.convert('L').resize((64, 64), resample=Image.BICUBIC)
     gray_scale_source_image_64 = image_to_tensor(input_img_with_mask_64)
-
+    gray_scale_source_image_256 = image_to_tensor(input_img_with_mask_256)
 
     source_mask_64 = (gray_scale_source_image_64!=0).float()
-
+    source_mask_256 = (gray_scale_source_image_256!=0).float()
 
     source_mask_64_img = tensor_to_image(source_mask_64)
 
     # The mask should always be a boolean 64x64 mask, and then we
     # can upsample it for the second stage.
     source_mask_64 = source_mask_64.unsqueeze(0)
-
-    source_mask_256 = F.interpolate(source_mask_64, (256, 256), mode='nearest')
+    source_mask_256 = source_mask_256.unsqueeze(0)
+    # source_mask_256 = F.interpolate(source_mask_64, (256, 256), mode='nearest')
 
 
     ##############################
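In short, the commit stops upsampling the 64x64 boolean mask to 256x256 with nearest-neighbour interpolation and instead thresholds a 256x256 resize of the masked input directly, so the second-stage mask follows the drawn region more closely. A minimal sketch of the two approaches follows; the dummy input and the image_to_tensor helper below are hypothetical stand-ins for what app.py actually uses, which is not shown in this diff.

# Hedged sketch of the masking change above; image_to_tensor is a hypothetical
# stand-in for the helper app.py defines elsewhere (not shown in this diff).
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image


def image_to_tensor(img):
    # Grayscale PIL image -> (1, H, W) float tensor in [0, 1].
    return torch.from_numpy(np.array(img)).float().unsqueeze(0) / 255.0


# Dummy masked input: white background with a black square marking the inpaint region.
input_img_with_mask_converted = Image.new("L", (512, 512), 255)
input_img_with_mask_converted.paste(0, (100, 100, 300, 300))

# Old approach: threshold at 64x64, then nearest-neighbour upsample to 256x256.
mask_64 = (image_to_tensor(
    input_img_with_mask_converted.resize((64, 64), resample=Image.BICUBIC)) != 0).float()
mask_256_old = F.interpolate(mask_64.unsqueeze(0), (256, 256), mode="nearest")

# New approach: resize the masked image to 256x256 first, then threshold,
# so the mask traces the drawn region instead of blocky upsampled 4x4 cells.
mask_256_new = (image_to_tensor(
    input_img_with_mask_converted.resize((256, 256), resample=Image.BICUBIC)) != 0).float().unsqueeze(0)

print(mask_256_old.shape, mask_256_new.shape)  # both torch.Size([1, 1, 256, 256])

One likely trade-off, given the "all black pixels are meant for inpainting" assumption in the code: thresholding after a bicubic resize means any pixel the resampling pushes away from pure black falls outside the inpaint region, so the 64x64 and 256x256 masks are no longer guaranteed to cover exactly the same area.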