Create app.py
app.py
ADDED
@@ -0,0 +1,142 @@
import gradio as gr
from transformers import pipeline, AutoImageProcessor, MobileNetV2ForSemanticSegmentation
from PIL import Image
import numpy as np
import cv2

# Load segmentation pipeline and model
pipe = pipeline("image-segmentation", model="google/deeplabv3_mobilenet_v2_1.0_513")
processor = AutoImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

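# Note: the pipeline above performs the segmentation end to end; `processor` and
# `model` are loaded from the same checkpoint but are not referenced below.
# VirtualTryOn segments the person and the clothing item, isolates each region
# with its mask, resizes the clothing cut-out to the person's dimensions, and
# additively blends the two results.
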
def VirtualTryOn(user_image, clothing_image):
    # Accept numpy arrays as well as PIL images
    if isinstance(user_image, np.ndarray):
        user_image = Image.fromarray(user_image)

    if isinstance(clothing_image, np.ndarray):
        clothing_image = Image.fromarray(clothing_image)

    if isinstance(user_image, Image.Image) and isinstance(clothing_image, Image.Image):
        # Segment both images; the mask of the first returned segment is used
        user_segmentation = pipe(user_image)
        user_mask = user_segmentation[0]['mask']

        clothing_segmentation = pipe(clothing_image)
        clothing_mask = clothing_segmentation[0]['mask']

        user_mask_array = np.array(user_mask)
        clothing_mask_array = np.array(clothing_mask)

        user_image_array = np.array(user_image)
        clothing_image_array = np.array(clothing_image)

        # Keep only the masked region of each image
        user_isolated = cv2.bitwise_and(user_image_array, user_image_array, mask=user_mask_array)
        clothing_isolated = cv2.bitwise_and(clothing_image_array, clothing_image_array, mask=clothing_mask_array)

        # Resize the clothing cut-out to match the person image
        user_height, user_width, _ = user_isolated.shape
        clothing_resized = cv2.resize(clothing_isolated, (user_width, user_height))

        # Additively blend the two cut-outs and return a PIL image
        combined_image = cv2.add(user_isolated, clothing_resized)
        combined_image = Image.fromarray(combined_image)

        return combined_image
    else:
        raise ValueError("Both inputs should be PIL images")

css = """
.gradio-container {
    background-color: #f5f5f5;
    font-family: 'Arial', sans-serif;
    padding: 20px;
    border-radius: 15px;
    box-shadow: 0px 4px 8px rgba(0, 0, 0, 0.1);
    width: 90vw;
    max-width: 1200px;
    margin: auto;
}
h1 {
    color: #333333;
    text-align: center;
    font-size: 2.5rem;
    margin-bottom: 20px;
}
#images-container {
    display: flex;
    justify-content: space-around;
    align-items: center;
    gap: 20px;
    padding: 15px;
    border: 2px solid #cccccc;
    border-radius: 15px;
    background-color: #ffffff;
    box-shadow: 0px 4px 6px rgba(0, 0, 0, 0.1);
}
.image-container {
    display: flex;
    flex-direction: column;
    align-items: center;
    gap: 10px;
}
.image-container label {
    font-weight: bold;
    color: #555555;
}
.image-box {
    width: 220px;
    height: 300px;
    border: 3px dashed #aaaaaa;
    border-radius: 10px;
    display: flex;
    justify-content: center;
    align-items: center;
    background-color: #f9f9f9;
}
button {
    font-size: 1.2rem;
    padding: 10px 20px;
    border-radius: 10px;
    border: none;
    cursor: pointer;
    transition: all 0.3s ease;
}
#try-on-button {
    background-color: #4CAF50;
    color: white;
}
#try-on-button:hover {
    background-color: #45a049;
}
#clear-button {
    background-color: #FF5722;
    color: white;
}
#clear-button:hover {
    background-color: #e64a19;
}
"""

with gr.Blocks(css=css) as iface:
    gr.Markdown("<h1>Virtual Try-On Application</h1>")

    with gr.Row(elem_id="images-container"):
        with gr.Column(elem_id="user-image-container", elem_classes="image-container"):
            gr.Markdown("**Upload Person Image**")
            user_image = gr.Image(type="pil", label="Person Image", elem_id="user-image", elem_classes="image-box")

        with gr.Column(elem_id="clothing-image-container", elem_classes="image-container"):
            gr.Markdown("**Upload Clothing Image**")
            clothing_image = gr.Image(type="pil", label="Clothing Image", elem_id="clothing-image", elem_classes="image-box")

        with gr.Column(elem_id="output-image-container", elem_classes="image-container"):
            gr.Markdown("**Try-On Result**")
            output = gr.Image(type="pil", label="Result", elem_id="output", elem_classes="image-box")

    with gr.Row():
        with gr.Column():
            try_on_button = gr.Button("Try On", elem_id="try-on-button")
        with gr.Column():
            clear_button = gr.Button("Clear", elem_id="clear-button")

    try_on_button.click(fn=VirtualTryOn, inputs=[user_image, clothing_image], outputs=output)
    clear_button.click(fn=lambda: (None, None, None), inputs=[], outputs=[user_image, clothing_image, output])

iface.launch()
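
If this is meant to run as a Hugging Face Space, the imports above imply a requirements file alongside app.py. A minimal sketch, assuming the standard PyPI package names for each import (versions unpinned; torch backs the transformers pipeline, and opencv-python-headless is a common choice in hosted environments):

# requirements.txt (sketch, names assumed from the imports in app.py)
gradio
transformers
torch
Pillow
numpy
opencv-python-headless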