Spaces:
Runtime error
Runtime error
fix
Browse files
app.py
CHANGED
@@ -1,30 +1,19 @@
|
|
1 |
import gradio as gr
|
|
|
2 |
import paddlehub as hub
|
3 |
-
import numpy as np
|
4 |
from PIL import Image
|
5 |
-
import
|
6 |
|
7 |
-
|
8 |
|
9 |
|
10 |
-
def inference(content,
|
11 |
-
|
12 |
-
|
13 |
-
'content': cv2.imread(content.name),
|
14 |
-
'styles': [cv2.imread(style.name)]
|
15 |
-
}])
|
16 |
-
return Image.fromarray(np.uint8(result[0]['data'])[:, :, ::-1]).convert('RGB')
|
17 |
|
|
|
|
|
|
|
18 |
|
19 |
-
|
20 |
-
|
21 |
-
description = "🎨 This service can be used to transfer the image style in one picture 🎨 into another picture 🖼️."
|
22 |
-
article = "💡 Hint: first upload your target picture 🖼️ into the upper left file box, " \
|
23 |
-
"and the style picture 🎨 into the lower left file box, " \
|
24 |
-
"then click the \"Submit\" button, wait a moment before you can check the generated picture ✅."
|
25 |
-
examples = []
|
26 |
-
iface = gr.Interface(inference, inputs=[gr.inputs.Image(type="file", label='Your input picture ๐๏ธ'),
|
27 |
-
gr.inputs.Image(type="file", label='The style picture ๐')],
|
28 |
-
outputs=gr.outputs.Image(type="pil"), enable_queue=True, title=title, article=article,
|
29 |
-
description=description, examples=examples)
|
30 |
-
iface.launch()
|
|
|
1 |
import gradio as gr
|
2 |
+
import cv2
|
3 |
import paddlehub as hub
|
|
|
4 |
from PIL import Image
|
5 |
+
import numpy as np
|
6 |
|
7 |
+
model = hub.Module(name='stylepro_artistic')
|
8 |
|
9 |
|
def inference(content, style):
    """Run artistic style transfer on a pair of images.

    Args:
        content: Filesystem path of the content (target) image, as supplied
            by the gradio ``filepath`` input component.
        style: Filesystem path of the style image.

    Returns:
        The stylised image as a numpy array with the channel axis reversed
        (cv2 reads BGR; the ``[:, :, ::-1]`` flip presumably yields RGB for
        the gradio numpy output — matches the pre-commit code's conversion).
    """
    # cv2.imread loads each file from disk as an HxWx3 BGR array.
    content_img = cv2.imread(content)
    style_img = cv2.imread(style)
    outputs = model.style_transfer(
        images=[{'content': content_img, 'styles': [style_img]}]
    )
    # Reverse the channel axis of the first (and only) result: BGR -> RGB.
    return outputs[0]['data'][:, :, ::-1]
|
|
|
|
|
|
|
|
|
13 |
|
14 |
+
|
15 |
+
# Text shown on the Gradio page.
title = "stylepro_artistic"
description = "Art style transfer models can convert a given image to an arbitrary art style. This model, StyleProNet, adopts a fully convolutional neural network architecture (FCNs) as a whole, and reconstructs artistic style pictures through encoder-decoder. The core of StyleProNet is the unparameterized content-style fusion algorithm Style Projection, which has a small model size and fast response speed. The loss function of model training includes style loss, content perceptual loss and content KL loss, which ensures that the model can restore the semantic details of content pictures and style information of style pictures with high fidelity. The pre-training dataset uses the MS-COCO dataset as the content-side images, and the WikiArt dataset as the style-side images. For more details, please refer to the StyleProNet paper https://arxiv.org/abs/2003.07694 ."
examples = []

# Build the demo: two filepath image inputs (content then style), one numpy
# image output, then serve it with request queueing enabled.
demo = gr.Interface(
    inference,
    [gr.inputs.Image(type="filepath"), gr.inputs.Image(type="filepath")],
    gr.outputs.Image(type="numpy"),
    title=title,
    description=description,
    examples=examples,
)
demo.launch(enable_queue=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|