infinfin committed
Commit 6a34b22 · 1 Parent(s): f610694
Files changed (1):
  1. app.py +11 -22
app.py CHANGED
@@ -1,30 +1,19 @@
  import gradio as gr
+ import cv2
  import paddlehub as hub
- import numpy as np
  from PIL import Image
- import cv2
+ import numpy as np
 
- stylepro_artistic = hub.Module(name="stylepro_artistic")
+ model = hub.Module(name='stylepro_artistic')
 
 
- def inference(content, style):
-     result = stylepro_artistic.style_transfer(
-         images=[{
-             'content': cv2.imread(content.name),
-             'styles': [cv2.imread(style.name)]
-         }])
-     return Image.fromarray(np.uint8(result[0]['data'])[:, :, ::-1]).convert('RGB')
+ def inference(content, style):
+     res = model.style_transfer(images=[{'content': cv2.imread(content), 'styles': [cv2.imread(style)]}])
+     return res[0]['data'][:, :, ::-1]
 
+
+ title = "stylepro_artistic"
+ description = "Art style transfer models convert a given image into an arbitrary artistic style. This model, StyleProNet, adopts a fully convolutional network (FCN) architecture and reconstructs the stylized picture with an encoder-decoder. Its core is the parameter-free content-style fusion algorithm Style Projection, which keeps the model small and its response fast. The training loss combines a style loss, a content perceptual loss and a content KL loss, so the model reproduces the semantic details of the content picture and the style information of the style picture with high fidelity. Pre-training uses the MS-COCO dataset for the content-side images and the WikiArt dataset for the style-side images. For more details, please refer to the StyleProNet paper: https://arxiv.org/abs/2003.07694"
 
- # 🔮 ✨ 🎨🖼️🖼️🌇🌄
- title = "✨✨ 🏞️🌇 Image Style Transfer 🌇🏞️ ✨✨"
- description = "💁 This service can be used to transfer the image style in one picture 🌇 into another picture 🏞️."
- article = "💡 Hint: first upload your target picture 🏞️ into the upper left file box, " \
-           "and the style picture 🌇 into the lower left file box, " \
-           "then click the \"Submit\" button, wait a moment before you can check the generated picture ☕."
- examples = []
- iface = gr.Interface(inference, inputs=[gr.inputs.Image(type="file", label='Your input picture 🏞️'),
-                                         gr.inputs.Image(type="file", label='The style picture 🌇')],
-                      outputs=gr.outputs.Image(type="pil"), enable_queue=True, title=title, article=article,
-                      description=description, examples=examples)
- iface.launch()
+ examples = []
+ gr.Interface(inference, [gr.inputs.Image(type="filepath"), gr.inputs.Image(type="filepath")], gr.outputs.Image(type="numpy"), title=title, description=description, examples=examples).launch(enable_queue=True)
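For context (not part of the commit), a minimal sketch of how the new inference path can be exercised outside Gradio, assuming the stylepro_artistic PaddleHub interface used in the diff; the file names content.jpg / style.jpg and the output file are placeholders. With gr.inputs.Image(type="filepath") Gradio hands the function the uploaded file's path as a plain string, which is why the .name lookup from the old type="file" inputs is gone; cv2.imread and style_transfer both work in BGR order, which is why the last axis is reversed before returning an RGB array to the numpy output.

import cv2
import numpy as np
import paddlehub as hub
from PIL import Image

# Load the pretrained style-transfer module (same call as in the new app.py).
model = hub.Module(name='stylepro_artistic')

# Hypothetical local files standing in for the Gradio uploads.
content_path = 'content.jpg'
style_path = 'style.jpg'

# style_transfer takes BGR arrays (what cv2.imread returns) and returns a
# list of dicts whose 'data' entry is the stylized BGR array.
res = model.style_transfer(images=[{'content': cv2.imread(content_path),
                                    'styles': [cv2.imread(style_path)]}])

# Reverse the channel axis (BGR -> RGB), as the app does, before saving.
rgb = np.uint8(res[0]['data'])[:, :, ::-1]
Image.fromarray(rgb).save('stylized.png')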