from __future__ import print_function

import copy
import os
import time

import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.models as models
import torchvision.transforms as transforms
import torchvision.transforms.functional as TF
from PIL import Image
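# The globals used below (device, cnn, the normalization constants, image_transform
# and run_style_transfer) are defined elsewhere in the original notebook. What follows
# is a minimal, assumed sketch of those pieces, modeled on the PyTorch neural style
# transfer tutorial that this app builds on; treat it as illustrative rather than
# as the exact original definitions.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Pretrained VGG19 feature extractor and ImageNet normalization statistics,
# as used by the tutorial-style run_style_transfer() optimization loop.
cnn = models.vgg19(pretrained=True).features.to(device).eval()
cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)

def image_transform(img):
    """Assumed helper: turn a Gradio image (numpy array or PIL) into a 4-D float tensor."""
    preprocess = transforms.Compose([
        transforms.Resize((512, 512)),  # target size is an assumption
        transforms.ToTensor(),
    ])
    if isinstance(img, np.ndarray):
        img = Image.fromarray(img)
    return preprocess(img).unsqueeze(0).to(device, torch.float)

# run_style_transfer(cnn, mean, std, content_img, style_img, input_img) is assumed to be
# the optimization loop from the PyTorch style transfer tutorial; it is not reproduced here.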
# Define the prediction function used by the Gradio interface
def style_transfer(cont_img, styl_img):
    # Start the timer
    start_time = time.time()
    # Transform the input images into normalized tensors
    style_img = image_transform(styl_img)
    content_img = image_transform(cont_img)
    # Start the optimization from a copy of the content image
    input_img = content_img.clone()
    # Run the style transfer optimization
    output = run_style_transfer(cnn, cnn_normalization_mean, cnn_normalization_std,
                                content_img, style_img, input_img)
    # Convert the output tensor back to a PIL image so Gradio can display it
    output_img = output.detach().cpu().squeeze(0)
    output_img = TF.to_pil_image(output_img)
    # Calculate the prediction time
    end_time = time.time()
    pred_time = round(end_time - start_time, 5)
    return output_img
## Gradio App
title = 'Style Transfer'
description = 'A model to transfer the style of one image to another'
article = 'Created at PyTorch Model Deployment'

# Example images
example_images = [["/content/content.jpg", "/content/style.jpg"]]

# Create the Gradio demo
demo = gr.Interface(
    fn=style_transfer,
    inputs=[
        gr.inputs.Image(label="Content Image"),
        gr.inputs.Image(label="Style Image")
    ],
    outputs="image",
    examples=example_images,
    allow_flagging=False,
    title=title,
    description=description,
    article=article
)
# Launch the Gradio interface
demo.launch(debug=True)