Spaces: Running on Zero
Update files.
- app.py +3 -4
- webui/runner.py +4 -0
- webui/tab_style_t2i.py +3 -3
- webui/tab_style_transfer.py +2 -2
- webui/tab_texture_synthesis.py +2 -2
app.py
CHANGED
@@ -7,8 +7,6 @@ from webui import (
 )
 
 
-import os
-os.environ["no_proxy"] = "localhost,127.0.0.1,::1"
 
 
 def main():
@@ -31,8 +29,9 @@ def main():
 
        with gr.TabItem("Texture Synthesis", id='tab_texture_syn'):
            create_interface_texture_synthesis(runner=runner)
-
-    demo.launch(share=False, debug=False)
+
+    demo.queue().launch()
+    # demo.launch(share=False, debug=False)
 
 
 if __name__ == '__main__':
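The launch change above is the usual ZeroGPU-ready Gradio pattern: build the Blocks UI, enable the request queue, then launch. A minimal sketch, assuming a current Gradio release where queue() returns the Blocks instance; the placeholder Markdown stands in for the Space's real tabs:

import gradio as gr

def main():
    with gr.Blocks() as demo:
        gr.Markdown('UI tabs go here')  # placeholder for the Space's real tab interfaces
    # queue() enables the request queue, launch() starts the server
    demo.queue().launch()

if __name__ == '__main__':
    main()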
webui/runner.py
CHANGED
@@ -9,6 +9,7 @@ from pipeline_sdxl import ADPipeline as ADXLPipeline
 from utils import Controller
 
 import os
+import spaces
 
 
 class Runner:
@@ -37,6 +38,7 @@ class Runner:
         image = image.resize(size, Image.BICUBIC)
         return to_tensor(image).unsqueeze(0)
 
+    @spaces.GPU
     def run_style_transfer(self, content_image, style_image, seed, num_steps, lr, content_weight, mixed_precision, model, **kwargs):
         self.load_pipeline(model)
 
@@ -65,6 +67,7 @@ class Runner:
         torch.cuda.empty_cache()
         return [output_image]
 
+    @spaces.GPU
     def run_style_t2i_generation(self, style_image, prompt, negative_prompt, guidance_scale, height, width, seed, num_steps, iterations, lr, num_images_per_prompt, mixed_precision, is_adain, model):
         self.load_pipeline(model)
 
@@ -100,6 +103,7 @@ class Runner:
         torch.cuda.empty_cache()
         return output_images
 
+    @spaces.GPU
     def run_texture_synthesis(self, texture_image, height, width, seed, num_steps, iterations, lr, mixed_precision, num_images_per_prompt, synthesis_way,model):
         self.load_pipeline(model)
 
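The decorators added above are the standard ZeroGPU hook: spaces.GPU requests a GPU only for the duration of the decorated call. A minimal sketch of that pattern, assuming a diffusers pipeline; the model id and generate function are illustrative, not the Space's actual Runner code:

import spaces
import torch
from diffusers import DiffusionPipeline

# Loaded at startup; on ZeroGPU the .to('cuda') call is intercepted and a real
# GPU is only attached while a @spaces.GPU-decorated function is running.
pipe = DiffusionPipeline.from_pretrained(
    'stable-diffusion-v1-5/stable-diffusion-v1-5', torch_dtype=torch.float16
)
pipe.to('cuda')

@spaces.GPU
def generate(prompt):
    # Runs with a GPU attached, which is released when the call returns.
    return pipe(prompt).images[0]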
webui/tab_style_t2i.py
CHANGED
@@ -18,13 +18,13 @@ def create_interface_style_t2i(runner):
             prompt = gr.Textbox(label='Prompt', value='A rocket')
             negative_prompt = gr.Textbox(label='Negative Prompt', value='')
 
-            base_model_list = ['stable-diffusion-v1-5', 'stable-diffusion-xl-base-1.0']
-            model = gr.Radio(choices=base_model_list, label='Select a Base Model', value='stable-diffusion-xl-base-1.0')
+            base_model_list = ['stable-diffusion-v1-5/stable-diffusion-v1-5', 'stabilityai/stable-diffusion-xl-base-1.0']
+            model = gr.Radio(choices=base_model_list, label='Select a Base Model', value='stabilityai/stable-diffusion-xl-base-1.0')
 
             run_button = gr.Button(value='Run')
 
             gr.Examples(
-                [[Image.open('./webui/images/image_02_01.jpg').convert('RGB'), 'A rocket', 'stable-diffusion-xl-base-1.0']],
+                [[Image.open('./webui/images/image_02_01.jpg').convert('RGB'), 'A rocket', 'stabilityai/stable-diffusion-xl-base-1.0']],
                 [style_image, prompt, model]
             )
 
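The id change in this tab (and in the two tabs below) swaps bare folder names for full namespace/repo Hub ids, which is what diffusers' from_pretrained resolves when no local directory of that name exists. A hedged sketch of the difference; the actual loading happens in webui/runner.py, not in this tab:

from diffusers import DiffusionPipeline

# A full 'org/name' id resolves on the Hugging Face Hub; a bare
# 'stable-diffusion-xl-base-1.0' would only match a local path.
pipe = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-xl-base-1.0')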
webui/tab_style_transfer.py
CHANGED
@@ -27,8 +27,8 @@ def create_interface_style_transfer(runner):
             content_weight = gr.Slider(label='Content Weight', minimum=0., maximum=1., value=0.25, step=0.001)
             mixed_precision = gr.Radio(choices=['bf16', 'no'], value='bf16', label='Mixed Precision')
 
-            base_model_list = ['stable-diffusion-v1-5',]
-            model = gr.Radio(choices=base_model_list, label='Select a Base Model', value='stable-diffusion-v1-5')
+            base_model_list = ['stable-diffusion-v1-5/stable-diffusion-v1-5',]
+            model = gr.Radio(choices=base_model_list, label='Select a Base Model', value='stable-diffusion-v1-5/stable-diffusion-v1-5')
 
         with gr.Column():
             gr.Markdown('#### Output Image:\n')
webui/tab_texture_synthesis.py
CHANGED
@@ -28,8 +28,8 @@ def create_interface_texture_synthesis(runner):
             mixed_precision = gr.Radio(choices=['bf16', 'no'], value='bf16', label='Mixed Precision')
             num_images_per_prompt = gr.Slider(label='Num Images Per Prompt', minimum=1, maximum=10, value=1, step=1)
 
-            base_model_list = ['stable-diffusion-v1-5',]
-            model = gr.Radio(choices=base_model_list, label='Select a Base Model', value='stable-diffusion-v1-5')
+            base_model_list = ['stable-diffusion-v1-5/stable-diffusion-v1-5',]
+            model = gr.Radio(choices=base_model_list, label='Select a Base Model', value='stable-diffusion-v1-5/stable-diffusion-v1-5')
             synthesis_way = gr.Radio(['Sampling', 'MultiDiffusion'], label='Synthesis Way', value='MultiDiffusion')
 
         with gr.Column():