endorno committed on
Commit
e9c96ba
·
1 Parent(s): e0b563e

run on local

Browse files
Files changed (2) hide show
  1. app.py +19 -13
  2. requirements.txt +7 -2
app.py CHANGED
@@ -1,4 +1,5 @@
1
  import gradio as gr
 
2
  import numpy as np
3
  import random
4
  from diffusers import DiffusionPipeline
@@ -7,14 +8,15 @@ import spaces
7
 
8
  device = "cuda" if torch.cuda.is_available() else "cpu"
9
 
10
- if torch.cuda.is_available():
11
- torch.cuda.max_memory_allocated(device=device)
12
- pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
13
- pipe.enable_xformers_memory_efficient_attention()
14
- pipe = pipe.to(device)
15
- else:
16
- pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
17
- pipe = pipe.to(device)
 
18
 
19
  MAX_SEED = np.iinfo(np.int32).max
20
  MAX_IMAGE_SIZE = 1024
@@ -121,17 +123,17 @@ with gr.Blocks(css=css) as demo:
121
  guidance_scale = gr.Slider(
122
  label="Guidance scale",
123
  minimum=0.0,
124
- maximum=10.0,
125
  step=0.1,
126
- value=0.0,
127
  )
128
 
129
  num_inference_steps = gr.Slider(
130
  label="Number of inference steps",
131
  minimum=1,
132
- maximum=12,
133
  step=1,
134
- value=2,
135
  )
136
 
137
  gr.Examples(
@@ -145,4 +147,8 @@ with gr.Blocks(css=css) as demo:
145
  outputs = [result]
146
  )
147
 
148
- demo.queue().launch()
 
 
 
 
 
1
  import gradio as gr
2
+ import os
3
  import numpy as np
4
  import random
5
  from diffusers import DiffusionPipeline
 
8
 
9
  device = "cuda" if torch.cuda.is_available() else "cpu"
10
 
11
+ # if torch.cuda.is_available():
12
+ torch.cuda.max_memory_allocated(device=device)
13
+ # pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
14
+ pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16)
15
+ pipe.enable_xformers_memory_efficient_attention()
16
+ pipe = pipe.to(device)
17
+ # else:
18
+ # pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
19
+ # pipe = pipe.to(device)
20
 
21
  MAX_SEED = np.iinfo(np.int32).max
22
  MAX_IMAGE_SIZE = 1024
 
123
  guidance_scale = gr.Slider(
124
  label="Guidance scale",
125
  minimum=0.0,
126
+ maximum=20.0,
127
  step=0.1,
128
+ value=6.0,
129
  )
130
 
131
  num_inference_steps = gr.Slider(
132
  label="Number of inference steps",
133
  minimum=1,
134
+ maximum=50,
135
  step=1,
136
+ value=20,
137
  )
138
 
139
  gr.Examples(
 
147
  outputs = [result]
148
  )
149
 
150
+ port = 19876
151
+ hostname = os.uname()[1]
152
+ print(f"launch http://{hostname}:{port}")
153
+
154
+ demo.queue().launch(server_name="0.0.0.0", server_port=port)
requirements.txt CHANGED
@@ -1,6 +1,11 @@
 
 
1
  accelerate
2
  diffusers==0.26.3
3
  invisible_watermark
4
- torch==2.2.0
 
 
 
5
  transformers
6
- xformers==0.0.24
 
1
+ gradio==4.31.5
2
+ spaces
3
  accelerate
4
  diffusers==0.26.3
5
  invisible_watermark
6
+ # torch==2.2.0
7
+ # xformers==0.0.24
8
+ torch==2.0.1
9
+ xformers==0.0.22
10
  transformers
11
+