fix: dynamically detect whether the system environment supports a GPU
app.py
CHANGED
@@ -73,6 +73,10 @@ class Demo:
         self.weight_dtype = torch.bfloat16
 
         model_id = 'stabilityai/stable-diffusion-xl-base-1.0'
+        if torch.cuda.is_available():
+            self.device = 'cuda'
+        else:
+            self.device = 'cpu'
         pipe = StableDiffusionXLPipeline.from_pretrained(model_id, torch_dtype=self.weight_dtype).to(self.device)
         pipe = None
         del pipe
@@ -82,7 +86,8 @@ class Demo:
         self.current_model = 'SDXL Turbo'
         euler_anc = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
         self.pipe = StableDiffusionXLPipeline.from_pretrained(model_id, scheduler=euler_anc, torch_dtype=self.weight_dtype).to(self.device)
-
+        if torch.cuda.is_available():
+            self.pipe.enable_xformers_memory_efficient_attention()
 
         self.guidance_scale = 1
         self.num_inference_steps = 3
@@ -319,14 +324,16 @@ class Demo:
             model_id = "stabilityai/sdxl-turbo"
             euler_anc = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
             self.pipe = StableDiffusionXLPipeline.from_pretrained(model_id, scheduler=euler_anc, torch_dtype=self.weight_dtype).to(self.device)
-
+            if torch.cuda.is_available():
+                self.pipe.enable_xformers_memory_efficient_attention()
             self.guidance_scale = 1
             self.num_inference_steps = 3
             self.current_model = 'SDXL Turbo'
         else:
             model_id = 'stabilityai/stable-diffusion-xl-base-1.0'
             self.pipe = StableDiffusionXLPipeline.from_pretrained(model_id, torch_dtype=self.weight_dtype).to(self.device)
-
+            if torch.cuda.is_available():
+                self.pipe.enable_xformers_memory_efficient_attention()
             self.guidance_scale = 7.5
             self.num_inference_steps = 20
             self.current_model = 'SDXL'
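
The change repeats the same two checks (pick 'cuda' vs 'cpu', and enable xformers only on GPU) in three places. A minimal sketch of how that pattern could be factored into helpers, assuming diffusers and torch only; the helper names resolve_device, maybe_enable_xformers, and load_sdxl_turbo are illustrative and not part of app.py:

    import torch
    from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler


    def resolve_device() -> str:
        # Mirror the commit's runtime check: use CUDA only when it is actually available.
        return 'cuda' if torch.cuda.is_available() else 'cpu'


    def maybe_enable_xformers(pipe) -> None:
        # Memory-efficient attention via xformers only applies on GPU; skip it on CPU-only Spaces.
        if torch.cuda.is_available():
            pipe.enable_xformers_memory_efficient_attention()


    def load_sdxl_turbo(device: str, weight_dtype=torch.bfloat16):
        # Same loading sequence as the SDXL Turbo branch of the diff, with the checks applied once.
        model_id = "stabilityai/sdxl-turbo"
        euler_anc = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionXLPipeline.from_pretrained(
            model_id, scheduler=euler_anc, torch_dtype=weight_dtype
        ).to(device)
        maybe_enable_xformers(pipe)
        return pipe


    # Usage sketch:
    # device = resolve_device()
    # pipe = load_sdxl_turbo(device)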