Vanisper committed on
Commit
860e230
·
1 Parent(s): eb7a890

fix: 仅在可用 CUDA 时启用 xformers 内存高效注意力

Browse files
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -87,7 +87,8 @@ class Demo:
87
  self.current_model = 'SDXL Turbo'
88
  euler_anc = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
89
  self.pipe = StableDiffusionXLPipeline.from_pretrained(model_id, scheduler=euler_anc, torch_dtype=self.weight_dtype).to(self.device)
90
- self.pipe.enable_xformers_memory_efficient_attention()
 
91
 
92
  self.guidance_scale = 1
93
  self.num_inference_steps = 3
 
87
  self.current_model = 'SDXL Turbo'
88
  euler_anc = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
89
  self.pipe = StableDiffusionXLPipeline.from_pretrained(model_id, scheduler=euler_anc, torch_dtype=self.weight_dtype).to(self.device)
90
+ if torch.cuda.is_available():
91
+ self.pipe.enable_xformers_memory_efficient_attention()
92
 
93
  self.guidance_scale = 1
94
  self.num_inference_steps = 3