Update app.py
app.py
CHANGED
@@ -34,12 +34,12 @@ from image_gen_aux import UpscaleWithModel
 #from diffusers.models.attention_processor import AttnProcessor2_0
 from transformers import CLIPTextModelWithProjection, CLIPTextModel, CLIPTokenizer
 
-torch.backends.cuda.matmul.allow_tf32 =
+torch.backends.cuda.matmul.allow_tf32 = False
 torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
 torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
-torch.backends.cudnn.allow_tf32 =
+torch.backends.cudnn.allow_tf32 = False
 torch.backends.cudnn.deterministic = False
-torch.backends.cudnn.benchmark =
+torch.backends.cudnn.benchmark = False
 torch.backends.cuda.preferred_blas_library="cublas"
 torch.backends.cuda.preferred_linalg_library="cusolver"
 
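For context, a minimal sketch of the precision/backend block in app.py as it reads after this commit. The comments are added here only to describe what each PyTorch flag controls; they are not part of the committed file.

import torch

# Disable TF32 for CUDA matmuls so FP32 GEMMs run at full FP32 precision.
torch.backends.cuda.matmul.allow_tf32 = False
# Keep BF16/FP16 matmul reductions in full precision (no reduced-precision accumulation).
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
# Disable TF32 inside cuDNN convolutions as well.
torch.backends.cudnn.allow_tf32 = False
# Do not force deterministic cuDNN algorithms, but turn off benchmark autotuning,
# so kernel selection does not depend on the first observed input shapes.
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False
# Prefer cuBLAS for BLAS calls and cuSOLVER for linear-algebra routines.
torch.backends.cuda.preferred_blas_library = "cublas"
torch.backends.cuda.preferred_linalg_library = "cusolver"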