John6666 committed on
Commit
1f6f333
·
verified ·
1 Parent(s): 0a7e407

Upload 2 files

Browse files
Files changed (1) hide show
  1. app.py +6 -4
app.py CHANGED
@@ -53,10 +53,12 @@ from diffusers import FluxPipeline
53
  # import urllib.parse
54
  import subprocess
55
 
56
- subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
57
-
 
 
58
  ImageFile.LOAD_TRUNCATED_IMAGES = True
59
- torch.backends.cuda.matmul.allow_tf32 = True
60
  # os.environ["PYTORCH_NO_CUDA_MEMORY_CACHING"] = "1"
61
  print(os.getenv("SPACES_ZERO_GPU"))
62
 
@@ -893,7 +895,7 @@ with gr.Blocks(theme='NoCrypt/miku@>=1.2.2', elem_id="main", fill_width=True, cs
893
  history_clear_button.click(lambda: ([], []), None, [history_gallery, history_files], queue=False, show_api=False)
894
 
895
  with gr.Row(equal_height=False, variant="default"):
896
- gpu_duration_gui = gr.Number(minimum=5, maximum=240, value=59, show_label=False, container=False, info="GPU time duration (seconds)")
897
  with gr.Column():
898
  verbose_info_gui = gr.Checkbox(value=False, container=False, label="Status info")
899
  load_lora_cpu_gui = gr.Checkbox(value=False, container=False, label="Load LoRAs on CPU")
 
53
  # import urllib.parse
54
  import subprocess
55
 
56
+ IS_ZEROGPU = os.getenv("SPACES_ZERO_GPU", False)
57
+ if IS_ZEROGPU:
58
+ subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
59
+ torch.backends.cuda.matmul.allow_tf32 = True
60
  ImageFile.LOAD_TRUNCATED_IMAGES = True
61
+
62
  # os.environ["PYTORCH_NO_CUDA_MEMORY_CACHING"] = "1"
63
  print(os.getenv("SPACES_ZERO_GPU"))
64
 
 
895
  history_clear_button.click(lambda: ([], []), None, [history_gallery, history_files], queue=False, show_api=False)
896
 
897
  with gr.Row(equal_height=False, variant="default"):
898
+ gpu_duration_gui = gr.Number(minimum=5, maximum=240, value=20, show_label=False, container=False, info="GPU time duration (seconds)")
899
  with gr.Column():
900
  verbose_info_gui = gr.Checkbox(value=False, container=False, label="Status info")
901
  load_lora_cpu_gui = gr.Checkbox(value=False, container=False, label="Load LoRAs on CPU")