Spaces: Running on Zero
Upload 4 files
- app.py +1 -1
- dc.py +5 -2
- modutils.py +9 -3
- requirements.txt +4 -3
app.py
CHANGED
@@ -98,7 +98,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
     with gr.Row():
         seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-        gpu_duration = gr.Slider(label="GPU time duration (seconds)", minimum=5, maximum=240, value=
+        gpu_duration = gr.Slider(label="GPU time duration (seconds)", minimum=5, maximum=240, value=20)
     with gr.Row():
         width = gr.Slider(label="Width", minimum=MIN_IMAGE_SIZE, maximum=MAX_IMAGE_SIZE, step=32, value=1024) # 832
         height = gr.Slider(label="Height", minimum=MIN_IMAGE_SIZE, maximum=MAX_IMAGE_SIZE, step=32, value=1024) # 1216
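The 20-second default matters on ZeroGPU because a slider like this is typically forwarded to the spaces.GPU decorator's dynamic-duration hook. A minimal sketch of that pattern, assuming the Space wires the slider value into its generation call (infer and its arguments are hypothetical stand-ins, not names from this repo):

    import spaces

    def get_duration(prompt, gpu_duration):
        # ZeroGPU calls this with the same arguments as the decorated function
        # and reserves that many seconds of GPU time for the call.
        return int(gpu_duration)

    @spaces.GPU(duration=get_duration)
    def infer(prompt, gpu_duration=20):
        ...  # run the diffusion pipeline inside the reserved window

Shorter requested durations generally get higher priority in the ZeroGPU queue, so a modest default like 20 seconds is a reasonable starting point.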
dc.py
CHANGED
@@ -53,10 +53,13 @@ from diffusers import FluxPipeline
 # import urllib.parse
 import subprocess
 
-
+IS_ZERO = True if os.getenv("SPACES_ZERO_GPU", None) else False
+if IS_ZERO:
+    subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
+torch.backends.cuda.matmul.allow_tf32 = True
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True
-
+
 # os.environ["PYTORCH_NO_CUDA_MEMORY_CACHING"] = "1"
 print(os.getenv("SPACES_ZERO_GPU"))
 
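Clearing /data-nvme/zerogpu-offload/* drops stale ZeroGPU offload snapshots left by earlier runs, and the TF32 switch trades a little matmul precision for speed on Ampere-class GPUs. A sketch of how a flag like IS_ZERO is usually consumed when the pipeline is built; the loader below is a hypothetical illustration, not code from dc.py:

    import os
    import torch
    from diffusers import DiffusionPipeline

    IS_ZERO = True if os.getenv("SPACES_ZERO_GPU", None) else False

    def load_pipeline(repo_id: str):
        pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.float16)
        if IS_ZERO:
            # On ZeroGPU the weights stay on CPU between requests; they reach
            # CUDA only inside a @spaces.GPU-decorated call or via offloading.
            pipe.enable_model_cpu_offload()
        else:
            pipe.to("cuda")
        return pipe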
modutils.py
CHANGED
@@ -508,12 +508,18 @@ def get_t2i_model_info(repo_id: str):
     return gr.update(value=md)
 
 
+MAX_MODEL_INFO = 100
+
+
 def get_tupled_model_list(model_list):
     if not model_list: return []
     #return [(x, x) for x in model_list] # for skipping this function
     tupled_list = []
-
-
+    api = HfApi()
+    for i, repo_id in enumerate(model_list):
+        if i > MAX_MODEL_INFO:
+            tupled_list.append((repo_id, repo_id))
+            continue
         try:
             if not api.repo_exists(repo_id): continue
             model = api.model_info(repo_id=repo_id, timeout=0.5)
@@ -521,7 +527,7 @@ def get_tupled_model_list(model_list):
             print(f"{repo_id}: {e}")
             tupled_list.append((repo_id, repo_id))
             continue
-        if model.
+        if model.tags is None: continue
         tags = model.tags
         info = []
         if not 'diffusers' in tags: continue
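Read as a whole, this change caps per-repo Hub lookups at the first MAX_MODEL_INFO entries and passes the rest through untouched. A consolidated sketch of the resulting loop, with the tag handling compressed to its essentials (the richer display-name formatting from the original function is omitted here):

    from huggingface_hub import HfApi

    MAX_MODEL_INFO = 100  # only the first 100 repos get a metadata lookup

    def get_tupled_model_list(model_list):
        if not model_list: return []
        tupled_list = []
        api = HfApi()
        for i, repo_id in enumerate(model_list):
            if i > MAX_MODEL_INFO:
                # Past the cap: skip the API round-trip, show the bare repo id.
                tupled_list.append((repo_id, repo_id))
                continue
            try:
                if not api.repo_exists(repo_id): continue
                model = api.model_info(repo_id=repo_id, timeout=0.5)
            except Exception as e:
                print(f"{repo_id}: {e}")
                tupled_list.append((repo_id, repo_id))
                continue
            if model.tags is None: continue
            if 'diffusers' not in model.tags: continue
            tupled_list.append((repo_id, repo_id))  # the real code builds a richer label here
        return tupled_list

The `if model.tags is None: continue` guard added at line 530 keeps the later `'diffusers' in tags` check from raising a TypeError when the Hub returns a model card without tags.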
requirements.txt
CHANGED
@@ -1,8 +1,6 @@
 stablepy==0.6.1
 diffusers==0.31.0
 transformers==4.47.1
-#diffusers
-#transformers
 accelerate
 invisible_watermark
 datasets
@@ -11,6 +9,8 @@ numpy<2
 gdown
 opencv-python
 huggingface_hub
+hf_transfer
+hf_xet
 scikit-build-core
 https://github.com/abetlen/llama-cpp-python/releases/download/v0.3.4-cu124/llama_cpp_python-0.3.4-cp310-cp310-linux_x86_64.whl
 git+https://github.com/Maximilian-Winter/llama-cpp-agent
@@ -24,4 +24,5 @@ timm
 wrapt-timeout-decorator
 sentencepiece
 unidecode
-ultralytics>=8.3.47
+ultralytics>=8.3.47
+pydantic==2.10.6
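The two new Hub packages speed up model downloads: hf_transfer is an opt-in Rust downloader, while hf_xet lets recent huggingface_hub versions fetch Xet-backed repos in deduplicated chunks with no extra configuration. A sketch of the opt-in step hf_transfer still needs (the repo id is a placeholder):

    import os
    # Must be set before huggingface_hub performs any download.
    os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

    from huggingface_hub import snapshot_download
    snapshot_download("org/some-model")  # placeholder repo id

The pydantic==2.10.6 pin likely guards against incompatibilities that newer pydantic releases caused for Gradio Spaces at the time of this commit.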