Spaces:
Running
on
Zero
Running
on
Zero
LPX
committed on
Commit
·
0fc95c6
1
Parent(s):
ca4d7e4
Add model initialization with MD API key in app_v4.py
Browse files
app_v4.py
CHANGED
@@ -20,6 +20,8 @@ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
|
|
20 |
MAX_SEED = 1000000
|
21 |
|
22 |
model_cache = {"models": None}
|
|
|
|
|
23 |
|
24 |
@spaces.GPU(duration=12) # This function gets priority for GPU access
|
25 |
def load_warm_models():
|
@@ -42,10 +44,9 @@ def get_model():
|
|
42 |
# # subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
|
43 |
@spaces.GPU()
|
44 |
def init_space():
|
45 |
-
global pipe
|
46 |
pipe = get_model()["pipeline"]
|
47 |
-
|
48 |
-
return pipe, model
|
49 |
|
50 |
@spaces.GPU(duration=12)
|
51 |
@torch.no_grad()
|
|
|
20 |
MAX_SEED = 1000000
|
21 |
|
22 |
model_cache = {"models": None}
|
23 |
+
md_api_key = os.getenv("MD_KEY")
|
24 |
+
model = md.vl(api_key=md_api_key)
|
25 |
|
26 |
@spaces.GPU(duration=12) # This function gets priority for GPU access
|
27 |
def load_warm_models():
|
|
|
44 |
# # subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
|
45 |
@spaces.GPU()
|
46 |
def init_space():
|
47 |
+
global pipe
|
48 |
pipe = get_model()["pipeline"]
|
49 |
+
return pipe
|
|
|
50 |
|
51 |
@spaces.GPU(duration=12)
|
52 |
@torch.no_grad()
|