Spaces:
Running
on
Zero
Running
on
Zero
cache clean
Browse files
modular_graph_and_candidates.py
CHANGED
@@ -41,6 +41,7 @@ from sentence_transformers import SentenceTransformer, util
|
|
41 |
from tqdm import tqdm
|
42 |
import numpy as np
|
43 |
import spaces
|
|
|
44 |
|
45 |
# ────────────────────────────────────────────────────────────────────────────────
|
46 |
# CONFIG
|
@@ -140,6 +141,12 @@ def embedding_similarity_clusters(models_root: Path, missing: List[str], thr: fl
|
|
140 |
emb = model.encode(batch_texts, convert_to_numpy=True, show_progress_bar=False)
|
141 |
all_embeddings.append(emb)
|
142 |
print(f"✅ Completed batch of {len(batch_names)} models")
|
|
|
|
|
|
|
|
|
|
|
|
|
143 |
except Exception as e:
|
144 |
print(f"⚠️ GPU worker error for batch {batch_names}: {type(e).__name__}: {e}")
|
145 |
# Create zero embeddings for all models in failed batch
|
|
|
41 |
from tqdm import tqdm
|
42 |
import numpy as np
|
43 |
import spaces
|
44 |
+
import torch
|
45 |
|
46 |
# ────────────────────────────────────────────────────────────────────────────────
|
47 |
# CONFIG
|
|
|
141 |
emb = model.encode(batch_texts, convert_to_numpy=True, show_progress_bar=False)
|
142 |
all_embeddings.append(emb)
|
143 |
print(f"✅ Completed batch of {len(batch_names)} models")
|
144 |
+
|
145 |
+
# Clear GPU cache every 5 batches to prevent memory accumulation
|
146 |
+
if i % (5 * batch_size) == 0 and torch.cuda.is_available():
|
147 |
+
torch.cuda.empty_cache()
|
148 |
+
print(f"🧹 Cleared GPU cache after batch {i//batch_size + 1}")
|
149 |
+
|
150 |
except Exception as e:
|
151 |
print(f"⚠️ GPU worker error for batch {batch_names}: {type(e).__name__}: {e}")
|
152 |
# Create zero embeddings for all models in failed batch
|