Molbap HF Staff committed on
Commit
2ccc01b
·
1 Parent(s): 13231fe

cache clean

Browse files
Files changed (1) hide show
  1. modular_graph_and_candidates.py +7 -0
modular_graph_and_candidates.py CHANGED
@@ -41,6 +41,7 @@ from sentence_transformers import SentenceTransformer, util
41
  from tqdm import tqdm
42
  import numpy as np
43
  import spaces
 
44
 
45
  # ────────────────────────────────────────────────────────────────────────────────
46
  # CONFIG
@@ -140,6 +141,12 @@ def embedding_similarity_clusters(models_root: Path, missing: List[str], thr: fl
140
  emb = model.encode(batch_texts, convert_to_numpy=True, show_progress_bar=False)
141
  all_embeddings.append(emb)
142
  print(f"✓ Completed batch of {len(batch_names)} models")
 
 
 
 
 
 
143
  except Exception as e:
144
  print(f"⚠️ GPU worker error for batch {batch_names}: {type(e).__name__}: {e}")
145
  # Create zero embeddings for all models in failed batch
 
41
  from tqdm import tqdm
42
  import numpy as np
43
  import spaces
44
+ import torch
45
 
46
  # ────────────────────────────────────────────────────────────────────────────────
47
  # CONFIG
 
141
  emb = model.encode(batch_texts, convert_to_numpy=True, show_progress_bar=False)
142
  all_embeddings.append(emb)
143
  print(f"✓ Completed batch of {len(batch_names)} models")
144
+
145
+ # Clear GPU cache every 5 batches to prevent memory accumulation
146
+ if i % (5 * batch_size) == 0 and torch.cuda.is_available():
147
+ torch.cuda.empty_cache()
148
+ print(f"🧹 Cleared GPU cache after batch {i//batch_size + 1}")
149
+
150
  except Exception as e:
151
  print(f"⚠️ GPU worker error for batch {batch_names}: {type(e).__name__}: {e}")
152
  # Create zero embeddings for all models in failed batch