retromarz committed
Commit b031819 · verified · 1 Parent(s): f40c282

Update app.py

Files changed (1): app.py (+17 -2)
app.py CHANGED

@@ -7,6 +7,8 @@ import json
 import os
 from datetime import datetime
 import uuid
+from huggingface_hub import snapshot_download
+import shutil
 
 # Set up logging
 logging.basicConfig(level=logging.INFO)
@@ -15,14 +17,27 @@ logger = logging.getLogger(__name__)
 # Define output JSON file path
 OUTPUT_JSON_PATH = "captions.json"
 
-# Load the model and processor
+# Clear Hugging Face cache and download model
 MODEL_PATH = "fancyfeast/llama-joycaption-beta-one-hf-llava"
 try:
+    # Clear cache to avoid corrupted files
+    cache_dir = os.path.expanduser("~/.cache/huggingface/hub")
+    model_cache = os.path.join(cache_dir, f"models--{MODEL_PATH.replace('/', '--')}")
+    if os.path.exists(model_cache):
+        shutil.rmtree(model_cache)
+        logger.info(f"Cleared cache for {MODEL_PATH}")
+
+    # Pre-download model to ensure integrity
+    snapshot_download(repo_id=MODEL_PATH)
+    logger.info(f"Downloaded model {MODEL_PATH}")
+
+    # Load processor and model
     processor = AutoProcessor.from_pretrained(MODEL_PATH)
     model = LlavaForConditionalGeneration.from_pretrained(
         MODEL_PATH,
         torch_dtype=torch.float32,  # CPU-compatible dtype
-        low_cpu_mem_usage=True  # Minimize memory usage
+        low_cpu_mem_usage=True,  # Minimize memory usage
+        use_safetensors=True  # Force safetensors
     ).to("cpu")
     model.eval()
     logger.info("Model and processor loaded successfully.")