Brianpuz committed on
Commit
c28e5cf
·
verified ·
1 Parent(s): 23cbd40

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -5
app.py CHANGED
@@ -11,6 +11,7 @@ from gradio_huggingfacehub_search import HuggingfaceHubSearch
11
  from apscheduler.schedulers.background import BackgroundScheduler
12
  from datetime import datetime
13
  import numpy as np
 
14
  HF_TOKEN = os.environ.get("HF_TOKEN")
15
 
16
  os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
@@ -204,7 +205,7 @@ def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_rep
204
 
205
  current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
206
  logger.info(f"Time {current_time}, Username {username}, Model_ID, {model_id}, q_method {','.join(q_method)}")
207
-
208
  repo_namespace = get_repo_namespace(repo_owner, username, user_orgs)
209
  model_name = model_id.split('/')[-1]
210
  try:
@@ -225,7 +226,8 @@ def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_rep
225
  fp16 = str(Path(outdir)/f"{model_name}.fp16.gguf")
226
 
227
  with tempfile.TemporaryDirectory(dir=downloads_dir) as tmpdir:
228
- print("Downloading")
 
229
  local_dir = Path(tmpdir)/model_name
230
  api.snapshot_download(repo_id=model_id, local_dir=local_dir, local_dir_use_symlinks=False, allow_patterns=dl_pattern)
231
 
@@ -234,12 +236,15 @@ def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_rep
234
  if os.path.exists(adapter_config_dir) and not os.path.exists(config_dir):
235
  raise Exception("adapter_config.json is present. If converting LoRA, use GGUF-my-lora.")
236
 
237
- print("Download successfully")
 
238
  result = subprocess.run(["python", CONVERSION_SCRIPT, local_dir, "--outtype", "f16", "--outfile", fp16], shell=False, capture_output=True)
239
  print("Converted to f16")
240
  if result.returncode != 0:
241
  raise Exception(f"Error converting to fp16: {result.stderr.decode()}")
242
 
 
 
243
  imatrix_path = Path(outdir)/"imatrix.dat"
244
  if use_imatrix:
245
  train_data_path = train_data_file.name if train_data_file else "llama.cpp/groups_merged.txt"
@@ -252,7 +257,8 @@ def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_rep
252
 
253
  gguf_files = []
254
  for method in quant_methods:
255
- print("Begin quantize")
 
256
  name = f"{model_name.lower()}-{method.lower()}-{suffix}.gguf" if suffix else f"{model_name.lower()}-{method.lower()}.gguf"
257
  path = str(Path(outdir)/name)
258
  quant_cmd = ["./llama.cpp/llama-quantize", "--imatrix", imatrix_path, fp16, path, method] if use_imatrix else ["./llama.cpp/llama-quantize", fp16, path, method]
@@ -262,7 +268,8 @@ def process_model(model_id, q_method, use_imatrix, imatrix_q_method, private_rep
262
  size = os.path.getsize(path)/1024/1024/1024
263
  gguf_files.append((name, path, size, method))
264
 
265
- print("Quantize successfully!")
 
266
  suffix_for_repo = f"{imatrix_q_method}-imat" if use_imatrix else "-".join(quant_methods)
267
  repo_id = f"{repo_namespace}/{model_name}-{suffix_for_repo}-GGUF"
268
  new_repo_url = api.create_repo(repo_id=repo_id, exist_ok=True, private=private_repo)
 
11
  from apscheduler.schedulers.background import BackgroundScheduler
12
  from datetime import datetime
13
  import numpy as np
14
+ import shutil
15
  HF_TOKEN = os.environ.get("HF_TOKEN")
16
 
17
  os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
 
205
 
206
  current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
207
  logger.info(f"Time {current_time}, Username {username}, Model_ID, {model_id}, q_method {','.join(q_method)}")
208
+
209
  repo_namespace = get_repo_namespace(repo_owner, username, user_orgs)
210
  model_name = model_id.split('/')[-1]
211
  try:
 
226
  fp16 = str(Path(outdir)/f"{model_name}.fp16.gguf")
227
 
228
  with tempfile.TemporaryDirectory(dir=downloads_dir) as tmpdir:
229
+ print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " Start download")
230
+ logger.info(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " Start download")
231
  local_dir = Path(tmpdir)/model_name
232
  api.snapshot_download(repo_id=model_id, local_dir=local_dir, local_dir_use_symlinks=False, allow_patterns=dl_pattern)
233
 
 
236
  if os.path.exists(adapter_config_dir) and not os.path.exists(config_dir):
237
  raise Exception("adapter_config.json is present. If converting LoRA, use GGUF-my-lora.")
238
 
239
+ print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " Download successfully")
240
+ logger.info(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " Download successfully")
241
  result = subprocess.run(["python", CONVERSION_SCRIPT, local_dir, "--outtype", "f16", "--outfile", fp16], shell=False, capture_output=True)
242
  print("Converted to f16")
243
  if result.returncode != 0:
244
  raise Exception(f"Error converting to fp16: {result.stderr.decode()}")
245
 
246
+ shutil.rmtree(downloads_dir)
247
+
248
  imatrix_path = Path(outdir)/"imatrix.dat"
249
  if use_imatrix:
250
  train_data_path = train_data_file.name if train_data_file else "llama.cpp/groups_merged.txt"
 
257
 
258
  gguf_files = []
259
  for method in quant_methods:
260
+ print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " Begin quantize")
261
+ logger.info(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " Begin quantize")
262
  name = f"{model_name.lower()}-{method.lower()}-{suffix}.gguf" if suffix else f"{model_name.lower()}-{method.lower()}.gguf"
263
  path = str(Path(outdir)/name)
264
  quant_cmd = ["./llama.cpp/llama-quantize", "--imatrix", imatrix_path, fp16, path, method] if use_imatrix else ["./llama.cpp/llama-quantize", fp16, path, method]
 
268
  size = os.path.getsize(path)/1024/1024/1024
269
  gguf_files.append((name, path, size, method))
270
 
271
+ print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " Quantize successfully!")
272
+ logger.info(datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " Quantize successfully!")
273
  suffix_for_repo = f"{imatrix_q_method}-imat" if use_imatrix else "-".join(quant_methods)
274
  repo_id = f"{repo_namespace}/{model_name}-{suffix_for_repo}-GGUF"
275
  new_repo_url = api.create_repo(repo_id=repo_id, exist_ok=True, private=private_repo)