Brianpuz committed on
Commit
a093223
·
verified ·
1 Parent(s): 6b0bae1

Add the X and GitHub links to the generated README file.

Browse files
Files changed (1) hide show
  1. app.py +2 -0
app.py CHANGED
@@ -49,6 +49,8 @@ def get_llama_cpp_notes(
49
  version = result.stdout.strip().split("-")[0]
50
  text = f"""
51
  *Produced by [Antigma Labs](https://antigma.ai)*
 
 
52
  ## llama.cpp quantization
53
  Using <a href="https://github.com/ggml-org/llama.cpp">llama.cpp</a> release <a href="https://github.com/ggml-org/llama.cpp/releases/tag/{version}">b4944</a> for quantization.
54
  Original model: https://huggingface.co/{model_id}
 
49
  version = result.stdout.strip().split("-")[0]
50
  text = f"""
51
  *Produced by [Antigma Labs](https://antigma.ai)*
52
+ *Follow Antigma Labs in X [https://x.com/antigma_labs](https://x.com/antigma_labs)*
53
+ *Antigma's GitHub Homepage [https://github.com/AntigmaLabs](https://github.com/AntigmaLabs)*
54
  ## llama.cpp quantization
55
  Using <a href="https://github.com/ggml-org/llama.cpp">llama.cpp</a> release <a href="https://github.com/ggml-org/llama.cpp/releases/tag/{version}">b4944</a> for quantization.
56
  Original model: https://huggingface.co/{model_id}