#!/bin/bash
set -e  # stop immediately if the clone, configure, or build step fails

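# Build the llama.cpp quantization tools, then launch the app.
# Set RUN_LOCALLY to any non-empty value for a CPU-only local build;
# leave it unset to build with CUDA support (the GPU deployment case).
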
if [ ! -d "llama.cpp" ]; then
  # clone only if llama.cpp is not already present (e.g. first run in a dev env)
  git clone https://github.com/ggerganov/llama.cpp
fi

export GGML_CUDA=OFF  # default: CPU-only build
if [[ -z "${RUN_LOCALLY}" ]]; then
  # RUN_LOCALLY is unset, so we are not running locally: enable CUDA
  export GGML_CUDA=ON
fi
echo "GGML_CUDA=$GGML_CUDA"

cd llama.cpp
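# configure a minimal static build: no shared libraries, CUDA as chosen
# above, and no libcurl dependency (-DLLAMA_CURL=OFF)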
cmake -B build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=${GGML_CUDA} -DLLAMA_CURL=OFF
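# build only the three tools the app needs, in Release mode and in parallel (-j)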
cmake --build build --config Release -j --target llama-quantize llama-gguf-split llama-imatrix
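# keep the binaries next to this script and drop the build tree to free disk space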
cp ./build/bin/llama-* .
rm -rf build

cd ..
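# launch the application from the repo root (app.py is assumed to be the
# project's entry point, e.g. a Gradio app)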
python app.py