Update start.sh
Change back to the original version; Cursor changed too much.
start.sh CHANGED
@@ -1,16 +1,5 @@
 #!/bin/bash
 
-# Set up persistent cache directories
-export HF_HOME=/data/.huggingface
-export HF_HUB_CACHE=${HF_HOME}/hub
-export TRANSFORMERS_CACHE=${HF_HOME}/transformers
-export TORCH_HOME=${HF_HOME}/torch
-export DATASETS_CACHE=${HF_HOME}/datasets
-export DIFFUSERS_CACHE=${HF_HOME}/diffusers
-
-# Create cache directories if they don't exist
-mkdir -p ${HF_HOME} ${HF_HUB_CACHE} ${TRANSFORMERS_CACHE} ${TORCH_HOME} ${DATASETS_CACHE} ${DIFFUSERS_CACHE}
-
 if [ ! -d "llama.cpp" ]; then
   # only run in dev env
   git clone https://github.com/ggerganov/llama.cpp
@@ -21,28 +10,13 @@ if [[ -z "${RUN_LOCALLY}" ]]; then
   # enable CUDA if NOT running locally
   export GGML_CUDA=ON
 fi
+echo "GGML_CUDA=$GGML_CUDA"
 
 cd llama.cpp
-cmake -B build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=${GGML_CUDA}
+cmake -B build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=${GGML_CUDA} -DLLAMA_CURL=OFF
 cmake --build build --config Release -j --target llama-quantize llama-gguf-split llama-imatrix
 cp ./build/bin/llama-* .
 rm -rf build
 
 cd ..
-
-# Clean only non-persistent temporary files
-echo "Cleaning temporary files..."
-find . -type f -name "*.pyc" -delete
-find . -type f -name "*.pyo" -delete
-find . -type f -name "*.pyd" -delete
-find . -type d -name "__pycache__" -exec rm -r {} + 2>/dev/null || true
-find . -type d -name ".pytest_cache" -exec rm -r {} + 2>/dev/null || true
-find . -type d -name ".coverage" -exec rm -r {} + 2>/dev/null || true
-
-# Clean build artifacts
-find . -type d -name "build" -exec rm -r {} + 2>/dev/null || true
-find . -type d -name "dist" -exec rm -r {} + 2>/dev/null || true
-find . -type d -name "*.egg-info" -exec rm -r {} + 2>/dev/null || true
-
-echo "Starting application..."
-python app.py
+python app.py
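
Note on the CUDA toggle: GGML_CUDA is exported only when RUN_LOCALLY is unset (the guard `if [[ -z "${RUN_LOCALLY}" ]]; then` is visible in the second hunk header), and the new echo line surfaces the chosen mode in the Space's build log. A minimal sketch of the mechanism, assuming the lines elided from this diff contain that guard:

  #!/bin/bash
  # Sketch only: the [[ -z ... ]] guard is taken from the hunk header;
  # the exact elided lines of start.sh are not shown in this diff.
  if [[ -z "${RUN_LOCALLY}" ]]; then
    # enable CUDA if NOT running locally
    export GGML_CUDA=ON
  fi
  echo "GGML_CUDA=$GGML_CUDA"

  # When RUN_LOCALLY is set, GGML_CUDA stays empty, the flag expands to
  # `-DGGML_CUDA=`, and CMake treats the empty value as false (CPU-only build).
  cmake -B build -DBUILD_SHARED_LIBS=OFF -DGGML_CUDA=${GGML_CUDA} -DLLAMA_CURL=OFF

The added `-DLLAMA_CURL=OFF` disables llama.cpp's optional libcurl-based download support, presumably so the build does not require libcurl on the host.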
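
The three build targets kept by the script (llama-quantize, llama-gguf-split, llama-imatrix) are llama.cpp's GGUF quantization utilities, copied next to start.sh presumably for app.py to invoke. A typical manual invocation, with placeholder model paths rather than anything taken from this Space:

  # quantize an f16 GGUF model down to Q4_K_M
  ./llama-quantize model-f16.gguf model-Q4_K_M.gguf Q4_K_M

  # split the result into shards of at most 2 GB each
  ./llama-gguf-split --split --split-max-size 2G model-Q4_K_M.gguf model-Q4_K_M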