File size: 580 Bytes
6455a8e 52dd340 c9f46e7 |
1 2 3 4 5 6 7 8 9 10 11 12 |
#!/bin/bash
# Boot script: unpack a prebuilt llama.cpp (AVX-512) build, download a GGUF
# model from Hugging Face, and run llama-server on port 8000.
#
# Expects: $WORK contains llama_cpp_avx512.zip with a ./llama-server binary.
set -euo pipefail

export WORK="/home/user/app"
cd "$WORK" || { echo "error: cannot cd to $WORK" >&2; exit 1; }

# Unpack the server binary; -o overwrites quietly on re-runs.
# unzip chatter is suppressed, but a failure must still abort.
unzip -o llama_cpp_avx512.zip > /dev/null 2>&1 \
  || { echo "error: failed to unzip llama_cpp_avx512.zip" >&2; exit 1; }

echo "✅ Booting up llama server..."

MODEL_URL="https://huggingface.co/unsloth/gpt-oss-20b-GGUF/resolve/main/gpt-oss-20b-F16.gguf?download=true"
# Alternative, smaller model (kept from the original script for reference):
# MODEL_URL="https://huggingface.co/lmstudio-community/Qwen3-4B-Instruct-2507-GGUF/resolve/main/Qwen3-4B-Instruct-2507-Q6_K.gguf?download=true"

# -q replaces the old "> /dev/null 2>&1" so the exit status is still checked:
# starting the server against a missing/truncated model file must not happen.
wget -q -O model.gguf "$MODEL_URL" \
  || { echo "error: model download failed: $MODEL_URL" >&2; exit 1; }

# Announce before launch: llama-server blocks for its whole lifetime, so the
# original post-launch echo was unreachable until the server exited.
echo "✅ llama server running on port 8000"

# exec replaces this shell so llama-server receives signals (SIGTERM etc.)
# directly — important when run under a supervisor/container runtime.
exec ./llama-server -m model.gguf --port 8000 --host 0.0.0.0 \
  --threads 2 --ctx-size 4096 --mlock --jinja