ollama_mcp_gradio / demo.launcher
csepartha's picture
Upload 3 files
2b8669f verified
raw
history blame
534 Bytes
#!/bin/bash
# Launcher: start the Ollama API server, pull the model, start the MCP
# server, then run the Gradio client in the foreground.
#
# NOTE: `ollama pull` is a client command that talks to the Ollama API
# server, so the server must be up and ready BEFORE the pull — the
# original script pulled first, which fails on a fresh container.
set -euo pipefail

readonly MODEL="granite3.1-moe"
readonly OLLAMA_URL="http://127.0.0.1:11434"

# Start the Ollama API server in the background.
echo "Starting Ollama API server..."
ollama serve &
ollama_pid=$!

# Stop background services on any exit path (error or normal).
cleanup() {
  kill "$ollama_pid" ${mcp_pid:+"$mcp_pid"} 2>/dev/null || true
}
trap cleanup EXIT

# Poll for readiness (up to ~30 s) instead of a blind fixed sleep.
echo "Waiting for Ollama server to be ready..."
for _ in {1..30}; do
  if curl -fsS "$OLLAMA_URL" >/dev/null 2>&1; then
    break
  fi
  sleep 1
done

# Ensure that the Ollama model is pulled (requires the server to be up).
echo "Pulling Ollama model ($MODEL)..."
ollama pull "$MODEL"

# Start MCP server in the background.
echo "Starting MCP server..."
python3 server.py &
mcp_pid=$!

# Start Gradio (client.py) on 0.0.0.0:7860 in the foreground;
# the script lives as long as the client does.
echo "Starting Gradio client..."
python3 client.py --server_name 0.0.0.0 --server_port 7860