ollama_mcp_gradio / demo.launcher
csepartha's picture
Upload 2 files
b4084df verified
raw
history blame
533 Bytes
#!/bin/bash
# Launcher: start the Ollama API server, pull the model, then start the
# MCP server (background) and the Gradio client (foreground, port 7860).
set -euo pipefail

readonly MODEL="granite3.1-moe"

# Start the Ollama API server FIRST — 'ollama pull' talks to this server,
# so pulling before the server is up would fail.
echo "Starting Ollama API server..."
ollama serve &

# Poll for readiness instead of a fixed sleep: 'ollama list' succeeds only
# once the API server is accepting requests. Give it up to 60 seconds.
echo "Waiting for Ollama server to become ready..."
ready=0
for _ in {1..60}; do
  if ollama list >/dev/null 2>&1; then
    ready=1
    break
  fi
  sleep 1
done
if (( ! ready )); then
  echo "ERROR: Ollama server did not become ready in time" >&2
  exit 1
fi

# Ensure the model is available locally; abort the launch if the pull fails
# so the client never starts against a missing model.
echo "Pulling Ollama model (${MODEL})..."
ollama pull "${MODEL}"

# Start MCP server in the background
echo "Starting MCP server..."
python3 server.py &

# Start Gradio (client.py) on 0.0.0.0:7860 — foreground, keeps the
# container/process alive.
echo "Starting Gradio client..."
python3 client.py --server_name 0.0.0.0 --server_port 7860