ollama_mcp_gradio / demo.launcher
csepartha's picture
Upload demo.launcher
c10a27a verified
raw
history blame
309 Bytes
#!/bin/bash
# Launch stack: Ollama daemon -> model pull -> MCP server -> Gradio client.
# The Gradio UI is exposed on 0.0.0.0:7860 (foreground process).
set -euo pipefail

# Start the Ollama daemon FIRST — `ollama pull` talks to the running server,
# so pulling before `ollama serve` is up fails/races on a cold container.
ollama serve &

# Wait for the Ollama API to answer instead of a blind sleep.
# Fall back to a fixed delay if curl is not installed in the image.
if command -v curl >/dev/null 2>&1; then
  for _ in {1..30}; do
    if curl -sf http://127.0.0.1:11434/ >/dev/null 2>&1; then
      break
    fi
    sleep 1
  done
else
  sleep 5
fi

# Pull the model (no-op if already cached; can be moved to image build time).
ollama pull granite3.1-moe

# Start MCP server in the background.
python3 server.py &
sleep 5  # NOTE(review): server.py has no known health endpoint — fixed delay kept; confirm

# Start Gradio client in the foreground; exec replaces the shell so the
# client receives container signals (SIGTERM) directly.
exec python3 client.py --server_name 0.0.0.0 --server_port 7860