# syntax=docker/dockerfile:1
# vscode-python312 Dockerfile
FROM ubuntu:22.04
# Set environment variables
ENV DEBIAN_FRONTEND=noninteractive
ENV HOME=/root
# CUDA paths for when running on GPU (RunPod)
ENV CUDA_HOME=/usr/local/cuda
ENV PATH=${CUDA_HOME}/bin:${PATH}
# LD_LIBRARY_PATH is unset in the base image, so appending ":${LD_LIBRARY_PATH}"
# would leave a trailing colon (an empty entry the loader treats as the current
# directory); set the CUDA lib directory directly instead
ENV LD_LIBRARY_PATH=${CUDA_HOME}/lib64
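# At runtime on a GPU host you can confirm the toolkit is visible with, for
# example (torch is only present if requirements.txt installs it):
#   nvidia-smi
#   python3 -c "import torch; print(torch.cuda.is_available())"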
# Install dependencies, Python 3.12, Node.js, and code-server
RUN apt-get update && \
    apt-get install -y curl wget gpg apt-transport-https git software-properties-common && \
    # Add Python 3.12 from the deadsnakes PPA
    add-apt-repository ppa:deadsnakes/ppa && \
    apt-get update && \
    apt-get install -y python3.12 python3.12-venv python3.12-dev && \
    # Make Python 3.12 the default python3
    update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.12 1 && \
    update-alternatives --set python3 /usr/bin/python3.12 && \
    # Install pip for Python 3.12
    curl -sS https://bootstrap.pypa.io/get-pip.py | python3.12 && \
    # Upgrade pip
    python3 -m pip install --upgrade pip setuptools wheel && \
    # Install Node.js 22.x from NodeSource
    curl -fsSL https://deb.nodesource.com/setup_22.x | bash - && \
    apt-get install -y nodejs && \
    # Install code-server
    curl -fsSL https://code-server.dev/install.sh | sh && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
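# Optional sanity check (not in the original build): fail fast if any part of
# the toolchain above is missing.
RUN python3 --version && pip3 --version && node --version && npm --version && code-server --version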
# Install global npm packages
RUN npm install -g @anthropic-ai/claude-code @anthropic-ai/dxt
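# For reproducible builds you may want to pin exact versions instead; the form
# below is a placeholder, not a tested pin:
#   RUN npm install -g @anthropic-ai/claude-code@<version> @anthropic-ai/dxt@<version>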
# Create a directory for the workspace
RUN mkdir -p /workspace
# Copy requirements files (note: the build fails if the glob matches nothing)
COPY requirements*.txt /workspace/
# Build a requirements-cpu.txt for build time: drop vllm and any CUDA-tagged
# xformers pin, then pin a CPU-installable xformers
RUN if [ -f /workspace/requirements.txt ]; then \
        grep -v "vllm" /workspace/requirements.txt | grep -v "xformers==.*+.*" > /workspace/requirements-cpu.txt || true; \
        echo "xformers==0.0.32" >> /workspace/requirements-cpu.txt; \
        pip3 install --no-cache-dir -r /workspace/requirements-cpu.txt || true; \
    fi
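# For illustration, a hypothetical requirements.txt containing
#   vllm==0.6.0
#   xformers==0.0.28+cu121
#   numpy
# would yield a requirements-cpu.txt of
#   numpy
#   xformers==0.0.32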
# Create configuration directory for code-server and Ollama
RUN mkdir -p /root/.config/code-server /root/.ollama
# Configure code-server to listen on port 8443 with authentication disabled
RUN printf 'bind-addr: 0.0.0.0:8443\nauth: none\ncert: false\n' > /root/.config/code-server/config.yaml
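# The resulting config.yaml reads:
#   bind-addr: 0.0.0.0:8443
#   auth: none
#   cert: false
# "auth: none" leaves the editor unauthenticated, so rely on platform-level
# access control (e.g. RunPod's proxy) if 8443 is exposed publicly.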
# Install Ollama after code-server is set up
RUN curl -fsSL https://ollama.com/install.sh | sh || true
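# No models are baked into the image; pull them at runtime once "ollama serve"
# is up, e.g. (the model name is only an example):
#   ollama pull llama3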
# Install some useful VS Code extensions
RUN code-server --install-extension ms-python.python && \
    code-server --install-extension ritwickdey.LiveServer && \
    code-server --install-extension ms-toolsai.jupyter
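# Note that code-server resolves extension IDs against the Open VSX registry,
# not the Microsoft marketplace. An optional check (not in the original build):
#   RUN code-server --list-extensions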
# Create the startup script that handles vllm installation on GPU; the
# here-document form needs the dockerfile:1 syntax directive at the top of this file
COPY <<'EOF' /start.sh
#!/bin/bash
# Check if running on GPU and install vllm if needed
if [ -d "/usr/local/cuda" ] && ! python3 -c "import vllm" 2>/dev/null; then
    echo "GPU detected, installing vllm for Blackwell support..."
    pip3 install git+https://github.com/vllm-project/vllm.git@04e1642e3 || echo "vllm installation failed, continuing..."
fi

# Start Ollama in the background
/usr/local/bin/ollama serve &

# Give Ollama a moment to start
sleep 2

# Start code-server in the foreground
exec code-server --disable-telemetry --bind-addr 0.0.0.0:8443 /workspace
EOF
RUN chmod +x /start.sh
# Expose ports for both services
EXPOSE 8443 11434
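# A typical local run maps both ports (image tag and flags here are
# illustrative, not part of the source):
#   docker build -t vscode-python312 .
#   docker run --rm -p 8443:8443 -p 11434:11434 vscode-python312
# On a GPU host, add --gpus all (the start script additionally checks for
# /usr/local/cuda before installing vllm).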
# Set the workspace as working directory
WORKDIR /workspace
# Start both services
CMD ["/start.sh"]
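# Optional hardening idea (not in the original image): a healthcheck against
# code-server's /healthz endpoint, which is reachable without auth here:
# HEALTHCHECK --interval=30s --timeout=5s --retries=3 \
#   CMD curl -fsS http://localhost:8443/healthz || exit 1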