miike-ai committed on
Commit
d74b0c4
·
verified ·
1 Parent(s): 961a1ab

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +22 -2
Dockerfile CHANGED
@@ -3,6 +3,10 @@ FROM ubuntu:22.04
3
  # Set environment variables
4
  ENV DEBIAN_FRONTEND=noninteractive
5
  ENV HOME=/root
 
 
 
 
6
 
7
  # Install dependencies, code-server, and Ollama
8
  RUN apt-get update && \
@@ -17,7 +21,7 @@ RUN apt-get update && \
17
  # Install pip for Python 3.12
18
  curl -sS https://bootstrap.pypa.io/get-pip.py | python3.12 && \
19
  # Upgrade pip
20
- python3 -m pip install --upgrade pip && \
21
  # Install Node.js 22.x from NodeSource
22
  curl -fsSL https://deb.nodesource.com/setup_22.x | bash - && \
23
  apt-get install -y nodejs && \
@@ -32,6 +36,16 @@ RUN npm install -g @anthropic-ai/claude-code @anthropic-ai/dxt
32
  # Create a directory for the workspace
33
  RUN mkdir -p /workspace
34
 
 
 
 
 
 
 
 
 
 
 
35
  # Create configuration directory for code-server and Ollama
36
  RUN mkdir -p /root/.config/code-server /root/.ollama
37
 
@@ -46,8 +60,14 @@ RUN code-server --install-extension ms-python.python && \
46
  code-server --install-extension ritwickdey.LiveServer && \
47
  code-server --install-extension ms-toolsai.jupyter
48
 
49
- # Create a startup script
50
  RUN echo '#!/bin/bash\n\
 
 
 
 
 
 
51
  # Start Ollama in the background\n\
52
  /usr/local/bin/ollama serve &\n\
53
  \n\
 
3
  # Set environment variables
4
  ENV DEBIAN_FRONTEND=noninteractive
5
  ENV HOME=/root
6
+ # CUDA paths for when running on GPU (RunPod)
7
+ ENV CUDA_HOME=/usr/local/cuda
8
+ ENV PATH=${CUDA_HOME}/bin:${PATH}
9
+ ENV LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${LD_LIBRARY_PATH}
10
 
11
  # Install dependencies, code-server, and Ollama
12
  RUN apt-get update && \
 
21
  # Install pip for Python 3.12
22
  curl -sS https://bootstrap.pypa.io/get-pip.py | python3.12 && \
23
  # Upgrade pip
24
+ python3 -m pip install --upgrade pip setuptools wheel && \
25
  # Install Node.js 22.x from NodeSource
26
  curl -fsSL https://deb.nodesource.com/setup_22.x | bash - && \
27
  apt-get install -y nodejs && \
 
36
  # Create a directory for the workspace
37
  RUN mkdir -p /workspace
38
 
39
+ # Copy requirements files (if they exist)
40
+ COPY requirements*.txt /workspace/
41
+
42
+ # Create a requirements-cpu.txt without vllm for build time
43
+ RUN if [ -f /workspace/requirements.txt ]; then \
44
+ grep -v "vllm" /workspace/requirements.txt | grep -v "xformers==.*+.*" > /workspace/requirements-cpu.txt || true; \
45
+ echo "xformers==0.0.32" >> /workspace/requirements-cpu.txt; \
46
+ pip3 install --no-cache-dir -r /workspace/requirements-cpu.txt || true; \
47
+ fi
48
+
49
  # Create configuration directory for code-server and Ollama
50
  RUN mkdir -p /root/.config/code-server /root/.ollama
51
 
 
60
  code-server --install-extension ritwickdey.LiveServer && \
61
  code-server --install-extension ms-toolsai.jupyter
62
 
63
+ # Create a startup script that handles vllm installation on GPU
64
  RUN echo '#!/bin/bash\n\
65
+ # Check if running on GPU and install vllm if needed\n\
66
+ if [ -d "/usr/local/cuda" ] && ! python3 -c "import vllm" 2>/dev/null; then\n\
67
+ echo "GPU detected, installing vllm for Blackwell support..."\n\
68
+ pip3 install git+https://github.com/vllm-project/vllm.git@04e1642e3 || echo "vllm installation failed, continuing..."\n\
69
+ fi\n\
70
+ \n\
71
  # Start Ollama in the background\n\
72
  /usr/local/bin/ollama serve &\n\
73
  \n\