Tomtom84 committed
Commit 10df7bc · verified · Parent: 675256a

Update Dockerfile

Files changed (1): Dockerfile (+6 −7)
Dockerfile CHANGED
@@ -22,6 +22,7 @@ RUN wget https://developer.download.nvidia.com/compute/cuda/repos/debian12/x86_6
 ENV PATH=/usr/local/cuda/bin:${PATH}
 ENV LD_LIBRARY_PATH=/usr/local/cuda/lib64:${LD_LIBRARY_PATH}
 ENV CUDAToolkit_ROOT=/usr/local/cuda
+ENV CMAKE_ARGS="-DGGML_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=86"
 
 RUN useradd -m -u 1000 user
 USER user
@@ -32,16 +33,14 @@ WORKDIR /app
 
 COPY --chown=user . /app
 
-# 1. Set ENV for all subsequent builds
-ENV CMAKE_ARGS="-DGGML_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=86"
-
+# Important: disable build isolation for the llama-cpp-python build
 RUN pip install --upgrade pip
 
-# 2. llama-cpp-python (no extra --config-settings needed, CMAKE_ARGS is sufficient here)
-RUN pip install --no-cache-dir llama-cpp-python
+# Optional: build llama-cpp-python first (cacheable)
+RUN pip install --no-cache-dir --no-build-isolation llama-cpp-python
 
-# 3. outetts with CUDA
-RUN pip install --no-cache-dir --upgrade outetts
+# Afterwards: outetts (does not pull in llama-cpp-python again)
+RUN pip install --no-cache-dir --no-build-isolation outetts
 
 RUN pip install --no-cache-dir -r requirements.txt
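Two footnotes on the new flags, sketched as Dockerfile fragments rather than anything in this commit. First, --no-build-isolation stops pip from fetching build requirements into a throwaway environment, so llama-cpp-python's build backend (scikit-build-core, plus cmake and ninja if they are not already on the image) has to be installed before that line runs. Second, since CMAKE_ARGS only takes effect when the wheel is actually compiled, a build-time assertion can catch a silent fall-back to a CPU-only wheel; llama_supports_gpu_offload is assumed to be exported by llama_cpp, as in recent llama-cpp-python releases.

# Assumption: with --no-build-isolation, pip will not install build deps itself,
# so the backend must already be importable before llama-cpp-python is built.
RUN pip install --no-cache-dir scikit-build-core cmake ninja

# Hypothetical sanity check (not in this commit): fail the image build if the
# wheel was compiled without GPU offload support, i.e. CMAKE_ARGS was ignored.
RUN python -c "import llama_cpp; assert llama_cpp.llama_supports_gpu_offload()"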