lolout1 committed on
Commit 0b9a79d · 1 Parent(s): 0361d96

testing new app.py and Dockerfile configs

Files changed (2):
  1. Dockerfile +20 -8
  2. app.py +27 -27
Dockerfile CHANGED
@@ -17,30 +17,42 @@ RUN apt-get update && apt-get install -y \
     unzip \
     ffmpeg \
     libopencv-dev \
+    ninja-build \
     && rm -rf /var/lib/apt/lists/*
 
-# Set up user
+# Create user
 RUN useradd -m -u 1000 user
 
-# Install PyTorch CPU version as root (system-wide)
-RUN pip install torch==2.0.1+cpu torchvision==0.15.2+cpu torchaudio==2.0.2+cpu --index-url https://download.pytorch.org/whl/cpu
+# Install specific PyTorch CPU version FIRST
+RUN pip install torch==2.0.1+cpu torchvision==0.15.2+cpu torchaudio==2.0.2+cpu --index-url https://download.pytorch.org/whl/cpu --no-deps
+RUN pip install numpy pillow typing-extensions
 
-# Clone and install detectron2 as root (without -e flag)
+# Install detectron2
 RUN git clone https://github.com/facebookresearch/detectron2 /tmp/detectron2 && \
     cd /tmp/detectron2 && \
-    pip install .
+    git checkout v0.6 && \
+    pip install -e . --no-deps && \
+    pip install -r requirements.txt
 
 # Switch to user
 USER user
 ENV HOME=/home/user PATH=/home/user/.local/bin:$PATH
 WORKDIR $HOME/app
 
-# Copy and install other requirements
-COPY --chown=user:user requirements.txt .
-RUN pip install --user --no-cache-dir -r requirements.txt
+# Set environment for CPU
+ENV CUDA_VISIBLE_DEVICES=""
+ENV FORCE_CUDA="0"
+ENV TORCH_CUDA_ARCH_LIST=""
+
+# Copy and install requirements
+COPY --chown=user:user requirements-no-torch.txt requirements.txt
+RUN pip install --user --no-cache-dir -r requirements-no-torch.txt
 
 # Copy app files
 COPY --chown=user:user . .
 
+# Install NATTEN last to ensure correct torch version
+RUN pip install --user natten==0.14.6+torch200cpu -f https://shi-labs.com/natten/wheels/cpu/torch2.0.0/index.html --no-deps
+
 EXPOSE 7860
 CMD ["python", "app.py"]
app.py CHANGED
@@ -1,38 +1,38 @@
 #!/usr/bin/env python3
 """
-Hugging Face Spaces entry point for OneFormer application
+Minimal OneFormer Demo for HuggingFace Spaces
 """
 
 import os
-import sys
-import subprocess
+os.environ['CUDA_VISIBLE_DEVICES'] = ''
 
-# Set up environment variables for HF Spaces
-os.environ['CUDA_HOME'] = '/usr/local/cuda' if os.path.exists('/usr/local/cuda') else ''
+import gradio as gr
+import torch
+import numpy as np
+from PIL import Image
 
-# Install deformable attention ops if not already installed
-def setup_deformable_attention():
-    ops_dir = os.path.join(os.path.dirname(__file__), 'oneformer/modeling/pixel_decoder/ops')
-    if os.path.exists(ops_dir):
-        try:
-            subprocess.run(['bash', 'deform_setup.sh'], check=True, cwd=os.path.dirname(__file__))
-            print("Deformable attention ops installed successfully")
-        except Exception as e:
-            print(f"Warning: Could not install deformable attention ops: {e}")
-            print("Continuing without custom CUDA kernels...")
+# Force CPU
+device = torch.device("cpu")
 
-# Run setup on first launch
-if not os.path.exists('oneformer/modeling/pixel_decoder/ops/build'):
-    setup_deformable_attention()
+def process_image(image):
+    """Simple image processing function"""
+    if image is None:
+        return None
+
+    # For now, just return the image with a message
+    # Replace this with actual OneFormer inference
+    return image
 
-# Import and run the main gradio app
-from gradio_test import demo
+# Create simple interface
+iface = gr.Interface(
+    fn=process_image,
+    inputs=gr.Image(type="numpy"),
+    outputs=gr.Image(type="numpy"),
+    title="OneFormer Demo",
+    description="OneFormer: Universal Image Segmentation (CPU Mode)",
+)
 
 if __name__ == "__main__":
-    # Launch with HF Spaces compatible settings
-    demo.launch(
-        server_name="0.0.0.0",
-        server_port=7860,
-        share=False,  # Disabled on HF Spaces
-        debug=False
-    )
+    print(f"PyTorch version: {torch.__version__}")
+    print(f"Running on: CPU")
+    iface.launch(server_name="0.0.0.0", server_port=7860)
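
As the inline comments note, process_image is only a pass-through placeholder. One hedged way to fill it in is the transformers OneFormer API rather than the detectron2-based pipeline this Space installs; the checkpoint id and the exact calls below are assumptions for illustration, not part of the commit:

# Illustrative sketch: swaps in the transformers OneFormer API instead of the
# detectron2-based OneFormer the image actually builds; checkpoint id assumed.
import numpy as np
import torch
from PIL import Image
from transformers import OneFormerProcessor, OneFormerForUniversalSegmentation

CKPT = "shi-labs/oneformer_ade20k_swin_tiny"  # assumed example checkpoint
processor = OneFormerProcessor.from_pretrained(CKPT)
model = OneFormerForUniversalSegmentation.from_pretrained(CKPT).eval()

def process_image(image):
    """Semantic segmentation on a numpy image; returns a grayscale label map."""
    if image is None:
        return None
    pil = Image.fromarray(image)
    inputs = processor(images=pil, task_inputs=["semantic"], return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # post_process_semantic_segmentation returns one (H, W) label tensor per image
    seg = processor.post_process_semantic_segmentation(
        outputs, target_sizes=[pil.size[::-1]]
    )[0].cpu().numpy()
    # Stretch class ids toward 0-255 so the Gradio image output stays viewable
    return (seg * (255 // max(int(seg.max()), 1))).astype(np.uint8)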