testing new Dockerfile and app.py configs
Files changed:
- Dockerfile (+34 -44)
- app.py (+69 -60)
- utils/universal_contrast_analyzer.py (+168 -115)
Dockerfile
CHANGED
@@ -14,74 +14,64 @@ RUN apt-get update && apt-get install -y \
     libgl1-mesa-glx \
     wget \
     curl \
-    ninja-build \
     && rm -rf /var/lib/apt/lists/*

-# Create user
+# Create user (required by Hugging Face)
 RUN useradd -m -u 1000 user

+# Set working directory
 WORKDIR /app

-RUN pip install --upgrade pip
-RUN pip install setuptools==69.5.1 wheel
+# Install Python dependencies in the correct order
+RUN pip install --upgrade pip setuptools wheel

-# Install PyTorch
-RUN pip install ...
+# Install PyTorch dependencies FIRST
+RUN pip install sympy filelock jinja2 networkx requests typing-extensions

+# Install PyTorch CPU version
+RUN pip install torch==2.0.1+cpu torchvision==0.15.2+cpu --index-url https://download.pytorch.org/whl/cpu

+# CRITICAL FIX: Install compatible Pillow version
+RUN pip install numpy==1.24.3 pillow==9.5.0

-# Install detectron2 - build from source
-RUN git clone ... && \
-    cd /tmp/detectron2 && \
-    git checkout v0.6 && \
-    pip install --no-build-isolation --no-deps . && \
-    cd / && \
-    rm -rf /tmp/detectron2
+# Install detectron2 from pre-built wheel
+RUN pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch2.0/index.html

-# Install ...
+# Install other core dependencies
 RUN pip install \
+    opencv-python==4.8.1.78 \
+    scipy==1.10.1 \
+    scikit-learn==1.3.0 \
+    scikit-image==0.21.0 \
+    matplotlib==3.7.2 \
+    gradio==3.50.2 \
+    huggingface_hub==0.19.4 \
     tqdm \
-    yacs \
-    tabulate \
-    cloudpickle \
-    Pillow \
-    scipy
+    pycocotools

-# Install additional dependencies
-RUN pip install \
-    gradio \
-    huggingface_hub \
-    scikit-learn \
-    scikit-image

 # Switch to user
 USER user
 ENV HOME=/home/user PATH=/home/user/.local/bin:$PATH

 # Copy application files
-COPY --chown=user:user . /app
-
-COPY --chown=user:user requirements.txt /app/
-RUN pip install --user --no-deps -r requirements.txt || true
+COPY --chown=user:user requirements.txt /app/requirements.txt
+RUN pip install --user -r /app/requirements.txt
+
+COPY --chown=user:user . /app

+# Set environment for CPU
 ENV CUDA_VISIBLE_DEVICES=""
 ENV FORCE_CUDA="0"
 ENV TORCH_CUDA_ARCH_LIST=""

+# Create necessary directories
+RUN mkdir -p /app/oneformer && \
+    mkdir -p /app/utils && \
+    mkdir -p /app/configs && \
+    mkdir -p /app/demo
+
+# Expose port
 EXPOSE 7860
+
+# Run the application
 CMD ["python", "app.py"]
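The Pillow pin appears to work around detectron2 v0.6's use of PIL.Image.LINEAR, a deprecated alias that Pillow 10 removed (pillow==9.5.0 predates the removal). A minimal sketch of the same guard the new app.py applies at runtime; the resize call is only an illustration:

    import PIL.Image

    # Pillow >= 10 dropped the deprecated LINEAR alias; detectron2 v0.6 still uses it.
    if not hasattr(PIL.Image, "LINEAR"):
        PIL.Image.LINEAR = PIL.Image.BILINEAR

    img = PIL.Image.new("RGB", (64, 64))
    resized = img.resize((32, 32), PIL.Image.LINEAR)  # safe on any Pillow version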
app.py
CHANGED
@@ -1,91 +1,100 @@
 #!/usr/bin/env python3
 """
-NeuroNest
+NeuroNest Application Entry Point
+Handles initialization and graceful startup for Hugging Face Spaces
 """

 import os
 import sys
-import subprocess
+import logging
+import time
+from pathlib import Path

+# Configure logging
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger(__name__)
+
+# Set environment variables
 os.environ['CUDA_VISIBLE_DEVICES'] = ''
 os.environ['FORCE_CUDA'] = '0'

-def ensure_detectron2():
-    """..."""
-    try:
-        subprocess.run([
-            sys.executable, "-m", "pip", "install",
-            "--no-build-isolation",
-            "git+https://github.com/facebookresearch/detectron2.git@v0.6"
-        ], check=True)
-        import detectron2
-        return True
-    except:
-        pass
-
-    # Method 2: Clone and build
-    try:
-        ...
-        subprocess.run([
-            "git", "clone", "--depth", "1", "--branch", "v0.6",
-            "https://github.com/facebookresearch/detectron2.git",
-            f"{tmpdir}/detectron2"
-        ], check=True)
-
-        subprocess.run([
-            sys.executable, "-m", "pip", "install",
-            "--no-build-isolation", f"{tmpdir}/detectron2"
-        ], check=True)
-
-        import detectron2
-        return True
-    except:
-        return False
-
-def main():
-    # Ensure detectron2 is available
-    if not ensure_detectron2():
-        print("WARNING: Could not install detectron2, running in limited mode")
-
-        # Run minimal version without detectron2
-        import gradio as gr
-        ...
+def setup_oneformer_imports():
+    """Add OneFormer to Python path if needed"""
+    oneformer_path = Path(__file__).parent / "oneformer"
+    if oneformer_path.exists() and str(oneformer_path) not in sys.path:
+        sys.path.insert(0, str(oneformer_path))
+        logger.info(f"Added OneFormer to path: {oneformer_path}")
+
+def check_dependencies():
+    """Check if all required dependencies are available"""
+    try:
+        import torch
+        logger.info(f"PyTorch version: {torch.__version__}")
+        logger.info(f"CUDA available: {torch.cuda.is_available()}")
+
+        import detectron2
+        logger.info("Detectron2 imported successfully")
+
+        import gradio as gr
+        logger.info(f"Gradio version: {gr.__version__}")
+
+        import cv2
+        logger.info(f"OpenCV version: {cv2.__version__}")
+
+        import PIL
+        logger.info(f"Pillow version: {PIL.__version__}")
+
+        # Check PIL compatibility
+        if hasattr(PIL.Image, 'LINEAR'):
+            logger.info("PIL has LINEAR attribute")
+        elif hasattr(PIL.Image, 'BILINEAR'):
+            logger.info("PIL has BILINEAR attribute (newer version)")
+            # Monkey patch for compatibility
+            PIL.Image.LINEAR = PIL.Image.BILINEAR
+            logger.info("Applied PIL compatibility patch")
+
+        return True
+
+    except ImportError as e:
+        logger.error(f"Missing dependency: {e}")
+        return False

-    # Normal operation with detectron2
+def main():
+    """Main application entry point"""
+    print("=" * 50)
+    print(f"Application Startup at {time.strftime('%Y-%m-%d %H:%M:%S')}")
+    print("=" * 50)
+
+    # Setup paths
+    setup_oneformer_imports()
+
+    # Check dependencies
+    if not check_dependencies():
+        logger.error("Dependency check failed")
+        sys.exit(1)
+
     try:
+        # Import and launch the Gradio interface
         from gradio_test import create_gradio_interface

+        logger.info("Creating Gradio interface...")
         interface = create_gradio_interface()
-        interface.launch(
+
+        logger.info("Launching application...")
+        interface.queue(max_size=10).launch(
             server_name="0.0.0.0",
             server_port=7860,
-            share=False
+            share=False  # Disable share for production
         )
+
     except Exception as e:
+        logger.error(f"Error: {e}")
         import traceback
         traceback.print_exc()
+        sys.exit(1)

 if __name__ == "__main__":
     main()
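gradio_test.create_gradio_interface is not part of this commit, so for local testing a stand-in with the same shape can be useful (hypothetical stub; the real interface presumably wires up the OneFormer pipeline):

    import gradio as gr

    def create_gradio_interface():
        # Placeholder with the same contract as the real gradio_test module
        with gr.Blocks() as demo:
            gr.Markdown("NeuroNest placeholder interface")
        return demo

    if __name__ == "__main__":
        create_gradio_interface().queue(max_size=10).launch(
            server_name="0.0.0.0", server_port=7860, share=False
        )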
utils/universal_contrast_analyzer.py
CHANGED
@@ -9,6 +9,8 @@ from typing import Dict, List, Tuple, Optional
 import logging
 from scipy.spatial import distance
 from skimage.segmentation import find_boundaries
+from sklearn.cluster import DBSCAN
+import colorsys

 logger = logging.getLogger(__name__)
@@ -22,40 +24,53 @@ class UniversalContrastAnalyzer:
     def __init__(self, wcag_threshold: float = 4.5):
         self.wcag_threshold = wcag_threshold

-        # ADE20K semantic class mappings
+        # Comprehensive ADE20K semantic class mappings
         self.semantic_classes = {
             # Floors and ground surfaces
             'floor': [3, 4, 13, 28, 78],  # floor, wood floor, rug, carpet, mat

             # Walls and vertical surfaces
-            'wall': [0, 1, 9],  # wall, building, brick
+            'wall': [0, 1, 9, 21],  # wall, building, brick, house

             # Ceiling
-            'ceiling': [5],
+            'ceiling': [5, 16],  # ceiling, sky (for rooms with skylights)

-            # Furniture
-            'furniture': [...],
+            # Furniture - expanded list
+            'furniture': [
+                10, 19, 15, 7, 18, 23, 30, 33, 34, 36, 44, 45, 57, 63, 64, 65, 75,
+                # sofa, chair, table, bed, armchair, cabinet, desk, counter, stool,
+                # bench, nightstand, coffee table, ottoman, wardrobe, dresser, shelf,
+                # chest of drawers
+            ],

             # Doors and openings
-            'door': [25, 14],  # door, windowpane
+            'door': [25, 14, 79],  # door, windowpane, screen door

             # Windows
-            'window': [8],
+            'window': [8, 14],  # window, windowpane

             # Stairs and steps
             'stairs': [53, 59],  # stairs, step

             # Small objects that might be on floors/furniture
-            'objects': [...],
+            'objects': [
+                17, 20, 24, 37, 38, 39, 42, 62, 68, 71, 73, 80, 82, 84, 89, 90, 92, 93,
+                # curtain, book, picture, towel, clothes, pillow, box, bag, lamp, fan,
+                # cushion, basket, bottle, plate, clock, vase, tray, bowl
+            ],

             # Kitchen/bathroom fixtures
-            'fixtures': [...]
+            'fixtures': [
+                32, 46, 49, 50, 54, 66, 69, 70, 77, 94, 97, 98, 99, 117, 118, 119, 120,
+                # sink, toilet, bathtub, shower, dishwasher, oven, microwave,
+                # refrigerator, stove, washer, dryer, range hood, kitchen island
+            ],
+
+            # Decorative elements
+            'decorative': [
+                6, 12, 56, 60, 61, 72, 83, 91, 96, 100, 102, 104, 106, 110, 112,
+                # painting, mirror, sculpture, chandelier, sconce, poster, tapestry
+            ]
         }

         # Create reverse mapping for quick lookup
@@ -70,7 +85,7 @@ class UniversalContrastAnalyzer:
         # Normalize to 0-1
         rgb_norm = rgb / 255.0

-        # Apply gamma correction
+        # Apply gamma correction (linearize)
         rgb_linear = np.where(
             rgb_norm <= 0.03928,
             rgb_norm / 12.92,
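The hunk above shows only the linearization step of calculate_wcag_contrast. For reference, a self-contained sketch of the full WCAG 2.x computation the method presumably performs; the luminance coefficients come from the WCAG definition, not from the visible diff:

    import numpy as np

    def wcag_contrast(rgb1, rgb2) -> float:
        """Contrast ratio between two sRGB colors, per WCAG 2.x."""
        def luminance(rgb):
            rgb_norm = np.asarray(rgb, dtype=float) / 255.0
            rgb_linear = np.where(rgb_norm <= 0.03928,
                                  rgb_norm / 12.92,
                                  ((rgb_norm + 0.055) / 1.055) ** 2.4)
            return rgb_linear @ np.array([0.2126, 0.7152, 0.0722])

        lighter, darker = sorted([luminance(rgb1), luminance(rgb2)], reverse=True)
        return (lighter + 0.05) / (darker + 0.05)

    assert round(wcag_contrast([0, 0, 0], [255, 255, 255]), 1) == 21.0  # max ratio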
@@ -95,7 +110,7 @@ class UniversalContrastAnalyzer:
         hsv1 = cv2.cvtColor(color1.reshape(1, 1, 3).astype(np.uint8), cv2.COLOR_RGB2HSV)[0, 0]
         hsv2 = cv2.cvtColor(color2.reshape(1, 1, 3).astype(np.uint8), cv2.COLOR_RGB2HSV)[0, 0]

-        # Calculate circular hue difference
+        # Calculate circular hue difference (0-180 range in OpenCV)
         hue_diff = abs(hsv1[0] - hsv2[0])
         if hue_diff > 90:
             hue_diff = 180 - hue_diff
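The wrap-around handling matters because OpenCV stores hue in [0, 180): two reds at hue 5 and hue 175 are 10 degrees apart on the color wheel, not 170. The same arithmetic as a standalone check:

    def circular_hue_diff(h1: int, h2: int) -> int:
        """Smallest angular distance between two OpenCV hues (0-179 scale)."""
        diff = abs(h1 - h2)
        return 180 - diff if diff > 90 else diff

    assert circular_hue_diff(5, 175) == 10   # both near red
    assert circular_hue_diff(30, 90) == 60   # orange vs. green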
@@ -125,10 +140,23 @@ class UniversalContrastAnalyzer:
         indices = np.random.choice(len(masked_pixels), sample_size, replace=False)
         masked_pixels = masked_pixels[indices]

-        # Use ...
+        # Use DBSCAN clustering to find dominant color cluster
+        if len(masked_pixels) > 50:
+            try:
+                clustering = DBSCAN(eps=30, min_samples=10).fit(masked_pixels)
+                labels = clustering.labels_
+
+                # Get the largest cluster
+                unique_labels, counts = np.unique(labels[labels >= 0], return_counts=True)
+                if len(unique_labels) > 0:
+                    dominant_label = unique_labels[np.argmax(counts)]
+                    dominant_colors = masked_pixels[labels == dominant_label]
+                    return np.median(dominant_colors, axis=0).astype(int)
+            except:
+                pass
+
+        # Fallback to median
+        return np.median(masked_pixels, axis=0).astype(int)
if boundaries[y, x]:
|
176 |
center_id = segmentation[y, x]
|
177 |
|
178 |
+
# Check 8-connected neighbors for more complete boundaries
|
179 |
neighbors = [
|
180 |
+
segmentation[y-1, x], # top
|
181 |
+
segmentation[y+1, x], # bottom
|
182 |
+
segmentation[y, x-1], # left
|
183 |
+
segmentation[y, x+1], # right
|
184 |
+
segmentation[y-1, x-1], # top-left
|
185 |
+
segmentation[y-1, x+1], # top-right
|
186 |
+
segmentation[y+1, x-1], # bottom-left
|
187 |
+
segmentation[y+1, x+1] # bottom-right
|
188 |
]
|
189 |
|
190 |
for neighbor_id in neighbors:
|
|
|
@@ -166,7 +198,7 @@ class UniversalContrastAnalyzer:
                     adjacencies[pair][y, x] = True

         # Filter out small boundaries (noise)
-        min_boundary_pixels = ...
+        min_boundary_pixels = 20  # Reduced threshold for better detection
         filtered_adjacencies = {}
         for pair, boundary in adjacencies.items():
             if np.sum(boundary) >= min_boundary_pixels:
@@ -174,35 +206,57 @@ class UniversalContrastAnalyzer:

         return filtered_adjacencies

-    def ...
+    def is_contrast_sufficient(self, color1: np.ndarray, color2: np.ndarray,
+                               category1: str, category2: str) -> Tuple[bool, Optional[str]]:
         """
-        Determine if ...
+        Determine if contrast is sufficient based on WCAG and perceptual guidelines.
+        Returns (is_sufficient, severity_if_not)
         """
+        wcag_ratio = self.calculate_wcag_contrast(color1, color2)
+        hue_diff = self.calculate_hue_difference(color1, color2)
+        sat_diff = self.calculate_saturation_difference(color1, color2)
+
+        # Critical relationships requiring highest contrast
+        # (pairs are written alphabetically so they match the sorted lookup below)
+        critical_pairs = [
+            ('door', 'floor'),
+            ('floor', 'stairs'),
+            ('stairs', 'wall')
+        ]
+
+        # High priority relationships
+        high_priority_pairs = [
+            ('door', 'wall'),
+            ('floor', 'furniture'),
+            ('floor', 'objects'),
+            ('wall', 'furniture')
+        ]
+
+        # Check relationship type
+        relationship = tuple(sorted([category1, category2]))
+
+        # Determine thresholds based on relationship
+        if relationship in critical_pairs:
+            # Critical: require 7:1 contrast ratio
+            if wcag_ratio < 7.0:
+                return False, 'critical'
+            if hue_diff < 30 and sat_diff < 50:
+                return False, 'critical'
+
+        elif relationship in high_priority_pairs:
+            # High priority: require 4.5:1 contrast ratio
+            if wcag_ratio < 4.5:
+                return False, 'high'
+            if wcag_ratio < 7.0 and hue_diff < 20 and sat_diff < 40:
+                return False, 'high'
+
+        else:
+            # Standard: require 3:1 contrast ratio minimum
+            if wcag_ratio < 3.0:
+                return False, 'medium'
+            if wcag_ratio < 4.5 and hue_diff < 15 and sat_diff < 30:
+                return False, 'medium'
+
+        return True, None
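Since the lookup key is tuple(sorted([category1, category2])), the pair lists only match when each pair is written in alphabetical order, which is why ('door', 'floor') appears rather than ('floor', 'door'). A quick illustration:

    relationship = tuple(sorted(['floor', 'door']))
    assert relationship == ('door', 'floor')   # sorted form matches
    assert relationship != ('floor', 'door')   # unsorted form never matches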
@@ -224,6 +278,8 @@ class UniversalContrastAnalyzer:
             'analyzed_pairs': 0,
             'low_contrast_pairs': 0,
             'critical_issues': 0,
+            'high_priority_issues': 0,
+            'medium_priority_issues': 0,
             'floor_object_issues': 0
         }
     }
@@ -235,11 +291,14 @@ class UniversalContrastAnalyzer:

         # Build segment information
         segment_info = {}
-        floor_segments = []
+
+        logger.info(f"Building segment information for {len(unique_segments)} segments...")

         for seg_id in unique_segments:
             mask = segmentation == seg_id
-            if ...:
+            area = np.sum(mask)
+
+            if area < 50:  # Skip very small segments
                 continue

             category = self.class_to_category.get(seg_id, 'unknown')
@@ -249,16 +308,14 @@ class UniversalContrastAnalyzer:
                 'category': category,
                 'mask': mask,
                 'color': color,
-                'area': ...,
+                'area': area,
                 'class_id': seg_id
             }
-
-            # Track floor segments
-            if category == 'floor':
-                floor_segments.append(seg_id)

         # Find all adjacent segment pairs
+        logger.info("Finding adjacent segments...")
         adjacencies = self.find_adjacent_segments(segmentation)
+        logger.info(f"Found {len(adjacencies)} adjacent segment pairs")

         # Analyze each adjacent pair
         for (seg1_id, seg2_id), boundary in adjacencies.items():
@@ -268,63 +325,52 @@ class UniversalContrastAnalyzer:
             info1 = segment_info[seg1_id]
             info2 = segment_info[seg2_id]

-            wcag_ratio = self.calculate_wcag_contrast(info1['color'], info2['color'])
-            hue_diff = self.calculate_hue_difference(info1['color'], info2['color'])
-            sat_diff = self.calculate_saturation_difference(info1['color'], info2['color'])
-
-            # Determine if there's insufficient contrast
-            has_issue = False
-            severity = 'low'
+            # Skip if both are unknown categories
+            if info1['category'] == 'unknown' and info2['category'] == 'unknown':
+                continue

-            if wcag_ratio < self.wcag_threshold:
-                has_issue = True
-                if wcag_ratio < 3.0:
-                    severity = 'critical'
-                elif wcag_ratio < 4.0:
-                    severity = 'high'
-                else:
-                    severity = 'medium'
+            results['statistics']['analyzed_pairs'] += 1

-            if has_issue:
+            # Check contrast sufficiency
+            is_sufficient, severity = self.is_contrast_sufficient(
+                info1['color'], info2['color'],
+                info1['category'], info2['category']
+            )
+
+            if not is_sufficient:
                 results['statistics']['low_contrast_pairs'] += 1

-                if info1['category'] == 'floor':
-                    floor_info, obj_info = info1, info2
-                else:
-                    floor_info, obj_info = info2, info1
-
-                if self.is_object_on_surface(obj_info['mask'], floor_info['mask']):
-                    is_floor_object = True
-                    results['statistics']['floor_object_issues'] += 1
-                    if severity != 'critical':
-                        severity = 'high'  # Elevate floor-object issues
+                # Calculate detailed metrics
+                wcag_ratio = self.calculate_wcag_contrast(info1['color'], info2['color'])
+                hue_diff = self.calculate_hue_difference(info1['color'], info2['color'])
+                sat_diff = self.calculate_saturation_difference(info1['color'], info2['color'])

+                # Check if it's a floor-object issue
+                is_floor_object = (
+                    (info1['category'] == 'floor' and info2['category'] in ['furniture', 'objects']) or
+                    (info2['category'] == 'floor' and info1['category'] in ['furniture', 'objects'])
+                )
+
+                if is_floor_object:
+                    results['statistics']['floor_object_issues'] += 1
+
+                # Count by severity
                 if severity == 'critical':
                     results['statistics']['critical_issues'] += 1
+                elif severity == 'high':
+                    results['statistics']['high_priority_issues'] += 1
+                elif severity == 'medium':
+                    results['statistics']['medium_priority_issues'] += 1

                 # Record the issue
                 issue = {
                     'segment_ids': (seg1_id, seg2_id),
                     'categories': (info1['category'], info2['category']),
-                    'colors': (info1['color'], info2['color']),
-                    'wcag_ratio': wcag_ratio,
-                    'hue_difference': hue_diff,
-                    'saturation_difference': sat_diff,
-                    'boundary_pixels': np.sum(boundary),
+                    'colors': (info1['color'].tolist(), info2['color'].tolist()),
+                    'wcag_ratio': float(wcag_ratio),
+                    'hue_difference': float(hue_diff),
+                    'saturation_difference': float(sat_diff),
+                    'boundary_pixels': int(np.sum(boundary)),
                     'severity': severity,
                     'is_floor_object': is_floor_object,
                     'boundary_mask': boundary
@@ -336,8 +382,10 @@ class UniversalContrastAnalyzer:
                 self._visualize_issue(results['visualization'], boundary, severity)

         # Sort issues by severity
-        severity_order = {'critical': 0, 'high': 1, 'medium': 2
-        results['issues'].sort(key=lambda x: severity_order
+        severity_order = {'critical': 0, 'high': 1, 'medium': 2}
+        results['issues'].sort(key=lambda x: severity_order.get(x['severity'], 3))
+
+        logger.info(f"Contrast analysis complete: {results['statistics']['low_contrast_pairs']} issues found")

         return results
@@ -348,7 +396,6 @@ class UniversalContrastAnalyzer:
             'critical': (255, 0, 0),   # Red
             'high': (255, 128, 0),     # Orange
             'medium': (255, 255, 0),   # Yellow
-            'low': (128, 255, 128)     # Light green
         }

         color = colors.get(severity, (255, 255, 255))
@@ -357,8 +404,10 @@ class UniversalContrastAnalyzer:
         kernel = np.ones((3, 3), np.uint8)
         dilated = cv2.dilate(boundary.astype(np.uint8), kernel, iterations=2)

-        # Apply color overlay
+        # Apply color overlay with transparency
+        overlay = image.copy()
+        overlay[dilated > 0] = color
+        cv2.addWeighted(overlay, 0.5, image, 0.5, 0, image)

         return image
@@ -374,22 +423,26 @@ class UniversalContrastAnalyzer:
         report.append(f"Total segments analyzed: {stats['total_segments']}")
         report.append(f"Adjacent pairs analyzed: {stats['analyzed_pairs']}")
         report.append(f"Low contrast pairs found: {stats['low_contrast_pairs']}")
-        report.append(f"Critical issues: {stats['critical_issues']}")
+        report.append(f"- Critical issues: {stats['critical_issues']}")
+        report.append(f"- High priority issues: {stats['high_priority_issues']}")
+        report.append(f"- Medium priority issues: {stats['medium_priority_issues']}")
         report.append(f"Floor-object contrast issues: {stats['floor_object_issues']}\n")

         # Detailed issues
         if issues:
             report.append("=== Contrast Issues (sorted by severity) ===\n")

-            for i, issue in enumerate(issues, 1):
+            for i, issue in enumerate(issues[:10], 1):  # Show top 10 issues
                 cat1, cat2 = issue['categories']
                 wcag = issue['wcag_ratio']
+                hue_diff = issue['hue_difference']
+                sat_diff = issue['saturation_difference']
                 severity = issue['severity'].upper()

                 report.append(f"{i}. [{severity}] {cat1} ↔ {cat2}")
-                report.append(f"   - WCAG Contrast Ratio: {wcag:.2f} (minimum: ...)")
-                report.append(f"   - Hue Difference: {...}")
-                report.append(f"   - Saturation Difference: {...}")
+                report.append(f"   - WCAG Contrast Ratio: {wcag:.2f}:1 (minimum: 4.5:1)")
+                report.append(f"   - Hue Difference: {hue_diff:.1f}° (recommended: >30°)")
+                report.append(f"   - Saturation Difference: {sat_diff} (recommended: >50)")

                 if issue['is_floor_object']:
                     report.append("   - ⚠️ Object on floor - requires high visibility!")
@@ -399,4 +452,4 @@ class UniversalContrastAnalyzer:
         else:
             report.append("✅ No contrast issues detected!")

-        return "\n".join(report)
+        return "\n".join(report)
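End-to-end, the analyzer is driven by analyze_contrast; a minimal usage sketch with placeholder inputs (in the real app the label map presumably comes from the OneFormer segmentation pipeline, which this commit does not show):

    import numpy as np
    from utils.universal_contrast_analyzer import UniversalContrastAnalyzer

    analyzer = UniversalContrastAnalyzer(wcag_threshold=4.5)
    image = np.zeros((240, 320, 3), dtype=np.uint8)      # placeholder RGB frame
    segmentation = np.zeros((240, 320), dtype=np.int32)  # placeholder ADE20K label map
    segmentation[120:, :] = 3                            # pretend the lower half is floor

    results = analyzer.analyze_contrast(image, segmentation)
    print(results['statistics'])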
|