added changes to gradio_test.py for gradio dependency issues
Dockerfile: +36 -35
gradio_test.py: +272 -569
Dockerfile
CHANGED

@@ -1,11 +1,11 @@
-FROM ubuntu:…
+FROM ubuntu:20.04
 
 ENV DEBIAN_FRONTEND=noninteractive
 
 # Install system dependencies
 RUN apt-get update && apt-get install -y \
-    python3.… \
-    python3.… \
+    python3.9 \
+    python3.9-dev \
     python3-pip \
     git \
     wget \
@@ -24,57 +24,58 @@ RUN apt-get update && apt-get install -y \
     libgomp1 \
     && rm -rf /var/lib/apt/lists/*
 
-# Set python3.… as default
-RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.… && \
-    update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.…
+# Set python3.9 as default
+RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.9 1 && \
+    update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1
 
 # Upgrade pip
-RUN python -m pip install --upgrade pip==…
+RUN python -m pip install --upgrade pip==23.0.1 setuptools==65.5.0 wheel
 
 # Create user
 RUN useradd -m -u 1000 user
 WORKDIR /app
 
 # Install PyTorch CPU
-RUN pip install torch==1.…
+RUN pip install torch==1.13.1+cpu torchvision==0.14.1+cpu -f https://download.pytorch.org/whl/torch_stable.html
 
 # Install core dependencies with compatible versions
 RUN pip install \
-    numpy==1.… \
-    Pillow==… \
-    opencv-python==4.… \
-    cython==0.29.…
+    numpy==1.21.6 \
+    Pillow==9.3.0 \
+    opencv-python==4.7.0.72 \
+    cython==0.29.35
 
 # Install detectron2 CPU
-RUN pip install detectron2==0.6 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.…
+RUN pip install detectron2==0.6 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.13/index.html
 
-# Install compatible versions for …
+# Install compatible versions - Fixed for httpx/gradio compatibility
 RUN pip install \
-    gradio==3.… \
-    …
+    gradio==3.50.2 \
+    httpx==0.24.1 \
+    httpcore==0.17.3 \
+    huggingface_hub==0.16.4 \
+    scipy==1.9.3 \
+    scikit-image==0.19.3 \
+    scikit-learn==1.2.2 \
+    timm==0.6.13 \
+    einops==0.6.1 \
+    tqdm==4.65.0 \
     imutils==0.5.4 \
-    shapely==… \
-    h5py==3.… \
-    regex==… \
-    ftfy==6.… \
-    inflect==… \
-    gdown==4.… \
+    shapely==2.0.1 \
+    h5py==3.8.0 \
+    regex==2023.3.23 \
+    ftfy==6.1.1 \
+    inflect==6.0.4 \
+    gdown==4.7.1 \
     wget==3.2
 
 # Optional dependencies
-RUN pip install submitit==1.4.…
-RUN pip install pytorch_lightning==1.5…
-RUN pip install wandb==0.…
-RUN pip install icecream==2.1.1 || echo "icecream failed"
+RUN pip install submitit==1.4.5 || echo "submitit failed"
+RUN pip install pytorch_lightning==1.9.5 || echo "pytorch_lightning failed"
+RUN pip install wandb==0.15.0 || echo "wandb failed"
 
 # Try NATTEN
-RUN pip install natten==0.14.6 -f https://shi-labs.com/natten/wheels/cpu/torch1.… || \
+RUN pip install natten==0.14.6 -f https://shi-labs.com/natten/wheels/cpu/torch1.13/index.html || \
     echo "NATTEN installation failed"
 
 # Switch to user
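The httpx and httpcore pins are the core of the fix the commit message describes: gradio 3.50.2 depends on httpx, and an unpinned httpx/httpcore upgrade is a common way a Space image breaks at import time. A quick sanity check that could be run inside the built image (a sketch; the expected values are simply the pins from the layer above):

import gradio, httpx, httpcore

# Expected versions are the pins from the "RUN pip install" layer above.
expected = {"gradio": "3.50.2", "httpx": "0.24.1", "httpcore": "0.17.3"}
for mod in (gradio, httpx, httpcore):
    assert mod.__version__ == expected[mod.__name__], \
        f"{mod.__name__}: got {mod.__version__}, expected {expected[mod.__name__]}"
print("HTTP stack pins OK")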
gradio_test.py
CHANGED

@@ -72,183 +72,6 @@ ONEFORMER_CONFIG = {
 
 from utils.universal_contrast_analyzer import UniversalContrastAnalyzer
 
-# Keep old class for compatibility but deprecated
-class RobustContrastAnalyzer:
-    """Advanced contrast analyzer for Alzheimer's-friendly environments"""
-
-    def __init__(self, wcag_threshold: float = 4.5):
-        self.wcag_threshold = wcag_threshold
-
-        # ADE20K class mappings for important objects
-        self.semantic_classes = {
-            'floor': [3, 4, 13, 28, 78],  # floor, wood floor, rug, carpet, mat
-            'wall': [0, 1, 9],  # wall, building, brick
-            'ceiling': [5],  # ceiling
-            'furniture': [10, 19, 15, 7, 18, 23],  # sofa, chair, table, bed, armchair, cabinet
-            'door': [25],  # door
-            'window': [8],  # window
-            'stairs': [53],  # stairs
-        }
-
-        # Priority relationships for safety
-        self.priority_relationships = {
-            ('floor', 'furniture'): ('critical', 'Furniture must be clearly visible against floor'),
-            ('floor', 'stairs'): ('critical', 'Stairs must have clear contrast with floor'),
-            ('floor', 'door'): ('high', 'Door should be easily distinguishable from floor'),
-            ('wall', 'furniture'): ('high', 'Furniture should stand out from walls'),
-            ('wall', 'door'): ('high', 'Doors should be clearly visible on walls'),
-            ('wall', 'window'): ('medium', 'Windows should have adequate contrast'),
-            ('ceiling', 'wall'): ('low', 'Ceiling-wall contrast is less critical'),
-        }
-
-    def get_object_category(self, class_id: int) -> str:
-        """Map segmentation class to object category"""
-        for category, class_ids in self.semantic_classes.items():
-            if class_id in class_ids:
-                return category
-        return 'other'
-
-    def calculate_wcag_contrast(self, color1: np.ndarray, color2: np.ndarray) -> float:
-        """Calculate WCAG contrast ratio"""
-        def relative_luminance(rgb):
-            rgb_norm = rgb / 255.0
-            rgb_linear = np.where(rgb_norm <= 0.03928,
-                                  rgb_norm / 12.92,
-                                  ((rgb_norm + 0.055) / 1.055) ** 2.4)
-            return np.dot(rgb_linear, [0.2126, 0.7152, 0.0722])
-
-        lum1 = relative_luminance(color1)
-        lum2 = relative_luminance(color2)
-
-        lighter = max(lum1, lum2)
-        darker = min(lum1, lum2)
-
-        return (lighter + 0.05) / (darker + 0.05)
-
-    def extract_dominant_color(self, image: np.ndarray, mask: np.ndarray) -> np.ndarray:
-        """Extract dominant color from masked region"""
-        if not np.any(mask):
-            return np.array([128, 128, 128])
-
-        masked_pixels = image[mask]
-        if len(masked_pixels) == 0:
-            return np.array([128, 128, 128])
-
-        # Use median for robustness against outliers
-        return np.median(masked_pixels, axis=0).astype(int)
-
-    def find_adjacent_segments(self, seg1_mask: np.ndarray, seg2_mask: np.ndarray,
-                               min_boundary_length: int = 30) -> np.ndarray:
-        """Find clean boundaries between segments"""
-        kernel = np.ones((3, 3), np.uint8)
-        dilated1 = cv2.dilate(seg1_mask.astype(np.uint8), kernel, iterations=1)
-        dilated2 = cv2.dilate(seg2_mask.astype(np.uint8), kernel, iterations=1)
-
-        boundary = dilated1 & dilated2
-
-        # Remove small disconnected components
-        contours, _ = cv2.findContours(boundary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-        clean_boundary = np.zeros_like(boundary)
-
-        for contour in contours:
-            if cv2.contourArea(contour) >= min_boundary_length:
-                cv2.fillPoly(clean_boundary, [contour], 1)
-
-        return clean_boundary.astype(bool)
-
-    def analyze_contrast(self, image: np.ndarray, segmentation: np.ndarray) -> Dict:
-        """Perform comprehensive contrast analysis"""
-        h, w = segmentation.shape
-        results = {
-            'critical_issues': [],
-            'high_issues': [],
-            'medium_issues': [],
-            'visualization': image.copy(),
-            'statistics': {}
-        }
-
-        # Build segment information
-        unique_segments = np.unique(segmentation)
-        segment_info = {}
-
-        for seg_id in unique_segments:
-            if seg_id == 0:  # Skip background
-                continue
-
-            mask = segmentation == seg_id
-            if np.sum(mask) < 100:  # Skip very small segments
-                continue
-
-            category = self.get_object_category(seg_id)
-            if category == 'other':
-                continue
-
-            segment_info[seg_id] = {
-                'category': category,
-                'mask': mask,
-                'color': self.extract_dominant_color(image, mask),
-                'area': np.sum(mask)
-            }
-
-        # Analyze priority relationships
-        issue_counts = {'critical': 0, 'high': 0, 'medium': 0}
-
-        for seg_id1, info1 in segment_info.items():
-            for seg_id2, info2 in segment_info.items():
-                if seg_id1 >= seg_id2:
-                    continue
-
-                # Check if this is a priority relationship
-                relationship = tuple(sorted([info1['category'], info2['category']]))
-                if relationship not in self.priority_relationships:
-                    continue
-
-                priority, description = self.priority_relationships[relationship]
-
-                # Check if segments are adjacent
-                boundary = self.find_adjacent_segments(info1['mask'], info2['mask'])
-                if not np.any(boundary):
-                    continue
-
-                # Calculate contrast
-                wcag_contrast = self.calculate_wcag_contrast(info1['color'], info2['color'])
-
-                # Determine if there's an issue
-                if wcag_contrast < self.wcag_threshold:
-                    issue = {
-                        'categories': (info1['category'], info2['category']),
-                        'contrast_ratio': wcag_contrast,
-                        'boundary_area': np.sum(boundary),
-                        'description': description,
-                        'priority': priority
-                    }
-
-                    # Color-code boundaries and store issues
-                    if priority == 'critical':
-                        results['critical_issues'].append(issue)
-                        results['visualization'][boundary] = [255, 0, 0]  # Red
-                        issue_counts['critical'] += 1
-                    elif priority == 'high':
-                        results['high_issues'].append(issue)
-                        results['visualization'][boundary] = [255, 165, 0]  # Orange
-                        issue_counts['high'] += 1
-                    elif priority == 'medium':
-                        results['medium_issues'].append(issue)
-                        results['visualization'][boundary] = [255, 255, 0]  # Yellow
-                        issue_counts['medium'] += 1
-
-        # Calculate statistics
-        results['statistics'] = {
-            'total_segments': len(segment_info),
-            'total_issues': sum(issue_counts.values()),
-            'critical_count': issue_counts['critical'],
-            'high_count': issue_counts['high'],
-            'medium_count': issue_counts['medium'],
-            'wcag_threshold': self.wcag_threshold
-        }
-
-        return results
-
 ########################################
 # ONEFORMER INTEGRATION
 ########################################
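For reference, the WCAG 2.1 contrast math that calculate_wcag_contrast implemented (and that the 4.5:1 threshold used throughout this commit refers to), as a standalone sketch with two worked values:

import numpy as np

def wcag_contrast(rgb1, rgb2):
    """WCAG 2.1 contrast ratio between two sRGB colors given as 0-255 triples."""
    def luminance(rgb):
        c = np.asarray(rgb) / 255.0
        # Linearize sRGB, then weight by the standard luminance coefficients
        lin = np.where(c <= 0.03928, c / 12.92, ((c + 0.055) / 1.055) ** 2.4)
        return float(np.dot(lin, [0.2126, 0.7152, 0.0722]))
    l1, l2 = luminance(rgb1), luminance(rgb2)
    return (max(l1, l2) + 0.05) / (min(l1, l2) + 0.05)

print(wcag_contrast([255, 255, 255], [0, 0, 0]))    # 21.0, the maximum ratio
print(wcag_contrast([120, 120, 120], [90, 90, 90])) # ~1.6, fails the 4.5:1 threshold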
@@ -341,16 +164,19 @@ class OneFormerManager:
 
 
 ########################################
-# …
+# IMPROVED BLACKSPOT DETECTION
 ########################################
 
-class BlackspotDetector:
-    """…"""
+class ImprovedBlackspotDetector:
+    """Enhanced blackspot detector that only detects on floor surfaces"""
 
     def __init__(self, model_path: str):
         self.model_path = model_path
         self.predictor = None
-
+
+        # Expanded floor-related classes in ADE20K
+        self.floor_classes = [3, 4, 13, 28, 78]  # floor, wood floor, rug, carpet, mat
+
     def initialize(self, threshold: float = 0.5) -> bool:
         """Initialize MaskRCNN model"""
         try:
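floor_classes drives everything below: a pixel counts as walkable surface when its ADE20K label is in this list. The per-class OR loop used later in detect_blackspots is equivalent to a single np.isin call, as this small sketch shows:

import numpy as np

FLOOR_CLASSES = [3, 4, 13, 28, 78]  # floor, wood floor, rug, carpet, mat

def floor_mask_from_segmentation(seg: np.ndarray) -> np.ndarray:
    # Equivalent to: for cls in FLOOR_CLASSES: mask |= (seg == cls)
    return np.isin(seg, FLOOR_CLASSES)

seg = np.array([[3, 0], [78, 5]])
print(floor_mask_from_segmentation(seg))  # [[ True False] [ True False]]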
@@ -371,190 +197,175 @@ class BlackspotDetector:
             logger.error(f"Failed to initialize blackspot detector: {e}")
             return False
 
-    def create_enhanced_visualizations(self, image, floor_mask, blackspot_mask):
-        """…"""
-
-        # …
-        segmentation_view = np.zeros((*image.shape[:2], 3), dtype=np.uint8)
-        segmentation_view[floor_mask] = [34, 139, 34]  # Forest green for floor
-        segmentation_view[blackspot_mask] = [255, 0, 0]  # Bright red for blackspots
-        segmentation_view[~(floor_mask | blackspot_mask)] = [128, 128, 128]  # Gray for other areas
-
-        # 2. High Contrast Overlay
-        high_contrast_overlay = image.copy()
-        # Make background slightly darker to emphasize blackspots
-        high_contrast_overlay = cv2.convertScaleAbs(high_contrast_overlay, alpha=0.6, beta=0)
-        # Add bright overlays
-        high_contrast_overlay[floor_mask] = cv2.addWeighted(
-            high_contrast_overlay[floor_mask], 0.7,
-            np.full_like(high_contrast_overlay[floor_mask], [0, 255, 0]), 0.3, 0
-        )
-        high_contrast_overlay[blackspot_mask] = [255, 0, 255]  # Magenta for maximum visibility
-
-        # …
-        …
-        blackspot_only[floor_mask & ~blackspot_mask] = [64, 64, 64]  # Dark gray for floor areas
-
-        # …
-        …
-        side_by_side = np.zeros((h, w * 2, 3), dtype=np.uint8)
-        side_by_side[:, :w] = image
-        side_by_side[:, w:] = segmentation_view
-
-        # …
-        …
-
-        # …
-        …
-
-        # …
-        …
-            cv2.drawContours(annotated_view, [contour], -1, (255, 0, 255), 2)
-
-            # Add label
-            area = cv2.contourArea(contour)
-            label = f"Blackspot {i+1}: {area:.0f}px"
-            cv2.putText(annotated_view, label, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 0), 2)
-
-        return {
-            'segmentation_view': segmentation_view,
-            'high_contrast_overlay': high_contrast_overlay,
-            'blackspot_only': blackspot_only,
-            'side_by_side': side_by_side,
-            'annotated_view': annotated_view
-        }
+    def is_on_floor_surface(self, blackspot_mask: np.ndarray, segmentation: np.ndarray,
+                            floor_mask: np.ndarray, overlap_threshold: float = 0.8) -> bool:
+        """Check if a blackspot is actually on a floor surface"""
+        if np.sum(blackspot_mask) == 0:
+            return False
+
+        # Check overlap with floor mask
+        overlap = blackspot_mask & floor_mask
+        overlap_ratio = np.sum(overlap) / np.sum(blackspot_mask)
+
+        if overlap_ratio < overlap_threshold:
+            return False
+
+        # Additional check: verify the underlying segmentation class
+        blackspot_pixels = segmentation[blackspot_mask]
+        if len(blackspot_pixels) == 0:
+            return False
+
+        # Check if majority of pixels are floor-related classes
+        unique_classes, counts = np.unique(blackspot_pixels, return_counts=True)
+        floor_pixel_count = sum(counts[unique_classes == cls] for cls in self.floor_classes
+                                if cls in unique_classes)
+        floor_ratio = floor_pixel_count / len(blackspot_pixels)
+
+        return floor_ratio > 0.7  # At least 70% of blackspot should be on floor classes
+
+    def filter_non_floor_blackspots(self, blackspot_masks: List[np.ndarray],
+                                    segmentation: np.ndarray, floor_mask: np.ndarray) -> List[np.ndarray]:
+        """Filter out blackspots that are not on floor surfaces"""
+        filtered_masks = []
+
+        for mask in blackspot_masks:
+            if self.is_on_floor_surface(mask, segmentation, floor_mask):
+                filtered_masks.append(mask)
+            else:
+                logger.debug(f"Filtered out non-floor blackspot with area {np.sum(mask)}")
+
+        return filtered_masks
 
-    def detect_blackspots(self, image: np.ndarray, …
-        …
+    def detect_blackspots(self, image: np.ndarray, segmentation: np.ndarray,
+                          floor_prior: Optional[np.ndarray] = None) -> Dict:
+        """Detect blackspots only on floor surfaces"""
         if self.predictor is None:
             raise RuntimeError("Blackspot detector not initialized")
 
         # Get original image dimensions
         original_h, original_w = image.shape[:2]
 
-        # …
-        …
-                floor_prior_resized = floor_prior
-            else:
-                floor_prior_resized = None
+        # Ensure all masks have same dimensions
+        if floor_prior is not None and floor_prior.shape != (original_h, original_w):
+            floor_prior = cv2.resize(
+                floor_prior.astype(np.uint8),
+                (original_w, original_h),
+                interpolation=cv2.INTER_NEAREST
+            ).astype(bool)
+
+        if segmentation.shape != (original_h, original_w):
+            segmentation = cv2.resize(
+                segmentation.astype(np.uint8),
+                (original_w, original_h),
+                interpolation=cv2.INTER_NEAREST
+            )
 
         # Run detection
         try:
-            outputs = self.predictor(…
+            outputs = self.predictor(image)
             instances = outputs["instances"].to("cpu")
         except Exception as e:
             logger.error(f"Error in MaskRCNN prediction: {e}")
-            empty_mask = np.zeros(image.shape[:2], dtype=bool)
-            return {
-                'visualization': image,
-                'floor_mask': empty_mask,
-                'blackspot_mask': empty_mask,
-                'floor_area': 0,
-                'blackspot_area': 0,
-                'coverage_percentage': 0,
-                'num_detections': 0,
-                'avg_confidence': 0.0,
-                'enhanced_views': self.create_enhanced_visualizations(image, empty_mask, empty_mask)
-            }
+            return self._empty_results(image)
 
-        # Process results
         if len(instances) == 0:
-            …
-        else:
-            …
-            blackspot_masks = pred_masks[blackspot_indices] if np.any(blackspot_indices) else []
-            blackspot_scores = scores[blackspot_indices] if np.any(blackspot_indices) else []
-
-            # Combine masks
-            combined_floor = np.zeros(image.shape[:2], dtype=bool)
-            combined_blackspot = np.zeros(image.shape[:2], dtype=bool)
-
-            for mask in floor_masks:
-                combined_floor |= mask
-
-            for mask in blackspot_masks:
-                combined_blackspot |= mask
-
-            # Apply floor prior if available
-            if floor_prior_resized is not None:
-                # Combine OneFormer floor detection with MaskRCNN floor detection
-                combined_floor |= floor_prior_resized
-                # Keep only blackspots that are on floors
-                combined_blackspot &= combined_floor
+            return self._empty_results(image)
+
+        # Process results
+        pred_classes = instances.pred_classes.numpy()
+        pred_masks = instances.pred_masks.numpy()
+        scores = instances.scores.numpy()
+
+        # Separate floor and blackspot masks
+        blackspot_indices = pred_classes == 1
+        blackspot_masks = pred_masks[blackspot_indices] if np.any(blackspot_indices) else []
+        blackspot_scores = scores[blackspot_indices] if np.any(blackspot_indices) else []
+
+        # Create or use floor mask
+        if floor_prior is not None:
+            floor_mask = floor_prior
+        else:
+            # Create floor mask from segmentation
+            floor_mask = np.zeros(segmentation.shape, dtype=bool)
+            for cls in self.floor_classes:
+                floor_mask |= (segmentation == cls)
+
+        # Filter blackspots to only those on floor surfaces
+        filtered_blackspot_masks = self.filter_non_floor_blackspots(
+            blackspot_masks, segmentation, floor_mask
+        )
 
-        # …
-        …
+        # Combine filtered masks
+        combined_blackspot = np.zeros(image.shape[:2], dtype=bool)
+        for mask in filtered_blackspot_masks:
+            combined_blackspot |= mask
+
+        # Create visualizations
+        visualization = self.create_visualization(image, floor_mask, combined_blackspot)
 
         # Calculate statistics
-        floor_area = int(np.sum(combined_floor))
+        floor_area = int(np.sum(floor_mask))
         blackspot_area = int(np.sum(combined_blackspot))
         coverage_percentage = (blackspot_area / floor_area * 100) if floor_area > 0 else 0
 
-        # Count individual blackspot instances
-        blackspot_contours, _ = cv2.findContours(
-            combined_blackspot.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
-        )
-        actual_detections = len([c for c in blackspot_contours if cv2.contourArea(c) > 50])
-
         return {
-            'visualization': …
-            'floor_mask': …
+            'visualization': visualization,
+            'floor_mask': floor_mask,
             'blackspot_mask': combined_blackspot,
             'floor_area': floor_area,
             'blackspot_area': blackspot_area,
             'coverage_percentage': coverage_percentage,
-            'num_detections': actual_detections,
-            'avg_confidence': float(np.mean(blackspot_scores)) if len(blackspot_scores) > 0 else 0.0
-            …
+            'num_detections': len(filtered_blackspot_masks),
+            'avg_confidence': float(np.mean(blackspot_scores)) if len(blackspot_scores) > 0 else 0.0
         }
+
+    def create_visualization(self, image: np.ndarray, floor_mask: np.ndarray,
+                             blackspot_mask: np.ndarray) -> np.ndarray:
+        """Create clear visualization of blackspots on floors only"""
+        vis = image.copy()
+
+        # Semi-transparent green overlay for floors
+        floor_overlay = vis.copy()
+        floor_overlay[floor_mask] = [0, 255, 0]
+        vis = cv2.addWeighted(vis, 0.7, floor_overlay, 0.3, 0)
+
+        # Bright red for blackspots
+        vis[blackspot_mask] = [255, 0, 0]
+
+        # Add contours for clarity
+        blackspot_contours, _ = cv2.findContours(
+            blackspot_mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
+        )
+        cv2.drawContours(vis, blackspot_contours, -1, (255, 255, 0), 2)
+
+        return vis
+
+    def _empty_results(self, image: np.ndarray) -> Dict:
+        """Return empty results structure"""
+        empty_mask = np.zeros(image.shape[:2], dtype=bool)
+        return {
+            'visualization': image,
+            'floor_mask': empty_mask,
+            'blackspot_mask': empty_mask,
+            'floor_area': 0,
+            'blackspot_area': 0,
+            'coverage_percentage': 0,
+            'num_detections': 0,
+            'avg_confidence': 0.0
+        }
+
 
 ########################################
-# …
+# MAIN APPLICATION CLASS
 ########################################
 
 class NeuroNestApp:
-    """Main application class integrating all components
+    """Main application class integrating all components"""
 
     def __init__(self):
        self.oneformer = OneFormerManager()
        self.blackspot_detector = None
-        self.contrast_analyzer = UniversalContrastAnalyzer()
+        self.contrast_analyzer = UniversalContrastAnalyzer(wcag_threshold=4.5)
        self.initialized = False
 
     def initialize(self, blackspot_model_path: str = "./output_floor_blackspot/model_0004999.pth"):
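is_on_floor_surface applies two gates: at least 80% of a proposed blackspot must overlap the floor mask, and at least 70% of its pixels must carry floor-class labels. The same arithmetic on toy masks (values chosen so a boundary-straddling spot fails both gates):

import numpy as np

segmentation = np.zeros((4, 4), dtype=np.uint8)
segmentation[:, :2] = 3              # left half is floor (class 3), right half wall
floor_mask = segmentation == 3

blackspot = np.zeros((4, 4), dtype=bool)
blackspot[1:3, 1:3] = True           # straddles the floor/wall boundary

overlap_ratio = (blackspot & floor_mask).sum() / blackspot.sum()            # 0.5
floor_ratio = np.isin(segmentation[blackspot], [3, 4, 13, 28, 78]).mean()   # 0.5
rejected = overlap_ratio < 0.8 or floor_ratio <= 0.7
print(rejected)  # True: this candidate would be filtered out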
@@ -567,8 +378,8 @@ class NeuroNestApp:
         # Initialize blackspot detector if model exists
         blackspot_success = False
         if os.path.exists(blackspot_model_path):
-            self.blackspot_detector = BlackspotDetector(blackspot_model_path)
-            blackspot_success = self.blackspot_detector.initialize(…)
+            self.blackspot_detector = ImprovedBlackspotDetector(blackspot_model_path)
+            blackspot_success = self.blackspot_detector.initialize()
         else:
             logger.warning(f"Blackspot model not found at {blackspot_model_path}")
 
@@ -581,7 +392,7 @@ class NeuroNestApp:
                       contrast_threshold: float = 4.5,
                       enable_blackspot: bool = True,
                       enable_contrast: bool = True) -> Dict:
-        """Perform complete image analysis
+        """Perform complete image analysis"""
 
         if not self.initialized:
             return {"error": "Application not properly initialized"}
@@ -603,53 +414,59 @@ class NeuroNestApp:
             'statistics': {}
         }
 
-        # 1. Semantic Segmentation
+        # 1. Semantic Segmentation
         logger.info("Running semantic segmentation...")
         seg_mask, seg_visualization = self.oneformer.semantic_segmentation(image_rgb)
-        logger.info(f"Segmentation mask shape: {seg_mask.shape}")
 
         results['segmentation'] = {
             'visualization': seg_visualization,
             'mask': seg_mask
         }
 
-        # Extract floor areas
+        # Extract floor areas
         floor_prior = self.oneformer.extract_floor_areas(seg_mask)
-        logger.info(f"Floor prior shape: {floor_prior.shape}, total floor pixels: {np.sum(floor_prior)}")
 
-        # 2. Blackspot Detection (…
+        # 2. Blackspot Detection (improved to only detect on floors)
         if enable_blackspot and self.blackspot_detector is not None:
             logger.info("Running blackspot detection...")
             try:
-                …
+                # Resize segmentation mask to match original image if needed
+                h_orig, w_orig = image_rgb.shape[:2]
+                h_seg, w_seg = seg_mask.shape
+
+                if (h_seg, w_seg) != (h_orig, w_orig):
+                    seg_mask_resized = cv2.resize(
+                        seg_mask.astype(np.uint8),
+                        (w_orig, h_orig),
+                        interpolation=cv2.INTER_NEAREST
+                    )
+                else:
+                    seg_mask_resized = seg_mask
+
+                blackspot_results = self.blackspot_detector.detect_blackspots(
+                    image_rgb, seg_mask_resized, floor_prior
+                )
                 results['blackspot'] = blackspot_results
-                logger.info("Blackspot detection completed…
+                logger.info("Blackspot detection completed")
             except Exception as e:
                 logger.error(f"Error in blackspot detection: {e}")
-                # Continue without blackspot results
                 results['blackspot'] = None
 
-        # 3. Contrast Analysis
+        # 3. Universal Contrast Analysis
         if enable_contrast:
-            logger.info("Running contrast analysis...")
+            logger.info("Running universal contrast analysis...")
             try:
-                # …
-                …
-                if w != width:
-                    scale = width / w
-                    new_h = int(h * scale)
-                    image_for_contrast = cv2.resize(image_rgb, (width, new_h))
-                else:
-                    image_for_contrast = image_rgb
+                # Resize image to match segmentation size
+                h_seg, w_seg = seg_mask.shape
+                image_for_contrast = cv2.resize(image_rgb, (w_seg, h_seg))
 
-                contrast_results = self.contrast_analyzer.analyze_contrast(…
+                contrast_results = self.contrast_analyzer.analyze_contrast(
+                    image_for_contrast, seg_mask
+                )
                 results['contrast'] = contrast_results
-                logger.info("Contrast analysis completed…
+                logger.info("Contrast analysis completed")
             except Exception as e:
                 logger.error(f"Error in contrast analysis: {e}")
-                # Continue without contrast results
                 results['contrast'] = None
 
         # 4. Generate combined statistics
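Both resize paths above pass interpolation=cv2.INTER_NEAREST for the label mask, and that choice matters: nearest-neighbour never invents new label values, while bilinear interpolation would blend class ids into intermediates that match no ADE20K class. A minimal demonstration:

import cv2
import numpy as np

labels = np.array([[3, 3, 0, 0],
                   [3, 3, 0, 0]], dtype=np.uint8)

up_nn = cv2.resize(labels, (8, 4), interpolation=cv2.INTER_NEAREST)
up_lin = cv2.resize(labels, (8, 4), interpolation=cv2.INTER_LINEAR)
print(np.unique(up_nn))   # [0 3], labels preserved
print(np.unique(up_lin))  # e.g. [0 1 2 3], blended ids that match no class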
@@ -691,31 +508,25 @@ class NeuroNestApp:
         # Contrast stats
         if results['contrast']:
             cs = results['contrast']['statistics']
-            # Count issues by severity
-            critical_count = sum(1 for issue in results['contrast'].get('issues', []) if issue['severity'] == 'critical')
-            high_count = sum(1 for issue in results['contrast'].get('issues', []) if issue['severity'] == 'high')
-            medium_count = sum(1 for issue in results['contrast'].get('issues', []) if issue['severity'] == 'medium')
-
             stats['contrast'] = {
-                '…
-                '…
-                '…
-                '…
-                '…
+                'total_segments': cs.get('total_segments', 0),
+                'analyzed_pairs': cs.get('analyzed_pairs', 0),
+                'low_contrast_pairs': cs.get('low_contrast_pairs', 0),
+                'critical_issues': cs.get('critical_issues', 0),
+                'high_priority_issues': cs.get('high_priority_issues', 0),
+                'medium_priority_issues': cs.get('medium_priority_issues', 0),
                 'floor_object_issues': cs.get('floor_object_issues', 0)
             }
 
         return stats
-
-# GRADIO INTERFACE
-########################################
+
 
 ########################################
-# …
+# GRADIO INTERFACE
 ########################################
 
 def create_gradio_interface():
-    """Create the …
+    """Create the Gradio interface"""
 
     # Initialize the application
     app = NeuroNestApp()
@@ -725,10 +536,10 @@ def create_gradio_interface():
         raise RuntimeError("Failed to initialize OneFormer")
 
     def analyze_wrapper(image_path, blackspot_threshold, contrast_threshold,
-                        enable_blackspot, enable_contrast, blackspot_view_type):
-        """…"""
+                        enable_blackspot, enable_contrast):
+        """Wrapper function for Gradio interface"""
         if image_path is None:
-            return None, None, None, None, …
+            return None, None, None, None, "Please upload an image"
 
         results = app.analyze_image(
             image_path=image_path,
@@ -739,164 +550,106 @@ def create_gradio_interface():
         )
 
         if "error" in results:
-            return None, None, None, None, …
+            return None, None, None, None, f"Error: {results['error']}"
 
         # Extract outputs
         seg_output = results['segmentation']['visualization'] if results['segmentation'] else None
-
-        # Enhanced blackspot output selection
-        blackspot_output = None
-        blackspot_segmentation = None
-        if results['blackspot'] and 'enhanced_views' in results['blackspot']:
-            views = results['blackspot']['enhanced_views']
-
-            # Select view based on user choice
-            if blackspot_view_type == "High Contrast":
-                blackspot_output = views['high_contrast_overlay']
-            elif blackspot_view_type == "Segmentation Only":
-                blackspot_output = views['segmentation_view']
-            elif blackspot_view_type == "Blackspots Only":
-                blackspot_output = views['blackspot_only']
-            elif blackspot_view_type == "Side by Side":
-                blackspot_output = views['side_by_side']
-            elif blackspot_view_type == "Annotated":
-                blackspot_output = views['annotated_view']
-            else:
-                blackspot_output = views['high_contrast_overlay']
-
-            # Always provide segmentation view for the dedicated tab
-            blackspot_segmentation = views['segmentation_view']
-
+        blackspot_output = results['blackspot']['visualization'] if results['blackspot'] else None
         contrast_output = results['contrast']['visualization'] if results['contrast'] else None
 
-        # Generate report
-        …
+        # Generate universal contrast report
+        if results['contrast']:
+            contrast_report = app.contrast_analyzer.generate_report(results['contrast'])
+        else:
+            contrast_report = "Contrast analysis not performed."
+
+        # Generate full report
+        report = generate_comprehensive_report(results, contrast_report)
 
-        return seg_output, blackspot_output, …
+        return seg_output, blackspot_output, contrast_output, report
 
-    …
-        "…
-        report…
+    def generate_comprehensive_report(results: Dict, contrast_report: str) -> str:
+        """Generate comprehensive analysis report"""
+        report = ["# 🧠 NeuroNest Analysis Report\n"]
+        report.append(f"*Generated: {time.strftime('%Y-%m-%d %H:%M:%S')}*\n")
 
         # Segmentation results
         if results['segmentation']:
             stats = results['statistics'].get('segmentation', {})
-            report.append(…
-            report.append(f"- **…
-            report.append(f"- **…
+            report.append("## 🎯 Object Segmentation")
+            report.append(f"- **Classes detected:** {stats.get('num_classes', 'N/A')}")
+            report.append(f"- **Resolution:** {stats.get('image_size', 'N/A')}")
             report.append("")
 
-        # …
+        # Blackspot results
         if results['blackspot']:
             bs_stats = results['statistics'].get('blackspot', {})
-            report.append(…
+            report.append("## ⚫ Blackspot Detection (Floor Surfaces Only)")
             report.append(f"- **Floor area:** {bs_stats.get('floor_area_pixels', 0):,} pixels")
             report.append(f"- **Blackspot area:** {bs_stats.get('blackspot_area_pixels', 0):,} pixels")
             report.append(f"- **Coverage:** {bs_stats.get('coverage_percentage', 0):.2f}% of floor")
-            report.append(f"- **…
-            report.append(f"- **Average confidence:** {bs_stats.get('avg_confidence', 0):.2f}")
+            report.append(f"- **Detections:** {bs_stats.get('num_detections', 0)}")
 
             # Risk assessment
             coverage = bs_stats.get('coverage_percentage', 0)
-            if coverage > …
-                report.append(…
+            if coverage > 10:
+                report.append("- **⚠️ Risk:** CRITICAL - Immediate intervention required")
+            elif coverage > 5:
+                report.append("- **⚠️ Risk:** HIGH - Significant fall hazard")
             elif coverage > 1:
-                report.append(…
+                report.append("- **⚠️ Risk:** MEDIUM - Potential safety concern")
             elif coverage > 0:
-                report.append(…
+                report.append("- **✓ Risk:** LOW - Minor concern")
             else:
-                report.append(…
+                report.append("- **✓ Risk:** NONE - No blackspots detected")
             report.append("")
 
-        # …
-        …
-            report.append(f"- **Adjacent pairs analyzed:** {results['contrast']['statistics'].get('analyzed_pairs', 0)}")
-            report.append(f"- **Total contrast issues:** {contrast_stats.get('total_issues', 0)}")
-            report.append(f"- **🔴 Critical:** {contrast_stats.get('critical_issues', 0)}")
-            report.append(f"- **🟠 High priority:** {contrast_stats.get('high_priority_issues', 0)}")
-            report.append(f"- **🟡 Medium priority:** {contrast_stats.get('medium_priority_issues', 0)}")
-            report.append(f"- **⚠️ Floor-object issues:** {contrast_stats.get('floor_object_issues', 0)}")
-            report.append("")
-
-            # Add detailed issues
-            issues = results['contrast'].get('issues', [])
-            if issues:
-                # Group by severity
-                critical_issues = [i for i in issues if i['severity'] == 'critical']
-                high_issues = [i for i in issues if i['severity'] == 'high']
-
-                if critical_issues:
-                    report.append("### 🔴 Critical Issues (Immediate Attention Required)")
-                    for issue in critical_issues[:5]:  # Show top 5
-                        cats = f"{issue['categories'][0]} ↔ {issue['categories'][1]}"
-                        ratio = issue['wcag_ratio']
-                        report.append(f"- **{cats}**: {ratio:.1f}:1 contrast ratio")
-                        if issue['is_floor_object']:
-                            report.append(f"  _⚠️ Object on floor - high visibility required!_")
-                    report.append("")
-
-                if high_issues:
-                    report.append("### 🟠 High Priority Issues")
-                    for issue in high_issues[:3]:  # Show top 3
-                        cats = f"{issue['categories'][0]} ↔ {issue['categories'][1]}"
-                        ratio = issue['wcag_ratio']
-                        report.append(f"- **{cats}**: {ratio:.1f}:1 contrast ratio")
-                    report.append("")
+        # Universal contrast analysis
+        report.append("## 🎨 Universal Contrast Analysis")
+        report.append(contrast_report)
+        report.append("")
 
-        # …
-        report.append("## 📋 Recommendations")
+        # Recommendations
+        report.append("## 📋 Recommendations for Alzheimer's Care")
 
-        …
-        if results['blackspot']:
-            coverage = results['statistics'].get('blackspot', {}).get('coverage_percentage', 0)
-            if coverage > 0:
-                report.append("### Blackspot Mitigation")
-                report.append("- Remove or replace dark-colored floor materials in detected areas")
-                report.append("- Improve lighting in blackspot areas")
-                report.append("- Consider using light-colored rugs or mats to cover blackspots")
-                report.append("- Add visual cues like contrasting tape around problem areas")
-                report.append("")
+        has_issues = False
 
-        …
-        …
-            report.append("…
-            report.append("- …
-            report.append("- Use …
-            report.append("- Add …
-            report.append("- Consider color therapy guidelines for dementia")
-            report.append("")
+        if results['blackspot'] and results['statistics']['blackspot']['coverage_percentage'] > 0:
+            has_issues = True
+            report.append("\n### Blackspot Mitigation:")
+            report.append("- Replace dark flooring materials with lighter alternatives")
+            report.append("- Install additional lighting in affected areas")
+            report.append("- Use light-colored rugs or runners to cover dark spots")
+            report.append("- Add contrasting tape or markers around blackspot perimeters")
 
-        if …
-            …
-            report.append("…
+        if results['contrast'] and results['statistics']['contrast']['low_contrast_pairs'] > 0:
+            has_issues = True
+            report.append("\n### Contrast Improvements:")
+            report.append("- Paint furniture in colors that contrast with floors/walls")
+            report.append("- Add colored tape or markers to furniture edges")
+            report.append("- Install LED strip lighting under furniture edges")
+            report.append("- Use contrasting placemats, cushions, or covers")
+
+        if not has_issues:
+            report.append("\n✅ **Excellent!** This environment appears well-optimized for individuals with Alzheimer's.")
+            report.append("No significant visual hazards detected.")
 
         return "\n".join(report)
 
-    # Create the interface
-    title = "🧠 NeuroNest: …
+    # Create the interface
+    title = "🧠 NeuroNest: AI-Powered Environment Safety Analysis"
     description = """
-    **…
+    **Advanced visual analysis for Alzheimer's and dementia care environments**
 
-    …
-    - **Semantic Segmentation**: Identifies rooms, furniture, and objects
-    - **Enhanced Blackspot Detection**: Locates and visualizes dangerous black areas on floors
-    - **Contrast Analysis**: Evaluates color contrast for visual accessibility
+    This system provides:
+    - **Object Segmentation**: Identifies all room elements (floors, walls, furniture)
+    - **Floor-Only Blackspot Detection**: Locates dangerous dark areas on walking surfaces
+    - **Universal Contrast Analysis**: Evaluates visibility between ALL adjacent objects
+
+    *Following WCAG 2.1 guidelines for visual accessibility*
     """
 
-    with gr.Blocks(
-        title=title,
-        css="""
-        .main-header { text-align: center; margin-bottom: 2rem; }
-        .analysis-section { border: 2px solid #f0f0f0; border-radius: 10px; padding: 1rem; margin: 1rem 0; }
-        .critical-text { color: #ff0000; font-weight: bold; }
-        .high-text { color: #ff8800; font-weight: bold; }
-        .medium-text { color: #ffaa00; font-weight: bold; }
-        """
-    ) as interface:
+    with gr.Blocks(title=title, theme=gr.themes.Soft()) as interface:
 
         gr.Markdown(f"# {title}")
         gr.Markdown(description)
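The report's risk assessment is a threshold ladder over blackspot coverage. Pulled out of the report builder, the mapping is (tiers copied from the branches above):

def blackspot_risk(coverage_pct: float) -> str:
    """Coverage of floor area (%) mapped to a risk tier, per the report logic."""
    if coverage_pct > 10:
        return "CRITICAL - Immediate intervention required"
    if coverage_pct > 5:
        return "HIGH - Significant fall hazard"
    if coverage_pct > 1:
        return "MEDIUM - Potential safety concern"
    if coverage_pct > 0:
        return "LOW - Minor concern"
    return "NONE - No blackspots detected"

print(blackspot_risk(7.2))  # HIGH - Significant fall hazard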
@@ -908,14 +661,14 @@ def create_gradio_interface():
                 image_input = gr.Image(
                     label="📸 Upload Room Image",
                     type="filepath",
-                    height=…
+                    height=400
                 )
 
                 # Analysis settings
-                with gr.Accordion("…
+                with gr.Accordion("⚙️ Analysis Settings", open=True):
                     enable_blackspot = gr.Checkbox(
                         value=blackspot_ok,
-                        label="Enable Blackspot Detection",
+                        label="Enable Floor Blackspot Detection",
                         interactive=blackspot_ok
                     )
 
@@ -924,29 +677,21 @@ def create_gradio_interface():
                         maximum=0.9,
                         value=0.5,
                         step=0.05,
-                        label="…
-                        visible=blackspot_ok
-                    )
-
-                    # NEW: Blackspot visualization options
-                    blackspot_view_type = gr.Radio(
-                        choices=["High Contrast", "Segmentation Only", "Blackspots Only", "Side by Side", "Annotated"],
-                        value="High Contrast",
-                        label="Blackspot Visualization Style",
+                        label="Detection Sensitivity",
                         visible=blackspot_ok
                     )
 
                     enable_contrast = gr.Checkbox(
                         value=True,
-                        label="Enable Contrast Analysis"
+                        label="Enable Universal Contrast Analysis"
                     )
 
                     contrast_threshold = gr.Slider(
-                        minimum=…
-                        maximum=…
+                        minimum=3.0,
+                        maximum=7.0,
                         value=4.5,
                         step=0.1,
-                        label="WCAG Contrast Threshold"
+                        label="WCAG Contrast Threshold (4.5:1 recommended)"
                     )
 
                     # Analysis button
@@ -958,43 +703,34 @@ def create_gradio_interface():
 
             # Output Column
             with gr.Column(scale=2):
-                # …
-                main_display = gr.Image(
-                    label="🎯 Object Detection & Segmentation",
-                    height=400,
-                    interactive=False
-                )
-
-                # Enhanced analysis tabs
+                # Analysis tabs
                 with gr.Tabs():
                     with gr.Tab("📊 Analysis Report"):
                         analysis_report = gr.Markdown(
-                            value="Upload an image and click 'Analyze Environment' to …
+                            value="Upload an image and click 'Analyze Environment' to begin."
+                        )
+
+                    with gr.Tab("🎯 Object Segmentation"):
+                        seg_display = gr.Image(
+                            label="Detected Objects",
+                            height=400,
+                            interactive=False
                         )
 
                     if blackspot_ok:
-                        with gr.Tab("⚫ …
+                        with gr.Tab("⚫ Floor Blackspots"):
                             blackspot_display = gr.Image(
-                                label="Blackspot …
-                                height=…
-                                interactive=False
-                            )
-
-                        with gr.Tab("🔍 Blackspot Segmentation"):
-                            blackspot_segmentation_display = gr.Image(
-                                label="Pure Blackspot Segmentation",
-                                height=300,
+                                label="Blackspot Detection (Floors Only)",
+                                height=400,
                                 interactive=False
                             )
                     else:
                         blackspot_display = gr.Image(visible=False)
-                        blackspot_segmentation_display = gr.Image(visible=False)
 
                     with gr.Tab("🎨 Contrast Analysis"):
                         contrast_display = gr.Image(
-                            label="Contrast …
-                            height=…
+                            label="Low Contrast Areas (All Objects)",
+                            height=400,
                             interactive=False
                         )
 
@@ -1006,59 +742,28 @@ def create_gradio_interface():
                 blackspot_threshold,
                 contrast_threshold,
                 enable_blackspot,
-                enable_contrast,
-                blackspot_view_type
+                enable_contrast
             ],
             outputs=[
-                main_display,
+                seg_display,
                 blackspot_display,
-                blackspot_segmentation_display,
                 contrast_display,
                 analysis_report
             ]
         )
 
-        # Example images (optional)
-        example_dir = Path("examples")
-        if example_dir.exists():
-            examples = [
-                [str(img), 0.5, 4.5, True, True, "High Contrast"]
-                for img in example_dir.glob("*.jpg")
-            ]
-
-            if examples:
-                gr.Examples(
-                    examples=examples[:3],  # Show max 3 examples
-                    inputs=[
-                        image_input,
-                        blackspot_threshold,
-                        contrast_threshold,
-                        enable_blackspot,
-                        enable_contrast,
-                        blackspot_view_type
-                    ],
-                    outputs=[
-                        main_display,
-                        blackspot_display,
-                        blackspot_segmentation_display,
-                        contrast_display,
-                        analysis_report
-                    ],
-                    fn=analyze_wrapper,
-                    label="🖼️ Example Images"
-                )
-
         # Footer
         gr.Markdown("""
         ---
-        **NeuroNest** - …
-        *…
+        **NeuroNest** v2.0 - Enhanced with floor-only blackspot detection and universal contrast analysis
+        *Creating safer environments for cognitive health through AI*
         """)
 
     return interface
 
-
-…
+
+########################################
+# MAIN EXECUTION
 ########################################
 
 if __name__ == "__main__":
@@ -1067,12 +772,10 @@ if __name__ == "__main__":
 
     try:
         interface = create_gradio_interface()
-
-        # Fixed launch call - removed incompatible parameters
         interface.queue(max_size=10).launch(
             server_name="0.0.0.0",
             server_port=7860,
-            share=…
+            share=False
         )
     except Exception as e:
         logger.error(f"Failed to launch application: {e}")
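For reference, the same launch settings in a minimal self-contained app (gradio 3.50.2 API; queue size, address, and port as in this commit):

import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("smoke test")

# queue() bounds pending requests; share=False keeps the app local to the
# container; 0.0.0.0:7860 is the address/port a Hugging Face Space serves on.
demo.queue(max_size=10).launch(server_name="0.0.0.0", server_port=7860, share=False)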