"""Universal Contrast Analyzer for detecting low contrast between ALL adjacent objects.

Optimized for Alzheimer's/dementia care environments.
"""

import colorsys
import logging
from typing import Dict, Optional, Tuple

import cv2
import numpy as np
from skimage.segmentation import find_boundaries
from sklearn.cluster import DBSCAN

logger = logging.getLogger(__name__)


class UniversalContrastAnalyzer:
    """Analyzes contrast between ALL adjacent objects in a room.

    Ensures proper visibility for elderly individuals with Alzheimer's or dementia.
    """

    def __init__(self, wcag_threshold: float = 4.5):
        # WCAG 2.0 AA minimum contrast ratio for normal text. (Note: the per-pair
        # thresholds in is_contrast_sufficient are currently hard-coded.)
        self.wcag_threshold = wcag_threshold

        # Semantic class groupings. The integer IDs follow the indexing of the
        # upstream segmentation model (they appear to be ADE20K-style indices,
        # e.g. 0 = wall, 3 = floor, 5 = ceiling); verify against the model in use.
        self.semantic_classes = {
            'floor': [3, 4, 13, 28, 78],
            'wall': [0, 1, 9, 21],
            'ceiling': [5, 16],
            'furniture': [
                7, 10, 15, 18, 19, 23, 30, 33, 34, 36, 44, 45, 57, 63, 64, 65, 75,
            ],
            # NOTE: class 14 is listed under both 'door' and 'window'; because the
            # reverse mapping below is built in insertion order, it resolves to
            # 'window'. Review whether that is the intended category.
            'door': [25, 14, 79],
            'window': [8, 14],
            'stairs': [53, 59],
            'objects': [
                17, 20, 24, 37, 38, 39, 42, 62, 68, 71, 73, 80, 82, 84, 89, 90, 92, 93,
            ],
            'fixtures': [
                32, 46, 49, 50, 54, 66, 69, 70, 77, 94, 97, 98, 99, 117, 118, 119, 120,
            ],
            'decorative': [
                6, 12, 56, 60, 61, 72, 83, 91, 96, 100, 102, 104, 106, 110, 112,
            ],
        }

        # Reverse lookup: class ID -> category name. Later categories overwrite
        # earlier ones for duplicated IDs.
        self.class_to_category = {}
        for category, class_ids in self.semantic_classes.items():
            for class_id in class_ids:
                self.class_to_category[class_id] = category
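
        # Example (sketch): with the mapping above, self.class_to_category.get(3)
        # returns 'floor'; IDs absent from every list fall back to 'unknown' at
        # lookup time (see analyze_contrast).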

    def calculate_wcag_contrast(self, color1: np.ndarray, color2: np.ndarray) -> float:
        """Calculate the WCAG 2.0 contrast ratio between two RGB colors (range 1-21)."""
        def relative_luminance(rgb):
            # Normalize 8-bit channels to [0, 1].
            rgb_norm = np.array(rgb) / 255.0
            # Linearize sRGB values (inverse gamma), per the WCAG 2.0 definition.
            rgb_linear = np.where(
                rgb_norm <= 0.03928,
                rgb_norm / 12.92,
                np.power((rgb_norm + 0.055) / 1.055, 2.4)
            )
            # Weighted sum of the linear R, G, B channels gives relative luminance.
            return 0.2126 * rgb_linear[0] + 0.7152 * rgb_linear[1] + 0.0722 * rgb_linear[2]

        lum1 = relative_luminance(color1)
        lum2 = relative_luminance(color2)

        lighter = max(lum1, lum2)
        darker = min(lum1, lum2)

        # WCAG contrast ratio: (L_lighter + 0.05) / (L_darker + 0.05).
        return (lighter + 0.05) / (darker + 0.05)
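
    # Example (sketch): pure black vs. pure white yields the maximum WCAG ratio:
    #   calculate_wcag_contrast(np.array([0, 0, 0]), np.array([255, 255, 255])) == 21.0
    # while identical colors yield the minimum ratio of 1.0.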

    def calculate_hue_difference(self, color1: np.ndarray, color2: np.ndarray) -> float:
        """Calculate the circular hue difference in degrees (0-180)."""
        rgb1 = color1 / 255.0
        rgb2 = color2 / 255.0

        hsv1 = colorsys.rgb_to_hsv(rgb1[0], rgb1[1], rgb1[2])
        hsv2 = colorsys.rgb_to_hsv(rgb2[0], rgb2[1], rgb2[2])

        # colorsys hues lie in [0, 1], so scale to degrees and wrap around the
        # color wheel: the maximum circular distance between two hues is 180°.
        # (The original scaled by 180 and wrapped at 90, which halved every
        # difference and contradicted the documented 0-180 range.)
        hue_diff = abs(hsv1[0] - hsv2[0]) * 360
        if hue_diff > 180:
            hue_diff = 360 - hue_diff

        return hue_diff
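
    # Example (sketch): pure red (255, 0, 0) has hue 0° and pure green (0, 255, 0)
    # has hue 120°, so calculate_hue_difference returns 120.0 for that pair.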

    def calculate_saturation_difference(self, color1: np.ndarray, color2: np.ndarray) -> float:
        """Calculate the saturation difference as a percentage (0-100)."""
        rgb1 = color1 / 255.0
        rgb2 = color2 / 255.0

        hsv1 = colorsys.rgb_to_hsv(rgb1[0], rgb1[1], rgb1[2])
        hsv2 = colorsys.rgb_to_hsv(rgb2[0], rgb2[1], rgb2[2])

        # colorsys saturation is in [0, 1]; scale the difference to percent.
        return abs(hsv1[1] - hsv2[1]) * 100

    def extract_dominant_color(self, image: np.ndarray, mask: np.ndarray,
                               sample_size: int = 1000) -> np.ndarray:
        """Extract the dominant color from a masked region using robust statistics."""
        # Fall back to mid-gray when the mask is empty.
        if not np.any(mask):
            return np.array([128, 128, 128])

        masked_pixels = image[mask]
        if len(masked_pixels) == 0:
            return np.array([128, 128, 128])

        # Subsample large regions to keep clustering tractable.
        if len(masked_pixels) > sample_size:
            indices = np.random.choice(len(masked_pixels), sample_size, replace=False)
            masked_pixels = masked_pixels[indices]

        # For sufficiently large regions, cluster in color space and take the
        # median of the largest cluster; this is robust to shadows and highlights.
        if len(masked_pixels) > 50:
            try:
                clustering = DBSCAN(eps=30, min_samples=10).fit(masked_pixels)
                labels = clustering.labels_

                # Ignore noise points (label -1) and pick the largest cluster.
                unique_labels, counts = np.unique(labels[labels >= 0], return_counts=True)
                if len(unique_labels) > 0:
                    dominant_label = unique_labels[np.argmax(counts)]
                    dominant_colors = masked_pixels[labels == dominant_label]
                    return np.median(dominant_colors, axis=0).astype(int)
            except Exception:
                # Clustering can fail on degenerate input; fall through to the median.
                pass

        # Fallback: plain per-channel median of the sampled pixels.
        return np.median(masked_pixels, axis=0).astype(int)
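
    # Example (sketch): with eps=30, pixels within ~30 units of one another in RGB
    # space are grouped, so a beige floor with mild shading typically collapses to
    # a single cluster while a dark object on it forms a separate one.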

    def find_adjacent_segments(self, segmentation: np.ndarray) -> Dict[Tuple[int, int], np.ndarray]:
        """Find all pairs of adjacent segments and their boundaries.

        Returns a dict mapping (seg1_id, seg2_id) to a boolean boundary mask.
        """
        adjacencies = {}

        # Mark pixels that sit on a segment boundary.
        boundaries = find_boundaries(segmentation, mode='inner')

        # Scan each boundary pixel's 8-neighborhood for differing segment IDs.
        # (A pure-Python scan is slow on large images but straightforward.)
        h, w = segmentation.shape
        for y in range(1, h - 1):
            for x in range(1, w - 1):
                if boundaries[y, x]:
                    center_id = segmentation[y, x]

                    neighbors = [
                        segmentation[y - 1, x],
                        segmentation[y + 1, x],
                        segmentation[y, x - 1],
                        segmentation[y, x + 1],
                        segmentation[y - 1, x - 1],
                        segmentation[y - 1, x + 1],
                        segmentation[y + 1, x - 1],
                        segmentation[y + 1, x + 1],
                    ]

                    for neighbor_id in neighbors:
                        # Skip same-segment neighbors and the background (ID 0).
                        if neighbor_id != center_id and neighbor_id != 0:
                            # Canonical ordering so (a, b) and (b, a) share one entry.
                            pair = tuple(sorted([center_id, neighbor_id]))

                            if pair not in adjacencies:
                                adjacencies[pair] = np.zeros((h, w), dtype=bool)
                            adjacencies[pair][y, x] = True

        # Discard pairs whose shared boundary is too short to matter.
        min_boundary_pixels = 20
        filtered_adjacencies = {
            pair: boundary
            for pair, boundary in adjacencies.items()
            if np.sum(boundary) >= min_boundary_pixels
        }

        return filtered_adjacencies
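
    # Example (sketch): if segment 3 (floor) borders segment 15 (table), the result
    # contains an entry {(3, 15): <HxW bool mask>} whose True pixels trace the shared
    # boundary; pairs with fewer than 20 boundary pixels are dropped.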

    def is_contrast_sufficient(self, color1: np.ndarray, color2: np.ndarray,
                               category1: str, category2: str) -> Tuple[bool, Optional[str]]:
        """Determine whether contrast is sufficient per WCAG and perceptual guidelines.

        Returns (is_sufficient, severity_if_not); severity is None when sufficient.
        """
        wcag_ratio = self.calculate_wcag_contrast(color1, color2)
        hue_diff = self.calculate_hue_difference(color1, color2)
        sat_diff = self.calculate_saturation_difference(color1, color2)

        # Category pairs are stored pre-sorted so they match the sorted
        # `relationship` tuple below. (The original listed e.g. ('floor', 'door'),
        # which could never match the sorted tuple ('door', 'floor').)
        critical_pairs = [
            ('floor', 'stairs'),
            ('door', 'floor'),
            ('stairs', 'wall'),
        ]

        high_priority_pairs = [
            ('floor', 'furniture'),
            ('door', 'wall'),
            ('furniture', 'wall'),
            ('floor', 'objects'),
        ]

        relationship = tuple(sorted([category1, category2]))

        if relationship in critical_pairs:
            # Safety-critical boundaries demand enhanced (AAA-level) contrast.
            if wcag_ratio < 7.0:
                return False, 'critical'
            # Even at a decent luminance ratio, near-identical hue and saturation
            # can still be hard to distinguish.
            if wcag_ratio < 10.0 and hue_diff < 30 and sat_diff < 50:
                return False, 'critical'

        elif relationship in high_priority_pairs:
            if wcag_ratio < 4.5:
                return False, 'high'
            if wcag_ratio < 7.0 and hue_diff < 20 and sat_diff < 40:
                return False, 'high'

        else:
            if wcag_ratio < 3.0:
                return False, 'medium'
            if wcag_ratio < 4.5 and hue_diff < 15 and sat_diff < 30:
                return False, 'medium'

        return True, None
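
    # Example (sketch): two close beige tones, e.g. (200, 190, 170) vs.
    # (190, 182, 165), yield a WCAG ratio near 1:1, so a floor-furniture
    # boundary in those colors is flagged as a 'high' severity issue.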

    def analyze_contrast(self, image: np.ndarray, segmentation: np.ndarray) -> Dict:
        """Perform comprehensive contrast analysis between ALL adjacent objects.

        Args:
            image: RGB image.
            segmentation: Segmentation mask with class IDs.

        Returns:
            Dictionary containing analysis results and a visualization overlay.
        """
        h, w = segmentation.shape
        results = {
            'issues': [],
            'visualization': image.copy(),
            'statistics': {
                'total_segments': 0,
                'analyzed_pairs': 0,
                'low_contrast_pairs': 0,
                'critical_issues': 0,
                'high_priority_issues': 0,
                'medium_priority_issues': 0,
                'floor_object_issues': 0,
            },
        }

        # Collect segment IDs, ignoring the background (ID 0).
        unique_segments = np.unique(segmentation)
        unique_segments = unique_segments[unique_segments != 0]
        results['statistics']['total_segments'] = len(unique_segments)

        # Build per-segment metadata: category, mask, dominant color, area.
        segment_info = {}
        logger.info(f"Building segment information for {len(unique_segments)} segments...")
        for seg_id in unique_segments:
            mask = segmentation == seg_id
            area = np.sum(mask)

            # Skip tiny segments; their dominant colors are unreliable.
            if area < 50:
                continue

            category = self.class_to_category.get(seg_id, 'unknown')
            color = self.extract_dominant_color(image, mask)

            segment_info[seg_id] = {
                'category': category,
                'mask': mask,
                'color': color,
                'area': area,
                'class_id': seg_id,
            }

        logger.info("Finding adjacent segments...")
        adjacencies = self.find_adjacent_segments(segmentation)
        logger.info(f"Found {len(adjacencies)} adjacent segment pairs")

        for (seg1_id, seg2_id), boundary in adjacencies.items():
            if seg1_id not in segment_info or seg2_id not in segment_info:
                continue

            info1 = segment_info[seg1_id]
            info2 = segment_info[seg2_id]

            # Skip pairs where neither segment maps to a known category.
            if info1['category'] == 'unknown' and info2['category'] == 'unknown':
                continue

            results['statistics']['analyzed_pairs'] += 1

            is_sufficient, severity = self.is_contrast_sufficient(
                info1['color'], info2['color'],
                info1['category'], info2['category']
            )

            if not is_sufficient:
                results['statistics']['low_contrast_pairs'] += 1

                wcag_ratio = self.calculate_wcag_contrast(info1['color'], info2['color'])
                hue_diff = self.calculate_hue_difference(info1['color'], info2['color'])
                sat_diff = self.calculate_saturation_difference(info1['color'], info2['color'])

                # Floor-object boundaries are tracked separately for fall-risk
                # reporting.
                is_floor_object = (
                    (info1['category'] == 'floor' and info2['category'] in ['furniture', 'objects']) or
                    (info2['category'] == 'floor' and info1['category'] in ['furniture', 'objects'])
                )
                if is_floor_object:
                    results['statistics']['floor_object_issues'] += 1

                if severity == 'critical':
                    results['statistics']['critical_issues'] += 1
                elif severity == 'high':
                    results['statistics']['high_priority_issues'] += 1
                elif severity == 'medium':
                    results['statistics']['medium_priority_issues'] += 1

                issue = {
                    'segment_ids': (seg1_id, seg2_id),
                    'categories': (info1['category'], info2['category']),
                    'colors': (info1['color'].tolist(), info2['color'].tolist()),
                    'wcag_ratio': float(wcag_ratio),
                    'hue_difference': float(hue_diff),
                    'saturation_difference': float(sat_diff),
                    'boundary_pixels': int(np.sum(boundary)),
                    'severity': severity,
                    'is_floor_object': is_floor_object,
                    'boundary_mask': boundary,
                }
                results['issues'].append(issue)

                # Highlight the problem boundary on the visualization overlay.
                self._visualize_issue(results['visualization'], boundary, severity)

        # Most severe issues first.
        severity_order = {'critical': 0, 'high': 1, 'medium': 2}
        results['issues'].sort(key=lambda x: severity_order.get(x['severity'], 3))

        logger.info(f"Contrast analysis complete: {results['statistics']['low_contrast_pairs']} issues found")
        return results

    def _visualize_issue(self, image: np.ndarray, boundary: np.ndarray, severity: str):
        """Overlay a severity-colored highlight on the boundary (modifies image in place)."""
        # Severity -> RGB highlight color (image is assumed to be RGB).
        colors = {
            'critical': (255, 0, 0),   # red
            'high': (255, 128, 0),     # orange
            'medium': (255, 255, 0),   # yellow
        }
        color = colors.get(severity, (255, 255, 255))

        # Thicken the one-pixel boundary so it is visible at a glance.
        kernel = np.ones((3, 3), np.uint8)
        dilated = cv2.dilate(boundary.astype(np.uint8), kernel, iterations=2)

        # Blend the highlight into the original image at 50% opacity.
        overlay = image.copy()
        overlay[dilated > 0] = color
        cv2.addWeighted(overlay, 0.5, image, 0.5, 0, image)

        return image

    def generate_report(self, results: Dict) -> str:
        """Generate a detailed text report of the contrast analysis."""
        stats = results['statistics']
        issues = results['issues']

        report = []
        report.append("=== Universal Contrast Analysis Report ===\n")

        report.append(f"Total segments analyzed: {stats['total_segments']}")
        report.append(f"Adjacent pairs analyzed: {stats['analyzed_pairs']}")
        report.append(f"Low contrast pairs found: {stats['low_contrast_pairs']}")
        report.append(f"  - Critical issues: {stats['critical_issues']}")
        report.append(f"  - High priority issues: {stats['high_priority_issues']}")
        report.append(f"  - Medium priority issues: {stats['medium_priority_issues']}")
        report.append(f"Floor-object contrast issues: {stats['floor_object_issues']}\n")

        if issues:
            report.append("=== Contrast Issues (sorted by severity, top 10) ===\n")

            for i, issue in enumerate(issues[:10], 1):
                cat1, cat2 = issue['categories']
                wcag = issue['wcag_ratio']
                hue_diff = issue['hue_difference']
                sat_diff = issue['saturation_difference']
                severity = issue['severity'].upper()

                report.append(f"{i}. [{severity}] {cat1} ↔ {cat2}")
                report.append(f"   - WCAG Contrast Ratio: {wcag:.2f}:1")

                # Thresholds mirror those enforced in is_contrast_sufficient.
                if severity == 'CRITICAL':
                    report.append("   - Required: 7:1 minimum")
                elif severity == 'HIGH':
                    report.append("   - Required: 4.5:1 minimum")
                else:
                    report.append("   - Required: 3:1 minimum")

                report.append(f"   - Hue Difference: {hue_diff:.1f}° (recommended: >30°)")
                report.append(f"   - Saturation Difference: {sat_diff:.1f}% (recommended: >50%)")

                if issue['is_floor_object']:
                    report.append("   - ⚠️ Floor-object boundary - high visibility required!")

                report.append(f"   - Boundary size: {issue['boundary_pixels']} pixels")
                report.append("")
        else:
            report.append("✅ No contrast issues detected!")

        return "\n".join(report)