|
""" |
|
Universal Contrast Analyzer for detecting low contrast between ALL adjacent objects. |
|
Optimized for Alzheimer's/dementia care environments. |
|
""" |
|
|
|
import numpy as np |
|
import cv2 |
|
from typing import Dict, List, Tuple, Optional |
|
import logging |
|
from scipy.spatial import distance |
|
from skimage.segmentation import find_boundaries |
|
|
|
logger = logging.getLogger(__name__) |
|
|
|
|
|
class UniversalContrastAnalyzer:

    """
    Analyzes contrast between ALL adjacent objects in a room.
    Ensures proper visibility for elderly individuals with Alzheimer's or dementia.
    """

    def __init__(self, wcag_threshold: float = 4.5):
        """
        Args:
            wcag_threshold: Minimum acceptable WCAG contrast ratio between
                adjacent surfaces. The default 4.5 matches the WCAG 2.0 AA
                requirement for normal text.
        """
        self.wcag_threshold = wcag_threshold

        # Segmentation class IDs grouped into room-element categories.
        # NOTE(review): these appear to be ADE20K class indices — confirm
        # against the segmentation model used upstream before editing.
        self.semantic_classes = {
            'floor': [3, 4, 13, 28, 78],
            'wall': [0, 1, 9],
            'ceiling': [5],
            'furniture': [10, 19, 15, 7, 18, 23, 30, 33, 34, 36, 44, 45, 57, 63, 64, 65, 75],
            'door': [25, 14],
            'window': [8],
            'stairs': [53, 59],
            'objects': [17, 20, 24, 37, 38, 39, 42, 62, 68, 71, 73, 80, 82, 84, 89, 90, 92, 93],
            'fixtures': [32, 46, 49, 50, 54, 66, 69, 70, 77, 94, 97, 98, 99, 117, 118, 119, 120],
        }

        # Reverse lookup: class id -> category name.
        self.class_to_category = {}
        for category, class_ids in self.semantic_classes.items():
            for class_id in class_ids:
                self.class_to_category[class_id] = category

    def calculate_wcag_contrast(self, color1: np.ndarray, color2: np.ndarray) -> float:
        """
        Calculate the WCAG 2.0 contrast ratio between two RGB colors.

        Args:
            color1: Length-3 RGB array, channel values 0-255.
            color2: Length-3 RGB array, channel values 0-255.

        Returns:
            Contrast ratio in [1.0, 21.0]; higher means more contrast.
        """
        def relative_luminance(rgb):
            # Normalize to [0, 1], then linearize with the sRGB transfer
            # function exactly as specified by WCAG 2.0.
            rgb_norm = rgb / 255.0
            rgb_linear = np.where(
                rgb_norm <= 0.03928,
                rgb_norm / 12.92,
                ((rgb_norm + 0.055) / 1.055) ** 2.4
            )
            # Rec. 709 luma coefficients.
            return np.dot(rgb_linear, [0.2126, 0.7152, 0.0722])

        lum1 = relative_luminance(color1)
        lum2 = relative_luminance(color2)

        lighter = max(lum1, lum2)
        darker = min(lum1, lum2)

        # The +0.05 flare term comes from the WCAG formula and also keeps the
        # division safe when the darker color is pure black.
        return (lighter + 0.05) / (darker + 0.05)

    def calculate_hue_difference(self, color1: np.ndarray, color2: np.ndarray) -> float:
        """Calculate hue difference in degrees (0-90 on OpenCV's 0-179 hue scale)."""
        hsv1 = cv2.cvtColor(color1.reshape(1, 1, 3).astype(np.uint8), cv2.COLOR_RGB2HSV)[0, 0]
        hsv2 = cv2.cvtColor(color2.reshape(1, 1, 3).astype(np.uint8), cv2.COLOR_RGB2HSV)[0, 0]

        # BUGFIX: cast to int before subtracting. The HSV channels are uint8,
        # and NumPy uint8 arithmetic wraps modulo 256 (e.g. 10 - 170 -> 96),
        # which silently corrupted the difference. The saturation method below
        # already did this correctly.
        hue_diff = abs(int(hsv1[0]) - int(hsv2[0]))
        # OpenCV hue spans 0-179; take the shorter way around the hue circle.
        if hue_diff > 90:
            hue_diff = 180 - hue_diff

        return hue_diff

    def calculate_saturation_difference(self, color1: np.ndarray, color2: np.ndarray) -> float:
        """Calculate saturation difference (0-255)."""
        hsv1 = cv2.cvtColor(color1.reshape(1, 1, 3).astype(np.uint8), cv2.COLOR_RGB2HSV)[0, 0]
        hsv2 = cv2.cvtColor(color2.reshape(1, 1, 3).astype(np.uint8), cv2.COLOR_RGB2HSV)[0, 0]

        # int() casts avoid uint8 wraparound on subtraction.
        return abs(int(hsv1[1]) - int(hsv2[1]))

    def extract_dominant_color(self, image: np.ndarray, mask: np.ndarray,
                               sample_size: int = 1000) -> np.ndarray:
        """
        Extract the dominant color of a masked region using robust statistics.

        Args:
            image: RGB image, shape (H, W, 3).
            mask: Boolean mask, shape (H, W), selecting the region.
            sample_size: Maximum number of pixels sampled for the estimate.

        Returns:
            Length-3 integer RGB array; mid-gray [128, 128, 128] when the
            mask selects no pixels.
        """
        if not np.any(mask):
            return np.array([128, 128, 128])

        masked_pixels = image[mask]
        if len(masked_pixels) == 0:
            return np.array([128, 128, 128])

        # Subsample large regions to bound the cost of the median.
        if len(masked_pixels) > sample_size:
            indices = np.random.choice(len(masked_pixels), sample_size, replace=False)
            masked_pixels = masked_pixels[indices]

        # Per-channel median is robust to highlights, shadows and noise.
        dominant_color = np.median(masked_pixels, axis=0).astype(int)

        return dominant_color

    def find_adjacent_segments(self, segmentation: np.ndarray) -> Dict[Tuple[int, int], np.ndarray]:
        """
        Find all pairs of adjacent segments and their boundaries.
        Returns dict mapping (seg1_id, seg2_id) to boundary mask.
        """
        adjacencies = {}

        boundaries = find_boundaries(segmentation, mode='inner')

        h, w = segmentation.shape

        # PERF: only boundary pixels can contribute an adjacency, so iterate
        # just those coordinates (row-major, same order as the old full scan)
        # instead of running a Python loop over every interior pixel.
        for y, x in zip(*np.nonzero(boundaries)):
            # Skip the one-pixel image border so 4-neighborhood reads are safe
            # (the previous scan covered the interior only, too).
            if not (0 < y < h - 1 and 0 < x < w - 1):
                continue

            center_id = segmentation[y, x]

            neighbors = [
                segmentation[y - 1, x],
                segmentation[y + 1, x],
                segmentation[y, x - 1],
                segmentation[y, x + 1]
            ]

            for neighbor_id in neighbors:
                # Ignore same-segment neighbors and background (id 0).
                if neighbor_id != center_id and neighbor_id != 0:
                    # Canonical ordering so (a, b) and (b, a) share one entry.
                    pair = tuple(sorted([center_id, neighbor_id]))

                    if pair not in adjacencies:
                        adjacencies[pair] = np.zeros((h, w), dtype=bool)
                    adjacencies[pair][y, x] = True

        # Drop spurious adjacencies that touch along only a few pixels.
        min_boundary_pixels = 10
        filtered_adjacencies = {}
        for pair, boundary in adjacencies.items():
            if np.sum(boundary) >= min_boundary_pixels:
                filtered_adjacencies[pair] = boundary

        return filtered_adjacencies

    def is_object_on_surface(self, obj_mask: np.ndarray, surface_mask: np.ndarray,
                             min_contact_ratio: float = 0.1) -> bool:
        """
        Determine if an object is resting on a surface (e.g., object on floor).
        Uses vertical proximity and overlap analysis.

        Args:
            obj_mask: Boolean mask of the object.
            surface_mask: Boolean mask of the candidate supporting surface.
            min_contact_ratio: Fraction of the object's bottom strip that must
                overlap the surface to count as contact.

        Returns:
            True when the object's bottom strip overlaps the surface by at
            least ``min_contact_ratio``.
        """
        if not np.any(obj_mask) or not np.any(surface_mask):
            return False

        obj_coords = np.where(obj_mask)
        if len(obj_coords[0]) == 0:
            return False

        # Keep only the object's bottom strip (lowest ~6 rows of the object).
        # BUGFIX: clamp the slice bound at 0. When the object's bottom edge is
        # within the top five image rows, obj_bottom_y - 5 went negative and
        # sliced relative to the image end, clearing the wrong rows and making
        # the function always report "not on surface" for such objects.
        obj_bottom_y = np.max(obj_coords[0])
        obj_bottom_mask = obj_mask.copy()
        obj_bottom_mask[:max(0, obj_bottom_y - 5), :] = False

        overlap = obj_bottom_mask & surface_mask

        obj_bottom_pixels = np.sum(obj_bottom_mask)
        if obj_bottom_pixels == 0:
            return False

        contact_ratio = np.sum(overlap) / obj_bottom_pixels

        return contact_ratio >= min_contact_ratio

    def analyze_contrast(self, image: np.ndarray, segmentation: np.ndarray) -> Dict:
        """
        Perform comprehensive contrast analysis between ALL adjacent objects.

        Args:
            image: RGB image
            segmentation: Segmentation mask with class IDs

        Returns:
            Dictionary containing analysis results and visualizations
        """
        h, w = segmentation.shape
        results = {
            'issues': [],
            'visualization': image.copy(),
            'statistics': {
                'total_segments': 0,
                'analyzed_pairs': 0,
                'low_contrast_pairs': 0,
                'critical_issues': 0,
                'floor_object_issues': 0
            }
        }

        # Background (id 0) is never analyzed.
        unique_segments = np.unique(segmentation)
        unique_segments = unique_segments[unique_segments != 0]
        results['statistics']['total_segments'] = len(unique_segments)

        # Precompute per-segment category, mask and dominant color.
        segment_info = {}
        floor_segments = []

        for seg_id in unique_segments:
            mask = segmentation == seg_id
            # Tiny fragments carry unreliable color statistics; skip them.
            if np.sum(mask) < 50:
                continue

            category = self.class_to_category.get(seg_id, 'unknown')
            color = self.extract_dominant_color(image, mask)

            segment_info[seg_id] = {
                'category': category,
                'mask': mask,
                'color': color,
                'area': np.sum(mask),
                'class_id': seg_id
            }

            if category == 'floor':
                floor_segments.append(seg_id)

        adjacencies = self.find_adjacent_segments(segmentation)

        for (seg1_id, seg2_id), boundary in adjacencies.items():
            # Either side may have been filtered out above as too small.
            if seg1_id not in segment_info or seg2_id not in segment_info:
                continue

            info1 = segment_info[seg1_id]
            info2 = segment_info[seg2_id]

            results['statistics']['analyzed_pairs'] += 1

            wcag_ratio = self.calculate_wcag_contrast(info1['color'], info2['color'])
            hue_diff = self.calculate_hue_difference(info1['color'], info2['color'])
            sat_diff = self.calculate_saturation_difference(info1['color'], info2['color'])

            has_issue = False
            severity = 'low'

            # Primary check: WCAG luminance contrast against the threshold.
            if wcag_ratio < self.wcag_threshold:
                has_issue = True
                if wcag_ratio < 3.0:
                    severity = 'critical'
                elif wcag_ratio < 4.0:
                    severity = 'high'
                else:
                    severity = 'medium'

            # Secondary check: similar hue AND saturation with only moderate
            # luminance contrast is still hard to distinguish for low vision.
            if hue_diff < 30 and sat_diff < 50 and wcag_ratio < 7.0:
                has_issue = True
                if severity == 'low':
                    severity = 'medium'

            if has_issue:
                results['statistics']['low_contrast_pairs'] += 1

                # Objects resting on the floor are a trip hazard, so escalate.
                is_floor_object = False
                if info1['category'] == 'floor' or info2['category'] == 'floor':
                    if info1['category'] == 'floor':
                        floor_info, obj_info = info1, info2
                    else:
                        floor_info, obj_info = info2, info1

                    if self.is_object_on_surface(obj_info['mask'], floor_info['mask']):
                        is_floor_object = True
                        results['statistics']['floor_object_issues'] += 1
                        if severity != 'critical':
                            severity = 'high'

                if severity == 'critical':
                    results['statistics']['critical_issues'] += 1

                issue = {
                    'segment_ids': (seg1_id, seg2_id),
                    'categories': (info1['category'], info2['category']),
                    'colors': (info1['color'], info2['color']),
                    'wcag_ratio': wcag_ratio,
                    'hue_difference': hue_diff,
                    'saturation_difference': sat_diff,
                    'boundary_pixels': np.sum(boundary),
                    'severity': severity,
                    'is_floor_object': is_floor_object,
                    'boundary_mask': boundary
                }

                results['issues'].append(issue)

                self._visualize_issue(results['visualization'], boundary, severity)

        # Most severe first; sort is stable so discovery order is kept within
        # each severity level.
        severity_order = {'critical': 0, 'high': 1, 'medium': 2, 'low': 3}
        results['issues'].sort(key=lambda x: severity_order[x['severity']])

        return results

    def _visualize_issue(self, image: np.ndarray, boundary: np.ndarray, severity: str):
        """
        Paint the (dilated) boundary onto ``image`` in a severity color.

        Mutates ``image`` in place and also returns it for convenience.
        """
        # RGB severity palette; unknown severities fall back to white.
        colors = {
            'critical': (255, 0, 0),
            'high': (255, 128, 0),
            'medium': (255, 255, 0),
            'low': (128, 255, 128)
        }

        color = colors.get(severity, (255, 255, 255))

        # Dilate the one-pixel boundary so the overlay is clearly visible.
        kernel = np.ones((3, 3), np.uint8)
        dilated = cv2.dilate(boundary.astype(np.uint8), kernel, iterations=2)

        image[dilated > 0] = color

        return image

    def generate_report(self, results: Dict) -> str:
        """
        Generate a detailed text report of contrast analysis.

        Args:
            results: Dictionary as produced by :meth:`analyze_contrast`.

        Returns:
            Human-readable multi-line report string.
        """
        stats = results['statistics']
        issues = results['issues']

        report = []
        report.append("=== Universal Contrast Analysis Report ===\n")

        # Summary statistics.
        report.append(f"Total segments analyzed: {stats['total_segments']}")
        report.append(f"Adjacent pairs analyzed: {stats['analyzed_pairs']}")
        report.append(f"Low contrast pairs found: {stats['low_contrast_pairs']}")
        report.append(f"Critical issues: {stats['critical_issues']}")
        report.append(f"Floor-object contrast issues: {stats['floor_object_issues']}\n")

        # Per-issue details, already sorted most-severe-first.
        if issues:
            report.append("=== Contrast Issues (sorted by severity) ===\n")

            for i, issue in enumerate(issues, 1):
                cat1, cat2 = issue['categories']
                wcag = issue['wcag_ratio']
                severity = issue['severity'].upper()

                report.append(f"{i}. [{severity}] {cat1} ↔ {cat2}")
                report.append(f"   - WCAG Contrast Ratio: {wcag:.2f} (minimum: {self.wcag_threshold})")
                report.append(f"   - Hue Difference: {issue['hue_difference']:.1f}°")
                report.append(f"   - Saturation Difference: {issue['saturation_difference']}")

                if issue['is_floor_object']:
                    report.append("   - ⚠️ Object on floor - requires high visibility!")

                report.append(f"   - Boundary size: {issue['boundary_pixels']} pixels")
                report.append("")
        else:
            report.append("✅ No contrast issues detected!")

        return "\n".join(report)