import sys

import matplotlib.pyplot as plt
import numpy as np
import torch
from PIL import Image


class RegionColorMatcher:

    def __init__(self, factor=1.0, preserve_colors=True, preserve_luminance=True, method="adain"):
        """
        Initialize the RegionColorMatcher.

        Args:
            factor: Strength of the color matching (0.0 to 1.0)
            preserve_colors: If True, convert to YUV and preserve color relationships
            preserve_luminance: If True, preserve the luminance when in YUV mode
            method: The color matching method to use
                (adain, mkl, hm, reinhard, mvgd, hm-mvgd-hm, hm-mkl-hm, coral)
        """
        self.factor = factor
        self.preserve_colors = preserve_colors
        self.preserve_luminance = preserve_luminance
        self.method = method

    def match_regions(self, img1_path, img2_path, masks1, masks2):
        """
        Match colors between corresponding masked regions of two images.

        Args:
            img1_path: Path to first image
            img2_path: Path to second image
            masks1: Dictionary of masks for first image {label: binary_mask}
            masks2: Dictionary of masks for second image {label: binary_mask}

        Returns:
            A PIL Image with the color-matched result
        """
        print(f"🎨 Color matching with method: {self.method}")
        print(f"📊 Processing {len(masks1)} regions from img1 and {len(masks2)} regions from img2")

        img1 = Image.open(img1_path).convert("RGB")
        img2 = Image.open(img2_path).convert("RGB")

        # Normalize both images to float32 in [0, 1].
        img1_np = np.array(img1).astype(np.float32) / 255.0
        img2_np = np.array(img2).astype(np.float32) / 255.0

        # Start from a copy of img2; matched regions are written into it.
        result_np = np.copy(img2_np)

        img1_tensor = torch.from_numpy(img1_np)
        img2_tensor = torch.from_numpy(img2_np)
        result_tensor = torch.from_numpy(result_np)

        # Track how much of img2 the masks cover, for the summary below.
        total_coverage = np.zeros(img2_np.shape[:2], dtype=np.float32)
        processed_regions = 0

        for label, mask1 in masks1.items():
            if label not in masks2:
                print(f"⚠️ Skipping {label} - not found in masks2")
                continue

            mask2 = masks2[label]

            # Resize masks to their image dimensions if needed.
            if mask1.shape != img1_np.shape[:2]:
                mask1 = self._resize_mask(mask1, img1_np.shape[:2])
            if mask2.shape != img2_np.shape[:2]:
                mask2 = self._resize_mask(mask2, img2_np.shape[:2])

            mask1_pixels = np.sum(mask1 > 0)
            mask2_pixels = np.sum(mask2 > 0)
            print(f"📊 Processing {label}: {mask1_pixels} pixels (img1) → {mask2_pixels} pixels (img2)")

            if mask1_pixels == 0 or mask2_pixels == 0:
                print(f"⚠️ Skipping {label} - no pixels in mask")
                continue

            total_coverage += (mask2 > 0).astype(np.float32)
            processed_regions += 1

            mask1_tensor = torch.from_numpy(mask1.astype(np.float32))
            mask2_tensor = torch.from_numpy(mask2.astype(np.float32))

            if self.method == "adain":
                result_tensor = self._apply_adain_to_region(
                    img1_tensor,
                    img2_tensor,
                    result_tensor,
                    mask1_tensor,
                    mask2_tensor
                )
            else:
                result_tensor = self._apply_color_matcher_to_region(
                    img1_tensor,
                    img2_tensor,
                    result_tensor,
                    mask1_tensor,
                    mask2_tensor,
                    self.method
                )

            print(f"✅ Completed color matching for {label}")

        total_pixels = img2_np.shape[0] * img2_np.shape[1]
        covered_pixels = np.sum(total_coverage > 0)
        overlap_pixels = np.sum(total_coverage > 1)

        print("📊 Coverage summary:")
        print(f"   Total image pixels: {total_pixels}")
        print(f"   Covered pixels: {covered_pixels} ({100*covered_pixels/total_pixels:.1f}%)")
        print(f"   Overlapping pixels: {overlap_pixels} ({100*overlap_pixels/total_pixels:.1f}%)")
        print(f"   Processed regions: {processed_regions}")

        # Back to uint8 for PIL.
        result_np = (result_tensor.numpy() * 255.0).astype(np.uint8)
        result_img = Image.fromarray(result_np)

        return result_img

    def _resize_mask(self, mask, target_shape):
        """
        Resize a mask to match the target shape.

        Args:
            mask: Binary mask array
            target_shape: Target shape (height, width)

        Returns:
            Resized mask array
        """
        mask_img = Image.fromarray((mask * 255).astype(np.uint8))

        # PIL expects (width, height); nearest neighbor keeps the mask binary.
        mask_img = mask_img.resize((target_shape[1], target_shape[0]), Image.NEAREST)

        resized_mask = np.array(mask_img).astype(np.float32) / 255.0
        return resized_mask

    def _apply_adain_to_region(self, source_img, target_img, result_img, source_mask, target_mask):
        """
        Apply AdaIN to match the statistics of the masked region in source to the target.

        Args:
            source_img: Source image tensor [H,W,3] (reference for color matching)
            target_img: Target image tensor [H,W,3] (to be color matched)
            result_img: Result image tensor to modify [H,W,3]
            source_mask: Binary mask for source image [H,W]
            target_mask: Binary mask for target image [H,W]

        Returns:
            Modified result tensor
        """
        source_mask_binary = (source_mask > 0.5).float()
        target_mask_binary = (target_mask > 0.5).float()

        if self.preserve_colors:
            # BT.601 RGB -> YUV conversion matrix.
            rgb_to_yuv = torch.tensor([
                [0.299, 0.587, 0.114],
                [-0.14713, -0.28886, 0.436],
                [0.615, -0.51499, -0.10001]
            ])

            source_yuv = torch.matmul(source_img, rgb_to_yuv.T)
            target_yuv = torch.matmul(target_img, rgb_to_yuv.T)
            result_yuv = torch.matmul(result_img, rgb_to_yuv.T)

            # Always match the chroma channels (U, V); match luminance (Y)
            # only when it should not be preserved.
            channels_to_process = [] if self.preserve_luminance else [0]
            channels_to_process.extend([1, 2])

            for c in channels_to_process:
                result_channel = result_yuv[:, :, c]
                matched_channel = self._match_channel_statistics(
                    source_yuv[:, :, c],
                    target_yuv[:, :, c],
                    result_channel,
                    source_mask_binary,
                    target_mask_binary
                )

                # Only overwrite pixels inside the target mask.
                result_yuv[:, :, c] = torch.where(
                    target_mask_binary > 0.5,
                    matched_channel,
                    result_channel
                )

            # BT.601 YUV -> RGB conversion matrix.
            yuv_to_rgb = torch.tensor([
                [1.0, 0.0, 1.13983],
                [1.0, -0.39465, -0.58060],
                [1.0, 2.03211, 0.0]
            ])

            result_rgb = torch.matmul(result_yuv, yuv_to_rgb.T)

            mask_expanded = target_mask_binary.unsqueeze(-1).expand_as(result_img)
            result_img = torch.where(
                mask_expanded > 0.5,
                result_rgb,
                result_img
            )

        else:
            # Match each RGB channel independently.
            for c in range(3):
                result_channel = result_img[:, :, c]
                matched_channel = self._match_channel_statistics(
                    source_img[:, :, c],
                    target_img[:, :, c],
                    result_channel,
                    source_mask_binary,
                    target_mask_binary
                )

                result_img[:, :, c] = torch.where(
                    target_mask_binary > 0.5,
                    matched_channel,
                    result_channel
                )

        return torch.clamp(result_img, 0.0, 1.0)

    def _apply_color_matcher_to_region(self, source_img, target_img, result_img, source_mask, target_mask, method):
        """
        Apply color-matcher library methods to match the statistics of the masked region in source to the target.

        Args:
            source_img: Source image tensor [H,W,3] (reference for color matching)
            target_img: Target image tensor [H,W,3] (to be color matched)
            result_img: Result image tensor to modify [H,W,3]
            source_mask: Binary mask for source image [H,W]
            target_mask: Binary mask for target image [H,W]
            method: The color matching method to use (mkl, hm, reinhard, mvgd, hm-mvgd-hm, hm-mkl-hm, coral)

        Returns:
            Modified result tensor
        """
        source_mask_binary = (source_mask > 0.5).float()
        target_mask_binary = (target_mask > 0.5).float()

        source_np = source_img.detach().cpu().numpy()
        target_np = target_img.detach().cpu().numpy()
        source_mask_np = source_mask_binary.detach().cpu().numpy()
        target_mask_np = target_mask_binary.detach().cpu().numpy()

        try:
            # Import color-matcher lazily, installing it on first use if needed.
            try:
                from color_matcher import ColorMatcher
            except ImportError:
                self._install_package("color-matcher")
                from color_matcher import ColorMatcher

            source_coords = np.where(source_mask_np > 0.5)
            target_coords = np.where(target_mask_np > 0.5)

            if len(source_coords[0]) == 0 or len(target_coords[0]) == 0:
                return result_img

            # Pixels inside each mask, shape [N, 3].
            source_pixels = source_np[source_coords]
            target_pixels = target_np[target_coords]

            cm = ColorMatcher()

            if method in ("mkl", "reinhard"):
                # Both branches use simplified per-channel mean/std matching over
                # the masked pixels (a Reinhard-style transfer; the full MKL
                # transform is not computed here).
                source_mean = np.mean(source_pixels, axis=0)
                source_std = np.std(source_pixels, axis=0)
                target_mean = np.mean(target_pixels, axis=0)
                target_std = np.std(target_pixels, axis=0)

                result_np = np.copy(target_np)
                for c in range(3):
                    normalized = (target_np[:, :, c] - target_mean[c]) / (target_std[c] + 1e-8) * source_std[c] + source_mean[c]
                    result_np[:, :, c] = np.where(target_mask_np > 0.5, normalized, target_np[:, :, c])

                result_tensor = torch.from_numpy(result_np).to(result_img.device)
                result_img = torch.lerp(result_img, result_tensor, self.factor)

            elif method == "mvgd":
                # Multivariate Gaussian transfer: match mean and covariance.
                source_mean = np.mean(source_pixels, axis=0)
                source_cov = np.cov(source_pixels, rowvar=False)
                target_mean = np.mean(target_pixels, axis=0)
                target_cov = np.cov(target_pixels, rowvar=False)

                if np.isnan(source_cov).any() or np.isnan(target_cov).any():
                    # Degenerate covariance; fall back to per-channel matching.
                    source_std = np.std(source_pixels, axis=0)
                    target_std = np.std(target_pixels, axis=0)

                    result_np = np.copy(target_np)
                    for c in range(3):
                        normalized = (target_np[:, :, c] - target_mean[c]) / (target_std[c] + 1e-8) * source_std[c] + source_mean[c]
                        result_np[:, :, c] = np.where(target_mask_np > 0.5, normalized, target_np[:, :, c])
                else:
                    result_np = np.copy(target_np)
                    try:
                        # Build the linear map A that transports the target
                        # distribution onto the source distribution.
                        source_cov_sqrt = np.linalg.cholesky(source_cov)
                        target_cov_sqrt = np.linalg.cholesky(target_cov)
                        target_cov_sqrt_inv = np.linalg.inv(target_cov_sqrt)

                        temp = target_cov_sqrt_inv @ source_cov @ target_cov_sqrt_inv.T
                        temp_sqrt_inv = np.linalg.inv(np.linalg.cholesky(temp))
                        A = target_cov_sqrt @ temp_sqrt_inv @ target_cov_sqrt_inv

                        # Apply the transform to every masked pixel at once.
                        transformed = (target_np.reshape(-1, 3) - target_mean) @ A.T + source_mean
                        transformed = transformed.reshape(target_np.shape)
                        mask3 = (target_mask_np > 0.5)[..., None]
                        result_np = np.where(mask3, transformed, target_np).astype(np.float32)
                    except np.linalg.LinAlgError:
                        # Cholesky failed; fall back to per-channel matching.
                        source_std = np.std(source_pixels, axis=0)
                        target_std = np.std(target_pixels, axis=0)

                        for c in range(3):
                            normalized = (target_np[:, :, c] - target_mean[c]) / (target_std[c] + 1e-8) * source_std[c] + source_mean[c]
                            result_np[:, :, c] = np.where(target_mask_np > 0.5, normalized, target_np[:, :, c])

                result_tensor = torch.from_numpy(result_np).to(result_img.device)
                result_img = torch.lerp(result_img, result_tensor, self.factor)

            elif method in ("hm", "hm-mvgd-hm", "hm-mkl-hm"):
                # Histogram-based methods operate on image crops, so cut out the
                # bounding box of each masked region.
                source_min_y, source_min_x = np.min(source_coords[0]), np.min(source_coords[1])
                source_max_y, source_max_x = np.max(source_coords[0]), np.max(source_coords[1])
                target_min_y, target_min_x = np.min(target_coords[0]), np.min(target_coords[1])
                target_max_y, target_max_x = np.max(target_coords[0]), np.max(target_coords[1])

                source_crop = source_np[source_min_y:source_max_y + 1, source_min_x:source_max_x + 1].copy()
                target_crop = target_np[target_min_y:target_max_y + 1, target_min_x:target_max_x + 1].copy()

                source_mask_crop = source_mask_np[source_min_y:source_max_y + 1, source_min_x:source_max_x + 1]
                target_mask_crop = target_mask_np[target_min_y:target_max_y + 1, target_min_x:target_max_x + 1]

                # Fill pixels outside the mask with the region's average color so
                # they do not skew the histograms.
                source_avg_color = np.mean(source_pixels, axis=0)
                target_avg_color = np.mean(target_pixels, axis=0)

                for c in range(3):
                    source_crop[:, :, c] = np.where(source_mask_crop > 0.5, source_crop[:, :, c], source_avg_color[c])
                    target_crop[:, :, c] = np.where(target_mask_crop > 0.5, target_crop[:, :, c], target_avg_color[c])

                try:
                    matched_crop = cm.transfer(src=target_crop, ref=source_crop, method=method)

                    # Paste the matched crop back, but only inside the mask.
                    result_np = np.copy(target_np)
                    region = result_np[target_min_y:target_max_y + 1, target_min_x:target_max_x + 1]
                    region_mask = target_mask_crop > 0.5
                    region[region_mask] = matched_crop[region_mask]

                    result_tensor = torch.from_numpy(result_np).to(result_img.device)
                    result_img = torch.lerp(result_img, result_tensor, self.factor)

                except Exception as e:
                    print(f"Color matcher failed for {method}, using fallback: {str(e)}")
                    result_img = self._apply_adain_to_region(
                        source_img,
                        target_img,
                        result_img,
                        source_mask_binary,
                        target_mask_binary
                    )

            elif method == "coral":
                try:
                    # Mask out irrelevant pixels by filling them with the region's
                    # average color, then run CORAL on the full images.
                    source_masked = source_np.copy()
                    target_masked = target_np.copy()

                    source_avg_color = np.mean(source_pixels, axis=0)
                    target_avg_color = np.mean(target_pixels, axis=0)

                    for c in range(3):
                        source_masked[:, :, c] = np.where(source_mask_np > 0.5, source_masked[:, :, c], source_avg_color[c])
                        target_masked[:, :, c] = np.where(target_mask_np > 0.5, target_masked[:, :, c], target_avg_color[c])

                    # coral() expects [C,H,W] tensors.
                    source_tensor = torch.from_numpy(source_masked).permute(2, 0, 1).float()
                    target_tensor = torch.from_numpy(target_masked).permute(2, 0, 1).float()

                    matched_tensor = coral(target_tensor, source_tensor)
                    matched_np = matched_tensor.permute(1, 2, 0).numpy()

                    result_np = np.copy(target_np)
                    for c in range(3):
                        result_np[:, :, c] = np.where(target_mask_np > 0.5, matched_np[:, :, c], target_np[:, :, c])

                    result_tensor = torch.from_numpy(result_np).to(result_img.device)
                    result_img = torch.lerp(result_img, result_tensor, self.factor)

                except Exception as e:
                    print(f"CORAL failed for {method}, using fallback: {str(e)}")
                    result_img = self._apply_adain_to_region(
                        source_img,
                        target_img,
                        result_img,
                        source_mask_binary,
                        target_mask_binary
                    )
            else:
                # Unknown method: fall back to AdaIN.
                result_img = self._apply_adain_to_region(
                    source_img,
                    target_img,
                    result_img,
                    source_mask_binary,
                    target_mask_binary
                )

        except Exception as e:
            print(f"Error in color matching: {str(e)}, using AdaIN as fallback")
            result_img = self._apply_adain_to_region(
                source_img,
                target_img,
                result_img,
                source_mask_binary,
                target_mask_binary
            )

        return torch.clamp(result_img, 0.0, 1.0)

    def _match_channel_statistics(self, source_channel, target_channel, result_channel, source_mask, target_mask):
        """
        Match the statistics of a single channel.

        Args:
            source_channel: Source channel [H,W] (reference for color matching)
            target_channel: Target channel [H,W] (to be color matched)
            result_channel: Result channel to modify [H,W]
            source_mask: Binary mask for source [H,W]
            target_mask: Binary mask for target [H,W]

        Returns:
            Modified result channel
        """
        source_count = torch.sum(source_mask)
        target_count = torch.sum(target_mask)

        if source_count > 0 and target_count > 0:
            # Masked means.
            source_masked = source_channel * source_mask
            target_masked = target_channel * target_mask

            source_mean = torch.sum(source_masked) / source_count
            target_mean = torch.sum(target_masked) / target_count

            # Masked variances (epsilon guards against division by zero).
            source_var = torch.sum(((source_channel - source_mean) * source_mask) ** 2) / source_count
            target_var = torch.sum(((target_channel - target_mean) * target_mask) ** 2) / target_count

            source_std = torch.sqrt(source_var + 1e-8)
            target_std = torch.sqrt(target_var + 1e-8)

            # AdaIN-style normalization: shift/scale the target statistics onto
            # the source statistics, then blend by self.factor.
            normalized = ((target_channel - target_mean) / target_std) * source_std + source_mean
            result = torch.lerp(target_channel, normalized, self.factor)
            return result

        return result_channel

    def _install_package(self, package_name):
        """Install a package using pip."""
        import subprocess
        subprocess.check_call([sys.executable, "-m", "pip", "install", package_name])


def create_comparison_figure(original_img, matched_img, title="Color Matching Comparison"):
    """
    Create a matplotlib figure with the original and color-matched images.

    Args:
        original_img: Original PIL Image
        matched_img: Color-matched PIL Image
        title: Title for the figure

    Returns:
        matplotlib Figure
    """
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))

    ax1.imshow(original_img)
    ax1.set_title("Original")
    ax1.axis('off')

    ax2.imshow(matched_img)
    ax2.set_title("Color Matched")
    ax2.axis('off')

    plt.suptitle(title)
    plt.tight_layout()

    return fig


def coral(source, target):
    """
    CORAL (CORrelation ALignment) color transfer.
    Based on the original ColorMatchImage approach.

    Args:
        source: Source image tensor [C, H, W] (to be color matched)
        target: Target image tensor [C, H, W] (reference for color matching)

    Returns:
        Color-matched source image tensor [C, H, W]
    """
    source = source.float()
    target = target.float()

    # Flatten spatial dimensions: [C, H*W].
    C, H, W = source.shape
    source_flat = source.view(C, -1)
    target_flat = target.view(C, -1)

    # Per-channel means.
    source_mean = torch.mean(source_flat, dim=1, keepdim=True)
    target_mean = torch.mean(target_flat, dim=1, keepdim=True)

    source_centered = source_flat - source_mean
    target_centered = target_flat - target_mean

    # Channel covariance matrices [C, C].
    N = source_centered.shape[1]
    source_cov = torch.mm(source_centered, source_centered.t()) / (N - 1)
    target_cov = torch.mm(target_centered, target_centered.t()) / (N - 1)

    # Regularize so the Cholesky factorizations are well-conditioned.
    eps = 1e-5
    source_cov += eps * torch.eye(C, device=source.device)
    target_cov += eps * torch.eye(C, device=source.device)

    try:
        # Whiten the source with its own Cholesky factor, then re-color with
        # the target's: x -> L_t L_s^{-1} x.
        source_chol = torch.linalg.cholesky(source_cov)
        target_chol = torch.linalg.cholesky(target_cov)

        source_chol_inv = torch.linalg.inv(source_chol)
        transform_matrix = torch.mm(target_chol, source_chol_inv)

        result_centered = torch.mm(transform_matrix, source_centered)
        result_flat = result_centered + target_mean

        result = result_flat.view(C, H, W)
        return torch.clamp(result, 0.0, 1.0)

    except Exception as e:
        print(f"CORAL Cholesky failed, using simple statistics matching: {e}")

        # Fallback: per-channel std matching.
        source_std = torch.std(source_centered, dim=1, keepdim=True)
        target_std = torch.std(target_centered, dim=1, keepdim=True)
        source_std = torch.clamp(source_std, min=eps)

        result_flat = (source_centered / source_std) * target_std + target_mean
        result = result_flat.view(C, H, W)
        return torch.clamp(result, 0.0, 1.0)
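

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): a minimal example of wiring the pieces
# together, assuming two image files on disk and one binary mask per image.
# The file names and the trivial full-frame mask below are hypothetical
# placeholders, not part of the module's API.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    ref_path, src_path = "reference.png", "input.png"  # hypothetical paths

    # Build a trivial mask covering the whole frame; in practice these would
    # come from a segmentation model, one {label: mask} entry per region.
    src_img = Image.open(src_path)
    w, h = src_img.size
    full_mask = np.ones((h, w), dtype=np.float32)

    matcher = RegionColorMatcher(factor=1.0, preserve_colors=True, method="adain")
    matched = matcher.match_regions(ref_path, src_path, {"scene": full_mask}, {"scene": full_mask})

    fig = create_comparison_figure(src_img.convert("RGB"), matched)
    fig.savefig("comparison.png")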