# mocap-ai/labeler/data_setup.py
from pathlib import Path
from typing import Tuple, List, Union
from random import randint
import h5py
import numpy as np
import torch
from torch import Tensor
from torch.utils.data import Dataset
import matplotlib.pyplot as plt
import fbx_handler
import utils
def apply_y_rotation(point_cloud_data: Tensor, angle: float = None, device: str = 'cuda') -> Tensor:
"""
    Apply a rotation around the y (up) axis to the point cloud. The rotation is random unless an angle is given.
:param point_cloud_data: `Tensor` of shape (3, 73) to modify.
:param angle: Angle as `float` in degrees to rotate the point cloud. If this is given, the rotation is not random.
:param device: `str` device on which to create the extra tensors.
:return: Modified `Tensor`.
"""
# Convert the random angle from degrees to radians.
if angle is None:
# If no angle is given, use a random angle between -180 and 180.
angle = (torch.rand(1).item() * 2 - 1) * 180 * torch.tensor(torch.pi / 180, device=device)
else:
# If an angle is given, convert this angle instead.
angle *= torch.tensor(torch.pi / 180, device=device)
# Transpose the point_cloud_data from (3, 73) to (73, 3) so we can use torch.matmul.
point_cloud_data = point_cloud_data.transpose(1, 0)
# Create the rotation matrix for the y-axis
rotation_matrix = torch.tensor([
[torch.cos(angle), 0, torch.sin(angle)],
[0, 1, 0],
[-torch.sin(angle), 0, torch.cos(angle)]], device=device)
# Apply the rotation to the point cloud data and reverse the transpose to get back to the original shape (3, 73).
return torch.matmul(point_cloud_data, rotation_matrix).transpose(1, 0)
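# Illustrative sanity check (comment only; assumes a CUDA device, matching the module defaults):
# with the p' = p @ R_y convention used above, a marker on the +x axis rotated by 90 degrees
# ends up on the +z axis.
# cloud = torch.zeros(3, 73, device='cuda'); cloud[0] = 1.0
# rotated = apply_y_rotation(cloud, angle=90.0)  # rotated[2] is approximately 1.0 for every marker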
def fill_1d_tensor_with_zeros(point_cloud: Tensor, pc_size: int = 1024, device: str = 'cuda') -> Tensor:
"""
Fill a 1D tensor with zeros, so it is as long as pc_size.
:param point_cloud: `Tensor` of shape (73,) to add zeros to.
:param pc_size: `int` amount of points that need to be in the final tensor in total.
:param device: `str` device on which to create the extra tensors.
:return: `Tensor` of shape (pc_size,).
"""
length = len(point_cloud)
if length < pc_size:
zeros = torch.zeros(pc_size - length, dtype=torch.int, device=device)
point_cloud = torch.cat((point_cloud, zeros), dim=0)
# Since we don't check if the length is longer than pc_size, always return the tensor with the pc_size slice.
return point_cloud[:pc_size]
def fill_frames_tensor(point_cloud: Tensor, pc_size: int = 1024, filler: int = -1, device: str = 'cuda') -> Tensor:
"""
    Fill a 1D tensor with a filler value (default -1), so it is as long as pc_size.
    :param point_cloud: `Tensor` of shape (73,) to pad with the filler value.
:param pc_size: `int` amount of points that need to be in the final tensor in total.
:param filler: `int` value to fill the remainder of the tensor with.
:param device: `str` device on which to create the extra tensors.
:return: `Tensor` of shape (pc_size,).
"""
length = len(point_cloud)
if length < pc_size:
        filler_values = torch.full((pc_size - length,), filler, dtype=torch.int, device=device)
        point_cloud = torch.cat((point_cloud, filler_values), dim=0)
# Since we don't check if the length is longer than pc_size, always return the tensor with the pc_size slice.
return point_cloud[:pc_size]
def convert_max_overlap(max_overlap: Union[Tuple[float, float, float], float]) -> Tuple[float, float, float]:
"""
Convert the argument max_overlap to a float tuple of length 3.
:param max_overlap: Either 3 floats or 1 float.
:return: If max_overlap is 3 floats, returns max_overlap unchanged.
If it is 1 `float`, returns a tuple of size 3 of that `float`.
"""
if isinstance(max_overlap, float):
return max_overlap, max_overlap, max_overlap
if len(max_overlap) != 3:
raise ValueError(f'max_overlap must be a tuple of length 3, not {len(max_overlap)}.')
return max_overlap
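# Example: convert_max_overlap(0.2) returns (0.2, 0.2, 0.2), while a 3-tuple such as
# (0.1, 1.0, 0.1) is returned unchanged.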
def convert_n_samples(n_samples: Union[int, float], _max: int) -> int:
"""
Convert the argument n_samples to an `int` that serves as a total samples amount.
:param n_samples: Either a `float` (representing a ratio) or an `int` (representing a number of samples).
:param _max: `int` that indicates the highest possible n_samples.
:return: An int that is never higher than _max.
"""
    # If n_samples is a float between 0 and 1, it is treated as a ratio of the total number of rows.
    if isinstance(n_samples, float):
        n_samples = int(n_samples * _max)
    # If n_samples is a negative int, use all rows except the last |n_samples| rows.
    elif n_samples < 0:
        n_samples = _max + n_samples
    # If n_samples ends up as 0 (or below) or above the total, use all rows.
    if n_samples <= 0 or n_samples > _max:
        n_samples = _max
    return n_samples
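# Illustrative examples with _max = 200 rows:
#   convert_n_samples(0.5, 200) -> 100  (float: treated as a ratio of the total)
#   convert_n_samples(-50, 200) -> 150  (negative int: all but the last 50 rows)
#   convert_n_samples(0, 200)   -> 200  (0: use every row)
#   convert_n_samples(500, 200) -> 200  (clamped to the total)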
def plot_point_cloud(point_cloud: Tensor, scale: Union[int, float] = 50) -> None:
    """
    Show a 3D scatter plot of a point cloud.
    :param point_cloud: `Tensor` of shape (n_points, 3) to plot.
    :param scale: `int` or `float` marker size for the scatter plot.
    """
    tensor = point_cloud.cpu().numpy()
# Extract x, y, and z coordinates from the tensor
x = tensor[:, 0]
y = tensor[:, 1]
z = tensor[:, 2]
# Create a 3D plot
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Scatter plot
ax.scatter(x, y, z, s=scale)
# Set axis labels
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_xlim([-0.5, 0.5])
ax.set_ylim([-0.5, 0.5])
ax.set_zlim([-0.5, 0.5])
ax.zaxis._axinfo['juggled'] = (1, 1, 0)
ax.xaxis.pane.fill = False
ax.yaxis.pane.fill = False
ax.zaxis.pane.fill = False
# Show the plot
plt.show()
def compare_point_clouds(existing, title='plot'):
    """
    Show a top-down (x/z) scatter plot of multiple point clouds, one color per cloud.
    :param existing: List of `Tensor` of shape (3, n_points) to compare.
    :param title: `str` title for the plot.
    """
    colors = plt.cm.jet(np.linspace(0, 1, len(existing)))
    plt.figure(figsize=(10, 7))
    for idx, tensor in enumerate(existing):
        tensor = tensor.cpu().numpy()
        # Extract the x (first) and z (third) coordinates.
        x_coords = tensor[0]
        z_coords = tensor[2]
        # Create a scatter plot for this cloud.
        plt.scatter(x_coords, z_coords, color=colors[idx], label=f'Tensor {idx + 1}', s=1)
    plt.title(title)
    plt.xlabel('X')
    plt.ylabel('Z')
    plt.legend()
    plt.show()
def fill_translation_cloud(translations: Tensor, n_points: int = 1024, augment=torch.rand,
apply_shuffle: bool = True, shuffle: Tensor = None, device: str = 'cuda') \
-> Tuple[Tensor, Tensor]:
"""
Fill a translation tensor with filler data, so it is as long as pc_size.
:param translations: `Tensor` of shape (3, xxx).
:param n_points: `int` amount of total points that need to be in the output.
:param augment: Torch filler function to use for generating filler points, default `torch.rand`.
:param apply_shuffle: `bool` whether to shuffle the output.
:param shuffle: `Tensor` that contains a shuffled index order that needs to be used for shuffling.
This does nothing if apply_shuffle is False.
:param device: `str` device on which to create the extra tensors.
:return: Translation and shuffle tuple of `Tensor` of shape (3, n_points), and (n_points,).
"""
# Use the second dimension as the length of the translation tensor, due to input shape (3, 73..).
length = translations.shape[1]
# Only create filler data if the length is shorter than the amount of points.
if length < n_points:
# Calculate the shape of the extra tensor, and pass it to the given augment function.
dif = (translations.shape[0], n_points - length)
extra = augment(dif, device=device)
        # Concatenate all values together to get shape (3, n_points).
translations = torch.cat((translations, extra), dim=1)
else:
translations = translations[:, :n_points]
# Shuffle if needed.
if apply_shuffle:
if shuffle is None:
shuffle = torch.randperm(n_points, device=device)
translations = torch.index_select(translations, 1, shuffle)
return translations, shuffle
def fill_point_clouds(actor_classes: Tensor, marker_classes: Tensor, translations: Tensor, frames: Tensor,
n_points: int = 1024, augment=torch.rand, apply_shuffle: bool = True, shuffle: Tensor = None,
device: str = 'cuda') \
-> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
"""
Fill a point cloud with filler data, so it is as long as pc_size.
    :param actor_classes: `Tensor` of shape (n_markers,) that contains the actor classes.
    :param marker_classes: `Tensor` of shape (n_markers,) that contains the marker classes.
    :param translations: `Tensor` of shape (3, n_markers) that contains the marker translations.
    :param frames: `Tensor` of shape (n_markers,) that contains the animated frames.
:param n_points: `int` amount of total points that need to be in the output.
:param augment: Torch filler function to use for generating filler points, default `torch.rand`.
:param apply_shuffle: `bool` whether to shuffle the output.
:param shuffle: `Tensor` that contains a shuffled index order that needs to be used for shuffling. This does nothing if apply_shuffle is False.
:param device: `str` device on which to create the extra tensors.
:return: Tuple of `Tensor` of shape (n_points,), (n_points,), (3,n_points,), (n_points,), (n_points,)
that represent the actor classes, marker classes, translations, animated frames and the shuffled indices used.
"""
# Use simple functions to create full tensors for the actors/markers/frames.
actor_classes = fill_1d_tensor_with_zeros(actor_classes, n_points, device=device)
marker_classes = fill_1d_tensor_with_zeros(marker_classes, n_points, device=device)
frames = fill_frames_tensor(frames, n_points, device=device)
# Extend the translation tensor.
length = translations.shape[1]
if length < n_points:
dif = (3, n_points - length)
extra = augment(dif, device=device)
        # Concatenate all values together to get shape (3, n_points).
translations = torch.cat((translations, extra), dim=1)
else:
translations = translations[:, :n_points]
# Shuffle if needed.
if apply_shuffle:
if shuffle is None:
shuffle = torch.randperm(n_points, device=device)
actor_classes = torch.index_select(actor_classes, 0, shuffle)
marker_classes = torch.index_select(marker_classes, 0, shuffle)
translations = torch.index_select(translations, 1, shuffle)
frames = torch.index_select(frames, 0, shuffle)
    # Returns tensors of shape (n_points,), (n_points,), (3, n_points), (n_points,), plus the shuffle indices (n_points,).
return actor_classes, marker_classes, translations, frames, shuffle
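# Minimal usage sketch (illustrative; the variable names below are placeholders): pad a 2-actor
# pose (2 * 73 = 146 markers) up to 1024 points with random filler and one shared shuffle order.
# actors, markers, trans, frames, shuffle = fill_point_clouds(
#     actor_ids, marker_ids, translations, frame_ids, n_points=1024)
# Passing the returned `shuffle` into fill_translation_cloud() keeps a second tensor, such as
# unscaled translations, in exactly the same point order.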
def remove_inf_markers(labeled: np.ndarray, device: str = 'cuda') -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
"""
Goes through the labeled data and removes all markers that have inf features. This will also scale the translations.
:param labeled: `np.ndarray` of shape (15, n_points) that contains the labeled data.
:param device: `str` device on which to create the extra tensors.
:return: Tuple of `tensor` that represent actors/markers/scaled translations/unscaled translations/frames.
"""
    # Check if the tx feature (index 2) is inf. This means the marker had no keyframe,
    # and the NN should not classify it, to avoid the network learning interpolated markers.
    # The mask is True where the marker did have a keyframe.
mask = ~np.isinf(labeled[2])
# Make tensors from the np arrays.
actor_cloud = torch.tensor(labeled[0][mask], dtype=torch.int, device=device)
marker_cloud = torch.tensor(labeled[1][mask], dtype=torch.int, device=device)
unscaled_t_cloud = labeled[2:5][:, mask]
frames = torch.tensor(labeled[-1][mask], dtype=torch.int, device=device)
# Scale the translations into a separate tensor.
scaled_t_cloud = fbx_handler.scale_translations(unscaled_t_cloud)
scaled_t_cloud = torch.tensor(scaled_t_cloud, dtype=torch.float32, device=device)
# After the scaled_t_cloud is made, we can convert the unscaled_t_cloud to a tensor too.
unscaled_t_cloud = torch.tensor(unscaled_t_cloud, dtype=torch.float32, device=device)
return actor_cloud, marker_cloud, scaled_t_cloud, unscaled_t_cloud, frames
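# Illustrative example of the keyframe mask: if labeled[2] (tx) is [0.1, inf, -0.3], the mask
# becomes [True, False, True], so the second marker is dropped from every returned tensor
# because it never had a real keyframe on this frame.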
def apply_translation(point_cloud: Tensor, t: float = 1.0, device: str = 'cuda') -> Tensor:
"""
Apply a translation to all axes of a point cloud.
:param point_cloud: `Tensor` of shape (3, n_points) that contains the point cloud.
:param t: `float` that represents the translation.
:param device: `str` device on which to create the extra tensors.
:return: `Tensor` of shape (3, n_points) that contains the point cloud with the translation applied.
"""
point_cloud[0] += torch.tensor(t, device=device)
point_cloud[1] += torch.tensor(t, device=device)
point_cloud[2] += torch.tensor(t, device=device)
return point_cloud
class TrainDataset(Dataset):
    """
    Builds randomized multi-actor point clouds from labeled mocap poses, with optional
    rotation/translation augmentation and random filler markers up to pc_size points.
    """
    def __init__(self, file: Union[Path, np.ndarray],
n_samples: Union[int, float] = 1.0,
n_attempts: int = 10,
pc_size: int = 1024,
max_actors: int = 8,
use_random_max_actors: bool = True,
use_random_translation: bool = True,
use_random_rotation: bool = True,
shuffle_markers: bool = True,
translation_factor: float = 0.9,
max_overlap: Union[Tuple[float, float, float], float] = (0.2, 0.2, 0.2),
augment=torch.rand,
debug: int = -1,
device: str = 'cuda'):
self.debug = debug
self.device = device
        # If pc_size is a number under 73, it is treated as a multiplier of the 73 markers per actor.
        if pc_size < 73:
            pc_size *= 73
        # After any multiplication, pc_size must still fit 73 markers for every actor.
        if pc_size < max_actors * 73:
            raise ValueError(f'pc_size must be large enough to contain 73 markers for {max_actors} actors '
                             f'({pc_size}/{max_actors * 73}).')
# Store most arguments as class properties, so they don't have to be passed to each function.
# These will all be deleted after the dataset is created.
self.n_attempts = n_attempts
self.pc_size = pc_size
self.max_actors = max_actors
self.shuffle_markers = shuffle_markers
self.translation_factor = translation_factor
self.max_overlap = convert_max_overlap(max_overlap)
# Isolate the dependent and independent variables.
if isinstance(file, np.ndarray):
self.all_data = file
else:
self.all_data = utils.h5_to_array4d(file)
# Shape (n_frames, 15, 73).
self.all_data = torch.tensor(self.all_data, dtype=torch.float32, device=device)
self.n_samples = convert_n_samples(n_samples, self.all_data.shape[0])
        self._print(f'Loaded {len(self.all_data)} poses, with n_samples = {n_samples}.', 0)
# Generate a random permutation of indices.
self.random_indices = torch.randperm(len(self.all_data))
self.random_idx = 0
# Initiate empty lists for all the different types of data.
actor_classes, marker_classes, translations, frames = [], [], [], []
# For each sample, create a random point cloud.
for _ in range(self.n_samples):
cur_max_actors = randint(1, max_actors) if use_random_max_actors else max_actors
actor_cloud, marker_cloud, translation_cloud, fs = self.create_sample(cur_max_actors,
use_random_rotation,
use_random_translation, augment)
actor_classes.append(actor_cloud)
marker_classes.append(marker_cloud)
translations.append(translation_cloud)
frames.append(fs)
# (n_samples, pc_size), (n_samples, pc_size), (n_samples, 3, pc_size), (n_samples,pc_size).
self.actor_classes = torch.stack(actor_classes)
self.marker_classes = torch.stack(marker_classes)
self.translations = torch.stack(translations)
self.frames = torch.stack(frames)
# Delete class properties that were only needed to create the dataset.
del self.pc_size, self.max_actors, self.shuffle_markers, self.translation_factor, self.n_samples, \
self.max_overlap, self.all_data, self.random_indices, self.random_idx, self.n_attempts
def _print(self, txt: str, lvl: int = 0) -> None:
if lvl <= self.debug:
print(txt)
def create_sample(self, max_actors: int, use_random_rotation: bool = True,
use_random_translation: bool = True, augment=torch.rand) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""
Create a random point cloud from the dataset.
:param max_actors: `int` amount of actors to aim for in this point cloud. Any missing markers will be filled.
:param use_random_rotation: `bool` whether to apply a random rotation to each actor's point cloud.
:param use_random_translation: `bool` whether to apply a random translation to each actor's point cloud.
:param augment: Torch function to use for the filler markers. Examples are `torch.rand`, `torch.ones`, etc.
:return: A tuple of tensors containing the actor point cloud, marker point cloud, and translation point cloud.
"""
# Loop through all cur_max_actors, select a row from all_data, and concatenate it to the t_cloud.
actor_cloud, marker_cloud, t_cloud, frames = [], [], [], []
        # For each actor, try up to self.n_attempts times to find a pose that does not overlap the accumulated cloud.
        # If every attempt fails, the point cloud will simply contain fewer actors.
for actor_idx in range(max_actors):
for attempt in range(self.n_attempts):
# In case we ever have lots of attempts, reset the random index if we have reached the end of the data.
if self.random_idx == len(self.all_data):
self.random_idx = 0
                # Get a pose from the tensor using the shuffled index; shape (15, 73).
row = self.all_data[self.random_indices[self.random_idx]]
self.random_idx += 1
# Collect relevant data from the row.
# Shapes: (73,).
a = row[0].to(torch.int)
m = row[1].to(torch.int)
f = row[-1].to(torch.int)
# Shape (3, 73).
t = row[2:5]
# Apply random rotation and translations if needed.
if use_random_rotation:
t = apply_y_rotation(t, device=self.device)
if use_random_translation:
t = self.apply_random_translation(t)
self._print(f'Checking overlap for {actor_idx} - {attempt}', 1)
if does_overlap(t_cloud, t, max_overlap=self.max_overlap):
# If the clouds overlap too much, we continue to the next attempt without adding this one.
                    self._print(f'Actor {actor_idx + 1} attempt {attempt + 1} failed.', 1)
continue
# Add data to their respective lists if the clouds don't overlap.
actor_cloud.append(a)
marker_cloud.append(m)
t_cloud.append(t)
frames.append(f)
self._print(f'Actor {actor_idx + 1} attempt {attempt + 1} succeeded.', 1)
# If the clouds don't overlap too much,
# we break the loop because this attempt worked, and we don't need another one.
break
self._print(f'Total length: {len(t_cloud)}/{max_actors}', 0)
# Add all lists together to create long tensors.
# Shape (n_actors * 73,).
actor_cloud = torch.cat(actor_cloud, dim=0)
marker_cloud = torch.cat(marker_cloud, dim=0)
frames = torch.cat(frames, dim=0)
# Shape (3, n_actors * 73).
t_cloud = torch.cat(t_cloud, dim=1)
# Fill the clouds with more markers to get to pc_size.
        # Shapes become (pc_size,), (pc_size,), (3, pc_size), (pc_size,).
actor_cloud, marker_cloud, t_cloud, frames, _ = fill_point_clouds(
actor_cloud, marker_cloud, t_cloud, frames, n_points=self.pc_size,
augment=augment, apply_shuffle=self.shuffle_markers, device=self.device)
return actor_cloud, marker_cloud, t_cloud, frames
def apply_random_translation(self, point_cloud: Tensor) -> Tensor:
"""
Apply random translation to the point cloud.
:param point_cloud: `Tensor` of shape (3, n_points).
:return: Translated `Tensor` of shape (3, n_points).
"""
x_translation = (torch.rand(1).item() - 0.5) * self.translation_factor
z_translation = (torch.rand(1).item() - 0.5) * self.translation_factor
point_cloud[0] += torch.tensor(x_translation, device=self.device)
point_cloud[2] += torch.tensor(z_translation, device=self.device)
return point_cloud
def __getitem__(self, index):
return self.actor_classes[index], self.marker_classes[index], self.translations[index], self.frames[index]
def __len__(self):
return len(self.actor_classes)
class InfDataset(Dataset):
    """
    Dataset for inference/testing: merges labeled and unlabeled markers per frame and
    pads each frame's point cloud up to pc_size points.
    """
def __init__(self, source: Union[Path, Tuple[np.ndarray, np.ndarray]],
pc_size: int = 1024,
n_samples: Union[int, float] = 1.0,
augment=torch.rand,
shuffle_markers: bool = False,
debug: int = -1,
device: str = 'cuda') -> None:
self.device = device
self.debug = debug
        if isinstance(source, Path):
            # Load the labeled and unlabeled datasets from the given HDF5 file.
            # Note: only the first 5 frames of each dataset are used here.
            with h5py.File(source, 'r') as h5f:
                labeled_data = np.array(h5f['labeled'])[:5]
                unlabeled_data = np.array(h5f['unlabeled'])[:5]
        else:
            # Otherwise, source is expected to be a (labeled, unlabeled) tuple of np.ndarrays.
            labeled_data, unlabeled_data = source
        self.assemble_data(augment, labeled_data, unlabeled_data, pc_size, n_samples, shuffle_markers)
self._print(f'Actors: {self.actor_classes.shape}, markers: {self.marker_classes.shape}, '
f'translations: {self.translations.shape}', 0)
self._print(self.actor_classes[:, :10], 0)
self._print(self.marker_classes[:, :10], 0)
self._print(self.translations[:, :, :10], 0)
self._print(self.unscaled_translations[:, :, :10], 0)
self._print(self.frames[:, :10], 0)
def _print(self, txt: str, lvl: int = 0) -> None:
if lvl <= self.debug:
print(txt)
    def assemble_data(self, augment, labeled_data: np.ndarray, unlabeled_data: np.ndarray, pc_size: int = 1024,
                      n_samples: Union[int, float] = 5, shuffle_markers: bool = False):
"""
Assemble the various tensors.
:param augment: Torch function to use for the filler markers. Examples are `torch.rand`, `torch.ones`, etc.
:param labeled_data: `np.ndarray` that contains the data of the labeled markers.
:param unlabeled_data: `np.ndarray` that contains the data of the unlabeled markers.
:param pc_size: `int` amount of points to put in the point cloud.
:param n_samples: Total amount of samples to generate.
:param shuffle_markers: `bool` whether to shuffle the markers in the point cloud.
"""
n_samples = convert_n_samples(n_samples, len(labeled_data))
# Initialize empty lists to store the data in.
actor_classes, marker_classes, translations, unscaled_translations, frames = [], [], [], [], []
for frame in range(n_samples):
labeled = labeled_data[frame]
unlabeled = unlabeled_data[frame]
actor_cloud, marker_cloud, scaled_t_cloud, unscaled_t_cloud, l_frames = remove_inf_markers(
labeled, device=self.device)
ul_actor_cloud, ul_marker_cloud, ul_scaled_t_cloud, ul_unscaled_t_cloud, ul_frames = \
remove_inf_markers(unlabeled, device=self.device)
merged_actors = torch.cat([actor_cloud, ul_actor_cloud], dim=0)
merged_markers = torch.cat([marker_cloud, ul_marker_cloud], dim=0)
merged_translations = torch.cat([scaled_t_cloud, ul_scaled_t_cloud], dim=1)
merged_unscaled_translations = torch.cat([unscaled_t_cloud, ul_unscaled_t_cloud], dim=1)
merged_frames = torch.cat([l_frames, ul_frames], dim=0)
            # fill_point_clouds() pads the scaled translations with the augment function and returns
            # the shuffled index order, so the unscaled translations can be shuffled identically below.
actor_cloud, marker_cloud, scaled_t_cloud, merged_frames, shuffled_idx = \
fill_point_clouds(merged_actors, merged_markers, merged_translations, merged_frames,
n_points=pc_size, augment=augment, apply_shuffle=shuffle_markers, device=self.device)
            # Use fill_translation_cloud() with the same shuffle indices so the unscaled translations
            # stay aligned with the scaled ones. It is a separate helper because fill_point_clouds()
            # is also used by the TrainDataset class, which has no unscaled translations.
merged_unscaled_translations, _ = fill_translation_cloud(merged_unscaled_translations, n_points=pc_size,
augment=augment, apply_shuffle=shuffle_markers,
shuffle=shuffled_idx, device=self.device)
actor_classes.append(actor_cloud)
marker_classes.append(marker_cloud)
translations.append(scaled_t_cloud)
unscaled_translations.append(merged_unscaled_translations)
frames.append(merged_frames)
        # (n_samples, pc_size), (n_samples, pc_size), (n_samples, 3, pc_size), (n_samples, 3, pc_size), (n_samples, pc_size).
self.actor_classes = torch.stack(actor_classes)
self.marker_classes = torch.stack(marker_classes)
self.translations = torch.stack(translations)
self.unscaled_translations = torch.stack(unscaled_translations)
self.frames = torch.stack(frames)
def __getitem__(self, index):
return self.actor_classes[index], self.marker_classes[index], \
self.translations[index], self.unscaled_translations[index], self.frames[index]
def __len__(self):
return len(self.actor_classes)
def does_overlap(accumulated_point_cloud: List[Tensor], new_point_cloud: Tensor,
max_overlap: Tuple[float, float, float] = (0.2, 0.2, 0.2)) -> bool:
"""
Checks if a new point cloud overlaps with any of the existing point clouds.
:param accumulated_point_cloud: List of `Tensor` of the accumulated point clouds.
:param new_point_cloud: `Tensor` point cloud to check overlap for.
:param max_overlap: Tuple of 3 floats to indicate allowed overlapping thresholds for each axis.
:return: `bool` whether the new point cloud overlaps with any of the existing point clouds.
"""
def get_bounding_box(points: Tensor) -> Tuple[Tensor, Tensor]:
"""
Gets the bounding box values (min, max) for each axis.
:param points: `Tensor` point cloud to analyze.
:return: Tuple of `Tensor` of minimum and maximum values.
"""
min_values, _ = torch.min(points, dim=1)
max_values, _ = torch.max(points, dim=1)
return min_values, max_values
def check_dimensional_overlap(bb1_min: Tensor, bb1_max: Tensor, bb2_min: Tensor, bb2_max: Tensor,
overlap_threshold: float = 0.2) -> bool:
"""
Checks if two bounding boxes overlap in one axis.
:param bb1_min: `Tensor` of minimum value for the first bounding box.
:param bb1_max: `Tensor` of maximum value for the first bounding box.
:param bb2_min: `Tensor` of minimum value for the second bounding box.
:param bb2_max: `Tensor` of maximum value for the second bounding box.
:param overlap_threshold: `float` that indicates the maximum % of overlap allowed for this axis.
:return: `bool` whether the two bounding boxes overlap.
"""
# Find the highest bbox minimum and the lowest bbox maximum.
overlap_min = torch.maximum(bb1_min, bb2_min)
overlap_max = torch.minimum(bb1_max, bb2_max)
# Calculate the overlap length. If the bounding boxes don't overlap, this length will be negative.
# Then we can return False right away.
overlap_length = overlap_max - overlap_min
if overlap_length <= 0:
return False
        # Given that the overlap length is positive, calculate how much overlap is happening.
        # First find the outer bounds of both bounding boxes (lowest minimum and highest maximum).
non_overlap_min = torch.minimum(bb1_min, bb2_min)
non_overlap_max = torch.maximum(bb1_max, bb2_max)
# Then calculate what fraction of the total length is the overlapping length.
total_length = non_overlap_max - non_overlap_min
overlap_ratio = overlap_length / total_length
# Return whether this ratio is higher than the allowed threshold.
return overlap_ratio > overlap_threshold
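    # Worked example (illustrative): x-extents [0.0, 1.0] and [0.8, 2.0] overlap on [0.8, 1.0]
    # (length 0.2) while their union spans [0.0, 2.0] (length 2.0), giving an overlap ratio of
    # 0.2 / 2.0 = 0.1, which stays under the default 0.2 threshold.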
def check_3dimensional_overlap(bb1_min: Tensor, bb1_max: Tensor, bb2_min: Tensor, bb2_max: Tensor,
overlap_thresholds: Tuple[float, float, float]) -> bool:
"""
Checks if two 3-dimensional bounding boxes overlap in the x and z axis.
:param bb1_min: `Tensor` of minimum values for the first bounding box.
:param bb1_max: `Tensor` of maximum values for the first bounding box.
:param bb2_min: `Tensor` of minimum values for the second bounding box.
:param bb2_max: `Tensor` of maximum values for the second bounding box.
:param overlap_thresholds: Tuple of 3 `float` that indicates the maximum % of overlap allowed for all axes.
:return: `bool` whether the two bounding boxes overlap.
"""
x_overlap = check_dimensional_overlap(bb1_min[0], bb1_max[0], bb2_min[0], bb2_max[0], overlap_thresholds[0])
z_overlap = check_dimensional_overlap(bb1_min[2], bb1_max[2], bb2_min[2], bb2_max[2], overlap_thresholds[2])
        # EXTRA: the y axis could also be checked here; for now only the ground plane (x/z) is considered.
return x_overlap and z_overlap
# If this is the first attempt of checking an overlap, the accumulated point cloud is empty,
# so we don't need to check any overlap.
if not accumulated_point_cloud:
return False
# Find the bounding box values of the new point cloud.
new_min, new_max = get_bounding_box(new_point_cloud)
overlaps = []
# Iterate through each point cloud in the accumulated list.
for idx, pc in enumerate(accumulated_point_cloud):
# Get the bounding box for the current cloud.
current_min, current_max = get_bounding_box(pc)
# Check if the new point cloud overlaps with the current cloud.
overlaps.append(check_3dimensional_overlap(current_min, current_max, new_min, new_max, max_overlap))
    # If the new cloud overlaps any existing cloud (in both x and z), it should not be added.
return any(overlaps)
if __name__ == '__main__':
# train_dataset = TrainDataset(Path(r'G:\Firestorm\mocap-ai\data\h5\mes-1\train\IntroVideo_04_006.h5'),
# n_samples=1,
# max_actors=2,
# pc_size=2,
# use_random_max_actors=False,
# use_random_translation=True,
# use_random_rotation=False,
# shuffle_markers=False,
# max_overlap=.9)
# print(dir(train_dataset))
test_dataset = InfDataset(Path(r'G:\Firestorm\mocap-ai\data\h5\mes-1\test\HangoutSpot_1_001.h5'),
pc_size=150,
shuffle_markers=False,
debug=0)
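    # Sketch of a possible next step (assumption, not part of the original script): wrap the
    # dataset in a standard torch DataLoader and iterate over one batch. The unpacked names
    # mirror the order returned by InfDataset.__getitem__.
    loader = torch.utils.data.DataLoader(test_dataset, batch_size=4, shuffle=False)
    for actors, markers, translations, unscaled_translations, frames in loader:
        print(actors.shape, markers.shape, translations.shape, unscaled_translations.shape, frames.shape)
        break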