import pandas as pd
import numpy as np
from pathlib import Path
import fbx
from typing import List, Union

# Import custom data.
import globals


class MarkerData:
    # TODO: Model is currently built for training. Add testing mode.
    def __init__(self, fbx_file: Path):
        """
        Class that stores references to important nodes in an FBX file.
        Offers utility functions to quickly load animation data.
        :param fbx_file: `Path` to the FBX file to load.
        """
        self.time_modes = globals.get_time_modes()
        self.marker_names = globals.get_marker_names()
        self.markers = []
        self.actor_names = []
        self.actors = []
        self.volume_dim_x = 10.
        self.volume_dim_y = 4.
        self.fbx_file = fbx_file
        self.valid_frames = []

        self.__init_scene()
        self.__init_anim()
        self.__init_actors()
        self.__init_markers()

    def __init_scene(self):
        # Create an FBX manager and importer.
        manager = fbx.FbxManager.Create()
        importer = fbx.FbxImporter.Create(manager, '')

        # Import the FBX file.
        importer.Initialize(str(self.fbx_file))
        self.scene = fbx.FbxScene.Create(manager, '')
        importer.Import(self.scene)
        self.root = self.scene.GetRootNode()
        self.time_mode = self.scene.GetGlobalSettings().GetTimeMode()

        # Destroy the importer to remove its reference to the imported file.
        # This will allow us to delete the uploaded file.
        importer.Destroy()

    def __init_anim(self):
        # Get the animation stack and layer.
        anim_stack = self.scene.GetCurrentAnimationStack()
        self.anim_layer = anim_stack.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxAnimLayer.ClassId), 0)

        # Find the total number of frames to expect from the local time span.
        local_time_span = anim_stack.GetLocalTimeSpan()
        self.num_frames = int(local_time_span.GetDuration().GetFrameCount(self.time_mode))

    def __init_actors(self):
        # Find all parent nodes (/System, /_Unlabeled_Markers, /Actor1, etc).
        gen1_nodes = [self.root.GetChild(i) for i in range(self.root.GetChildCount())]
        for gen1_node in gen1_nodes:
            # Actor nodes (/Mimi/Hips, /Mimi/ARIEL, etc).
            gen2_nodes = [gen1_node.GetChild(i) for i in range(gen1_node.GetChildCount())]

            # If the first 4 marker names are children of this parent, it must be an actor.
            if all(name in [node.GetName() for node in gen2_nodes] for name in self.marker_names[:4]):
                self.actor_names.append(gen1_node.GetName())
                self.actors.append(gen1_node)

        self.actor_count = len(self.actors)
        self.valid_frames = [[] for _ in range(self.actor_count)]

    def __init_markers(self):
        for actor_node in self.actors:
            actor_markers = {}
            for marker_name in self.marker_names:
                for actor_idx in range(actor_node.GetChildCount()):
                    child = actor_node.GetChild(actor_idx)
                    child_name = child.GetName()
                    if child_name == marker_name:
                        actor_markers[child_name] = child

            assert len(actor_markers) == len(self.marker_names), f'{actor_node.GetName()} does not have all markers.'
            self.markers.append(actor_markers)

    def _check_actor(self, actor: int = 0):
        assert 0 <= actor < self.actor_count, f'Actor number must be between 0 and {self.actor_count - 1}. ' \
                                              f'It is {actor}.'
    def _set_valid_frames_for_actor(self, actor: int = 0):
        self._check_actor(actor)

        frames = list(range(self.num_frames))
        for marker_name in self.marker_names:
            marker = self.markers[actor][marker_name]
            t_curve = marker.LclTranslation.GetCurve(self.anim_layer, 'X')
            keys = {t_curve.KeyGet(i).GetTime().GetFrameCount(self.time_mode) for i in range(t_curve.KeyGetCount())}
            # Keep only the frames that have a key on this marker's X translation curve.
            frames = [frame for frame in frames if frame in keys]

        self.valid_frames[actor] = frames

    def _check_valid_frames(self, actor: int = 0):
        if not len(self.valid_frames[actor]):
            self._set_valid_frames_for_actor(actor)

    def _modify_pose(self, actor, frame) -> List[float]:
        # Set the new frame to evaluate at.
        time = fbx.FbxTime()
        time.SetFrame(frame)

        # Prepare arrays for each axis.
        x, y, z = [], [], []
        # For each marker, store the x, y and z global position.
        for marker in self.markers[actor].values():
            t = marker.EvaluateGlobalTransform(time).GetRow(3)
            x += [t[0] * 0.01]
            y += [t[1] * 0.01]
            z += [t[2] * 0.01]

        # Move the point cloud to the center of the x and z axes. This will put the actor in the middle.
        x = self.center_axis(x)
        z = self.center_axis(z)

        # Move the actor to the middle of the volume floor by adding volume_dim_x/2 to x and z.
        x += self.volume_dim_x / 2.
        z += self.volume_dim_x / 2.

        # Squeeze the actor into the 1x1 plane for the neural network by dividing the axes.
        x /= self.volume_dim_x
        z /= self.volume_dim_x
        y = np.array(y) / self.volume_dim_y

        # TODO: Optionally: Add any extra modifications to the point cloud here.

        # Append all values to a new array, one axis at a time.
        # This way it will match the column names order.
        pose = []
        for i in range(len(x)):
            pose += [x[i]]
            pose += [y[i]]
            pose += [z[i]]

        return pose

    def get_marker_by_name(self, actor: int, name: str):
        self._check_actor(actor)
        return self.markers[actor][name]

    def get_valid_frames_for_actor(self, actor: int = 0):
        self._check_valid_frames(actor)
        return self.valid_frames[actor]

    def print_valid_frames_stats_for_actor(self, actor: int = 0):
        self._check_actor(actor)
        self._check_valid_frames(actor)

        len_valid = len(self.valid_frames[actor])
        ratio = (len_valid / self.num_frames) * 100
        print(f'Actor {self.actor_names[actor]}: Total: {self.num_frames}, valid: {len_valid}, missing: '
              f'{self.num_frames - len_valid}, ratio: {ratio:.2f}% valid.')
        return self.actor_names[actor], self.num_frames, len_valid, ratio

    def columns_from_joints(self):
        columns = []
        for name in self.marker_names:
            columns += [f'{name}x', f'{name}y', f'{name}z']
        return columns

    @staticmethod
    def center_axis(a) -> np.ndarray:
        a = np.array(a)
        # Center the values around zero by subtracting the midpoint of their range.
        _min = np.min(a)
        _max = np.max(a)
        _c = (_max + _min) / 2.
        a -= _c
        return a

    def extract_translations_per_actor(self, actor: int = 0):
        self._check_actor(actor)
        self._check_valid_frames(actor)

        poses = []
        # Go through all valid frames for this actor.
        # Note that these frames can be different per actor.
        for frame in self.valid_frames[actor]:
            # Get the centered point cloud as an array.
            pose_at_frame = self._modify_pose(actor, frame)
            poses.append(pose_at_frame)

        return poses

    def extract_all_translations(self) -> pd.DataFrame:
        columns = self.columns_from_joints()
        all_poses = []
        for i in range(self.actor_count):
            all_poses.extend(self.extract_translations_per_actor(i))
        return pd.DataFrame(all_poses, columns=columns)

    def export(self, t: str = 'csv', output_file: Path = None) -> Union[bytes, Path]:
        """
        Export all marker translations to CSV.
        :param t: Export type. 'string' returns the CSV as UTF-8 encoded bytes; any other value writes a .csv file.
        :param output_file: Optional output path. Defaults to the FBX file path with a .csv suffix.
        """
        # Get the dataframe with all animation data.
        df = self.extract_all_translations()

        if t == 'string':
            return df.to_csv(index=False).encode('utf-8')

        if output_file is None:
            output_file = self.fbx_file.with_suffix('.csv')

        if output_file.suffix != '.csv':
            raise ValueError(f'{output_file} needs to be a .csv file.')

        df.to_csv(output_file, index=False)
        return output_file
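

# Minimal usage sketch, assuming this module is run directly with the FBX SDK Python bindings and the
# custom `globals` module available. The file name 'example.fbx' is a hypothetical placeholder.
if __name__ == '__main__':
    example_file = Path('example.fbx')  # Hypothetical path; point this at a real motion capture file.
    data = MarkerData(example_file)

    # Print per-actor statistics about which frames have keys on every marker.
    for actor_idx in range(data.actor_count):
        data.print_valid_frames_stats_for_actor(actor_idx)

    # Write the normalized marker translations of all actors to a CSV next to the FBX file.
    csv_path = data.export()
    print(f'Exported marker translations to {csv_path}.')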