Natsha committed on
Commit 6b3d7b3 · 1 Parent(s): d82ff49

Added functionality to convert a frame range to a timeline dense point cloud.


The function get_timeline_dense_cloud() returns this as an np.array.
Also added docstrings for all functions.
Finally, renamed read_fbx.py to fbx_handler.py and moved it to the main directory, because this class will handle everything related to the FBX SDK.
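
A minimal usage sketch of the new API (the file path below is only a placeholder; the Streamlit app instead calls export(t='string') on the container):

```python
from pathlib import Path
import fbx_handler

# Placeholder path; any FBX take with labeled actors and an Unlabeled_Markers group should work.
container = fbx_handler.FBXContainer(Path('some_take.fbx'))

# Dense point cloud for the whole frame range, shape (num_frames, pc_size, 5):
# each row is [actor class, marker class, tx, ty, tz], padded with random ghost markers.
dense = container.get_timeline_dense_cloud(shuffle=True)

# Or split it straight into (actor classes, marker classes, translations) for training.
actors, markers, translations = container.split_timeline_dense_cloud(shuffle=True)
```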

Files changed (3)
  1. app.py +2 -2
  2. fbx_handler.py +564 -0
  3. labeler/read_fbx.py +0 -238
app.py CHANGED
@@ -7,11 +7,11 @@ from pathlib import Path
 import streamlit as st
 
 # Import custom libs.
-from labeler import read_fbx
+import fbx_handler
 
 
 def process_file(file: Path) -> bytes:
-    fbx_content = read_fbx.MarkerData(file)
+    fbx_content = fbx_handler.FBXContainer(file)
     return fbx_content.export(t='string')
 
 

fbx_handler.py ADDED
@@ -0,0 +1,564 @@
+# Import core libs.
+import pandas as pd
+import numpy as np
+from pathlib import Path
+from typing import List, Union, Tuple
+
+# Import util libs.
+import contextlib
+import fbx
+
+# Import custom data.
+import globals
+
+
+def center_axis(a: List[float]) -> np.array:
+    """
+    Centers a list of floats.
+    :param a: List of floats to center.
+    :return: The centered list as a `np.array`.
+    """
+    # Turn list into np array for optimized math.
+    a = np.array(a)
+
+    # Find the centroid by subtracting the lowest value from the highest value.
+    _min = np.min(a)
+    _max = np.max(a)
+    _c = _max - _min
+    # Center the array by subtracting the centroid.
+    a -= _c
+    return a
+
+
+def make_ghost_markers(missing: int) -> np.array:
+    """
+    Creates a np array containing enough rows to fill a point cloud up to 1000 points.
+    Ghost markers are always unlabeled markers, therefore their actor and marker class is 0.
+    :param missing: `int` amount of missing rows in the cloud that need to be filled with this function.
+    :return: multidimensional `np.array` with the shape: (missing, 5).
+    """
+    return np.column_stack([
+        np.zeros((missing, 1), dtype=int),  # 0
+        np.zeros((missing, 1), dtype=int),  # 0
+        np.random.rand(missing, 1),  # 0.0-1.0
+        np.random.rand(missing, 1),  # 0.0-1.0
+        np.random.rand(missing, 1)  # 0.0-1.0
+    ])
+
+
+class FBXContainer:
+    # TODO: Model is currently built for training. Add testing mode.
+    def __init__(self, fbx_file: Path,
+                 volume_dims: Tuple[float] = (10., 4., 10.),
+                 max_actors: int = 10,
+                 pc_size: int = 1000,
+                 scale: float = 0.01):
+        """
+        Class that stores references to important nodes in an FBX file.
+        Offers utility functions to quickly load animation data.
+        :param fbx_file: `Path` to the file to load.
+        :param volume_dims: `tuple` of `float` that represent the dimensions of the capture volume in meters.
+        :param max_actors: `int` maximum amount of actors to expect in a point cloud.
+        :param pc_size: `int` amount of points in a point cloud.
+        :param scale: `float` scale factor applied to all translation values (default 0.01).
+        """
+        if pc_size < max_actors * 73:
+            raise ValueError('Point cloud size must be large enough to contain the maximum amount of actors * 73'
+                             f' markers: {pc_size}/{max_actors * 73}.')
+
+        # Python ENUM of the C++ time modes.
+        self.time_modes = globals.get_time_modes()
+        # Ordered list of marker names. Note: rearrange this in globals.py.
+        self.marker_names = globals.get_marker_names()
+
+        # Initiate empty lists to store references to nodes.
+        self.markers = []
+        self.actors = []
+        # Store names of the actors (all parent nodes that have the first 4 markers as children).
+        self.actor_names = []
+
+        # Split the dimensions tuple into its axes for easier access.
+        self.vol_x = volume_dims[0]
+        self.vol_y = volume_dims[1]
+        self.vol_z = volume_dims[2]
+
+        self.scale = scale
+
+        self.max_actors = max_actors
+        # Maximum point cloud size = 73 * max_actors + unlabeled markers.
+        self.pc_size = pc_size
+
+        self.fbx_file = fbx_file
+        self.valid_frames = []
+
+        self.__init_scene()
+        self.__init_anim()
+        self.__init_actors()
+        self.__init_markers()
+        self.__init_unlabeled_markers()
+
+    def __init_scene(self):
+        """
+        Stores scene, root, and time_mode properties.
+        Destroys the importer to remove the reference to the loaded file.
+        """
+        # Create an FBX manager and importer.
+        manager = fbx.FbxManager.Create()
+        importer = fbx.FbxImporter.Create(manager, '')
+
+        # Import the FBX file.
+        importer.Initialize(str(self.fbx_file))
+        self.scene = fbx.FbxScene.Create(manager, '')
+        importer.Import(self.scene)
+        self.root = self.scene.GetRootNode()
+        self.time_mode = self.scene.GetGlobalSettings().GetTimeMode()
+
+        # Destroy importer to remove reference to imported file.
+        # This will allow us to delete the uploaded file.
+        importer.Destroy()
+
+    def __init_anim(self):
+        """
+        Stores the anim_stack, num_frames, start_frame, end_frame properties.
+        """
+        # Get the animation stack and layer.
+        anim_stack = self.scene.GetCurrentAnimationStack()
+        self.anim_layer = anim_stack.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxAnimLayer.ClassId), 0)
+
+        # Find the total number of frames to expect from the local time span.
+        local_time_span = anim_stack.GetLocalTimeSpan()
+        self.num_frames = int(local_time_span.GetDuration().GetFrameCount(self.time_mode))
+        self.start_frame = local_time_span.GetStart().GetFrameCount(self.time_mode)
+        self.end_frame = local_time_span.GetStop().GetFrameCount(self.time_mode)
+
+    def __init_actors(self):
+        """
+        Goes through all root children (generation 1).
+        If a child has the first 4 markers as children, it is considered an actor (Shogun subject) and appended
+        to the actors and actor_names list properties.
+        Also initializes an empty valid_frames list for each found actor.
+        """
+        # Find all parent nodes (/System, /Unlabeled_Markers, /Actor1, etc).
+        gen1_nodes = [self.root.GetChild(i) for i in range(self.root.GetChildCount())]
+        for gen1_node in gen1_nodes:
+            gen2_nodes = [gen1_node.GetChild(i) for i in
+                          range(gen1_node.GetChildCount())]  # Actor nodes (/Mimi/Hips, /Mimi/ARIEL, etc)
+
+            # If the first 4 marker names are children of this parent, it must be an actor.
+            if all(name in [node.GetName().split(':')[-1] for node in gen2_nodes] for name in self.marker_names[:4]):
+                self.actor_names.append(gen1_node.GetName())
+                self.actors.append(gen1_node)
+
+        self.actor_count = len(self.actors)
+        self.valid_frames = [[] for _ in range(self.actor_count)]
+
+    def __init_markers(self):
+        """
+        Goes through all actor nodes and stores references to their marker nodes.
+        """
+        for actor_node in self.actors:
+            actor_markers = {}
+            for marker_name in self.marker_names:
+                for actor_idx in range(actor_node.GetChildCount()):
+                    child = actor_node.GetChild(actor_idx)
+                    # Child name might have namespaces in it like this: Vera:ARIEL
+                    # We want to match only on the actual name, so strip everything before off.
+                    child_name = child.GetName().split(':')[-1]
+                    if child_name == marker_name:
+                        actor_markers[marker_name] = child
+
+            assert len(actor_markers) == len(self.marker_names), f'{actor_node.GetName()} does not have all markers.'
+
+            self.markers.append(actor_markers)
+
+    def __init_unlabeled_markers(self):
+        """
+        Looks for the Unlabeled_Markers parent node under the root and stores references to all unlabeled marker nodes.
+        """
+        # Find the Unlabeled_Markers parent node.
+        for i in range(self.root.GetChildCount()):
+            gen1_node = self.root.GetChild(i)
+            if gen1_node.GetName() == 'Unlabeled_Markers':
+                self.unlabeled_markers_parent = gen1_node
+                self.unlabeled_markers = [gen1_node.GetChild(um) for um in range(gen1_node.GetChildCount())]
+                return
+
+    def _check_actor(self, actor: int = 0):
+        """
+        Safety check to see if the actor `int` is a valid index (to avoid out of range errors).
+        :param actor: `int` actor index, which should be between 0 and actor_count - 1.
+        """
+        assert 0 <= actor < self.actor_count, f'Actor index must be between 0 and {self.actor_count - 1}. ' \
+                                              f'It is {actor}.'
+
+    def _set_valid_frames_for_actor(self, actor: int = 0):
+        """
+        Checks for each frame in the frame range, and for each marker, if there is a keyframe present
+        at that frame on LocalTranslation X.
+        If the keyframe is missing, removes that frame from the list of valid frames for that actor.
+        This eventually leaves a list of frames where each number is guaranteed to have a keyframe on all markers.
+        The list is appended to valid_frames, which can be indexed per actor.
+        Finally, stores a list of frames that is valid for all actors in common_frames.
+        :param actor: `int` index of the actor to find keyframes for.
+        """
+        # Make sure the actor index is in range.
+        self._check_actor(actor)
+
+        frames = self.get_frame_range()
+        for _, marker in self.markers[actor].items():
+            # Get the animation curve for local translation x.
+            t_curve = marker.LclTranslation.GetCurve(self.anim_layer, 'X')
+            # If an actor was recorded but seems to have no animation curves, we set their valid frames to nothing.
+            # Then we return, because there is no point in further checking non-existent keyframes.
+            if t_curve is None:
+                self.valid_frames[actor] = []
+                return
+
+            # Get all keyframes on the animation curve and store their frame numbers.
+            keys = [t_curve.KeyGet(i).GetTime().GetFrameCount(self.time_mode) for i in range(t_curve.KeyGetCount())]
+            # Check for each frame in frames if it is present in the list of keyframed frames.
+            for frame in frames:
+                if frame not in keys:
+                    # If the frame is not present, there is no keyframe with that frame number,
+                    # so the frame is invalid and we remove it from the list.
+                    with contextlib.suppress(ValueError):
+                        frames.remove(frame)
+
+        self.valid_frames[actor] = frames
+
+        # Store all frame lists that have at least 1 frame.
+        other_lists = [r for r in self.valid_frames if r]
+        # Make one list that contains all shared frame numbers.
+        self.common_frames = [num for num in self.get_frame_range()
+                              if all(num in other_list for other_list in other_lists)]
+
+    def set_valid_frames(self):
+        """
+        For each actor, calls _set_valid_frames_for_actor().
+        """
+        for i in range(self.actor_count):
+            self._set_valid_frames_for_actor(i)
+
+    def _check_valid_frames(self, actor: int = 0):
+        """
+        Safety check to see if the given actor has any valid frames stored.
+        If not, calls _set_valid_frames_for_actor() for that actor.
+        :param actor: `int` actor index.
+        """
+        self._check_actor(actor)
+
+        if not len(self.valid_frames[actor]):
+            self._set_valid_frames_for_actor(actor)
+
+    def _modify_pose(self, actor: int = 0, frame: int = 0) -> List[float]:
+        """
+        Evaluates all marker nodes for the given actor and modifies the resulting point cloud,
+        so it is centered and scaled properly for training.
+        :param actor: `int` actor index.
+        :param frame: `int` frame to evaluate the markers at.
+        :return: 1D list of `float` that contains the tx, ty and tz for each marker, in that order.
+        """
+        # Set new frame to evaluate at.
+        time = fbx.FbxTime()
+        time.SetFrame(frame)
+        # Prepare arrays for each axis.
+        x, y, z = [], [], []
+
+        # For each marker, store the x, y and z global position.
+        for n, m in self.markers[actor].items():
+            t = m.EvaluateGlobalTransform(time).GetT()
+            x += [t[0] * self.scale]
+            y += [t[1] * self.scale]
+            z += [t[2] * self.scale]
+
+        # Move the point cloud to the center of the x and z axes. This will put the actor in the middle.
+        x = center_axis(x)
+        z = center_axis(z)
+
+        # Move the actor to the middle of the volume floor by adding volume_dim/2 to x and z.
+        x += self.vol_x / 2.
+        z += self.vol_z / 2.
+
+        # Squeeze the actor into the 1x1 plane for the neural network by dividing the axes.
+        x /= self.vol_x
+        z /= self.vol_z
+        y = np.array(y) / self.vol_y
+
+        # TODO: Optionally: Add any extra modifications to the point cloud here.
+
+        # Append all values to a new array, one axis at a time.
+        # This way it will match the column names order.
+        pose = []
+        for i in range(len(x)):
+            pose += [x[i]]
+            pose += [y[i]]
+            pose += [z[i]]
+        return pose
+
+    def extract_scaled_translation(self, m: fbx.FbxNode, time: fbx.FbxTime) -> List[float]:
+        """
+        Evaluates a node's world translation at the given time and scales the vector down by a factor of self.scale.
+        :param m: `fbx.FbxNode` node that needs to be evaluated.
+        :param time: `fbx.FbxTime` at which frame/time the node needs to be evaluated.
+        :return: Translation vector as a list of floats.
+        """
+        t = m.EvaluateGlobalTransform(time).GetT()
+        return [t[i] * self.scale for i in range(3)]
+
+    def get_frame_range(self) -> List[int]:
+        """
+        Replacement and improvement for:
+        `list(range(self.num_frames))`
+        If the animation does not start at frame 0, this will return a list that has the correct frames.
+        :return: List of `int` frame numbers that are between the start and end frame of the animation.
+        """
+        return list(range(self.start_frame, self.end_frame))
+
+    def columns_from_joints(self) -> List[str]:
+        """
+        Generates a list of column names based on the (order of the) marker names.
+        :return: List of column names, in the form of [node1_tx, node1_ty, node1_tz, node2_tx, node2_ty, node2_tz..].
+        """
+        columns = []
+        for name in self.marker_names:
+            columns += [f'{name}x', f'{name}y', f'{name}z']
+
+        return columns
+
+    def get_marker_by_name(self, actor: int, name: str):
+        """
+        Returns the reference to the actor's marker.
+        :param actor: `int` actor index.
+        :param name: `str` marker name.
+        :return: `fbx.FbxNode` reference.
+        """
+        self._check_actor(actor)
+        return self.markers[actor][name]
+
+    def print_valid_frames_stats_for_actor(self, actor: int = 0):
+        """
+        Prints: actor name, total amount of frames in the animation, amount of valid frames for the given actor,
+        number of missing frames, and the ratio of valid/total frames.
+        :param actor: `int` actor index.
+        :return: Tuple of `str` actor name, `int` total frames, `int` amount of valid frames, `float` valid frame ratio.
+        """
+        self._check_actor(actor)
+        self._check_valid_frames(actor)
+
+        len_valid = len(self.valid_frames[actor])
+        ratio = (len_valid / self.num_frames) * 100
+        print(f'Actor {self.actor_names[actor]}: Total: {self.num_frames}, valid: {len_valid}, missing: '
+              f'{self.num_frames - len_valid}, ratio: {ratio:.2f}% valid.')
+
+        return self.actor_names[actor], self.num_frames, len_valid, ratio
+
+    def get_valid_frames_for_actor(self, actor: int = 0) -> List[int]:
+        """
+        Collects the valid frames for the given actor.
+        :param actor: `int` actor index.
+        :return: List of `int` frame numbers that have a keyframe on tx for all markers.
+        """
+        self._check_valid_frames(actor)
+        return self.valid_frames[actor]
+
+    def extract_valid_translations_per_actor(self, actor: int = 0) -> List[List[float]]:
+        """
+        Assembles the poses for the valid frames for the given actor as a 2D list where each row is a pose.
+        :param actor: `int` actor index.
+        :return: List of poses, where each pose is a list of `float` translations.
+        """
+        # Ensure the actor index is within range.
+        self._check_actor(actor)
+
+        poses = []
+        # Go through all valid frames for this actor.
+        # Note that these frames can be different per actor.
+        for frame in self.valid_frames[actor]:
+            # Get the centered point cloud as a 1D list.
+            pose_at_frame = self._modify_pose(actor, frame)
+            poses.append(pose_at_frame)
+
+        return poses
+
+    def extract_all_valid_translations(self) -> pd.DataFrame:
+        """
+        Convenience method that calls self.extract_valid_translations_per_actor() for all actors
+        and returns a `DataFrame` containing all poses after each other.
+        :return: `DataFrame` where each row is a pose.
+        """
+        # Note that the column names are/must be in the same order as the markers.
+        columns = self.columns_from_joints()
+
+        all_poses = []
+        # For each actor, add their valid poses to all_poses.
+        for i in range(self.actor_count):
+            all_poses.extend(self.extract_valid_translations_per_actor(i))
+
+        return pd.DataFrame(all_poses, columns=columns)
+
+    def get_transformed_worldspace(self, m: fbx.FbxNode, time: fbx.FbxTime) -> List[float]:
+        """
+        Evaluates the world translation of the given marker at the given time,
+        scales it down by scale and turns it into a vector list.
+        :param m: `fbx.FbxNode` marker to evaluate the world translation of.
+        :param time: `fbx.FbxTime` time to evaluate at.
+        :return: Vector in the form: [tx, ty, tz].
+        """
+        t = m.EvaluateGlobalTransform(time).GetT()
+        x = t[0] * self.scale / self.vol_x
+        y = t[1] * self.scale / self.vol_y
+        z = t[2] * self.scale / self.vol_z
+
+        return [x, y, z]
+
+    def is_kf_present(self, marker: fbx.FbxNode, time: fbx.FbxTime) -> bool:
+        """
+        Returns True if a keyframe is found on the given node's local translation x animation curve.
+        Else returns False.
+        :param marker: `fbx.FbxNode` marker node to evaluate.
+        :param time: `fbx.FbxTime` time to evaluate at.
+        :return: True if a keyframe was found, False otherwise.
+        """
+        curve = marker.LclTranslation.GetCurve(self.anim_layer, 'X')
+        return False if curve is None else curve.KeyFind(time) != -1
+
+    def get_sparse_cloud(self, time: fbx.FbxTime) -> np.array:
+        """
+        For each actor, collects the world translations of all markers that have a keyframe at the given time,
+        together with their actor and marker classes, and appends the unlabeled markers with class 0.
+        :param time: `fbx.FbxTime` time to evaluate the markers at.
+        :return: `np.array` sparse point cloud of rows [actor class, marker class, tx, ty, tz],
+        capped at self.pc_size rows.
+        """
+        cloud = []
+        # Iterate through all actors to get their markers' world translations and add them to the cloud list.
+        for actor_idx in range(self.actor_count):
+
+            cloud.extend(
+                # This actor's point cloud is made up of all markers that have a keyframe at the given time.
+                # For each marker, we create this row: [actor class (index+1), marker class (index+1), tx, ty, tz].
+                # We use index+1 because the unlabeled markers will use index 0 for both classes.
+                [actor_idx + 1, marker_class, *self.get_transformed_worldspace(m, time)]
+                for marker_class, (marker_name, m) in enumerate(
+                    self.markers[actor_idx].items(), start=1
+                )
+                # Only add the marker if it has a keyframe. Missing keyframes on these markers are potentially
+                # among the keyframes on the unlabeled markers. The job of the labeler AI is to predict which
+                # point (unlabeled or labeled) is which marker.
+                if self.is_kf_present(m, time)
+            )
+
+        # Unlabeled markers are their own 'actor', so we only need one loop here.
+        for m in self.unlabeled_markers:
+            if self.is_kf_present(m, time):
+                # Unlabeled markers use actor class 0 and marker class 0.
+                cloud.extend([[0, 0, *self.get_transformed_worldspace(m, time)]])
+
+        # If the data is extremely noisy, it might have only a few labeled markers and a lot of unlabeled markers.
+        # The returned point cloud is not allowed to be bigger than the maximum size (self.pc_size),
+        # so return the cloud as a np array that cuts off any excessive markers.
+        return np.array(cloud)[:self.pc_size]
+
+    def get_timeline_sparse_cloud(self) -> np.array:
+        """
+        Convenience method that calls self.get_sparse_cloud() for all frames in the frame range
+        and returns the combined result.
+        :return: `np.array` that contains a sparse cloud for each frame in the frame range.
+        """
+        # We need time objects in a list to do list comprehension in the return line.
+        # SetFrame() is a void/in-place method, so build the list in a loop and create a fresh
+        # fbx.FbxTime per frame; reusing one object would leave every entry pointing at the last frame.
+        times = []
+        for f in self.get_frame_range():
+            time = fbx.FbxTime()
+            time.SetFrame(f)
+            times.append(time)
+
+        return np.array([self.get_sparse_cloud(t) for t in times])
+
+    def get_timeline_dense_cloud(self, shuffle: bool = False) -> np.array:
+        """
+        For each frame in the frame range, collects the point cloud that is present in the file.
+        Then it creates a ghost cloud of random markers that are treated as unlabeled markers,
+        and adds them together to create a dense cloud whose shape is always (self.pc_size, 5).
+        Optionally shuffles this dense cloud before adding it to the final list.
+        :param shuffle: If `True`, shuffles the dense point cloud before appending it to the overall list.
+        :return: `np.array` that contains a dense point cloud for each frame,
+        with a shape of (self.num_frames, self.pc_size, 5).
+        """
+        time = fbx.FbxTime()
+        clouds = []
+        for frame in self.get_frame_range():
+            time.SetFrame(frame)
+            cloud = self.get_sparse_cloud(time)
+            missing = self.pc_size - cloud.shape[0]
+
+            # Only bother creating ghost markers if there are any missing rows.
+            if missing > 0:
+                ghost_cloud = make_ghost_markers(missing)
+                cloud = np.vstack([cloud, ghost_cloud])
+
+            # Shuffle the rows if needed. Because each row contains all dependent and independent variables,
+            # shuffling won't mess up the labels.
+            if shuffle:
+                np.random.shuffle(cloud)
+
+            clouds.append(cloud)
+
+        return np.array(clouds)
+
+    def split_timeline_dense_cloud(self, cloud: np.array = None, shuffle: bool = False) \
+            -> Tuple[np.array, np.array, np.array]:
+        """
+        Splits a timeline dense cloud with shape (self.num_frames, self.pc_size, 5) into 3 different
+        arrays:
+        1. A `np.array` with the actor classes as shape (self.num_frames, self.pc_size, 1).
+        2. A `np.array` with the marker classes as shape (self.num_frames, self.pc_size, 1).
+        3. A `np.array` with the translation floats as shape (self.num_frames, self.pc_size, 3).
+        :param cloud: `np.array` of shape (self.num_frames, self.pc_size, 5) that contains a dense point cloud
+        (self.pc_size, 5) per frame in the frame range.
+        :param shuffle: `bool` whether to shuffle the generated cloud if no cloud was given.
+        :return: Tuple of `np.array` as (actor classes, marker classes, translation vectors).
+        """
+        if cloud is None:
+            cloud = self.get_timeline_dense_cloud(shuffle)
+
+        assert cloud.shape[1] == self.pc_size, \
+            f"Dense cloud doesn't have enough points: {cloud.shape[1]}/{self.pc_size}."
+        assert cloud.shape[2] == 5, f"Dense cloud is missing columns: {cloud.shape[2]}/5."
+
+        # Return np arrays as (actor classes, marker classes, translation vectors).
+        return cloud[:, :, 0], cloud[:, :, 1], cloud[:, :, -3:]
+
+    def convert_class_to_actor(self, c: float = 0):
+        """
+        Returns the actor name based on the class value.
+        :param c: `float` actor class index.
+        :return: `str` actor name.
+        """
+        return 'UNLABELED' if c == 0. else self.actor_names[int(c) - 1]
+
+    def convert_class_to_marker(self, c: float = 0):
+        """
+        Returns the marker name based on the class value.
+        :param c: `float` marker class index.
+        :return: `str` marker name.
+        """
+        return 'UNLABELED' if c == 0. else self.marker_names[int(c) - 1]
+
+    def export(self, t: str = 'csv', output_file: Path = None) -> Union[bytes, Path]:
+        # Get the dataframe with all animation data.
+        df = self.extract_all_valid_translations()
+
+        if t == 'string':
+            return df.to_csv(index=False).encode('utf-8')
+
+        if output_file is None:
+            output_file = self.fbx_file.with_suffix('.csv')
+
+        if output_file.suffix != '.csv':
+            raise ValueError(f'{output_file} needs to be a .csv file.')
+
+        df.to_csv(output_file, index=False)
+        return output_file
+
+
+# d = FBXContainer(Path('G:/Firestorm/mocap-ai/data/fbx/dowg/TAKE_01+1_ALL_001.fbx'))
+# TODO: Make functions to write new class predictions to the fbx file.
labeler/read_fbx.py DELETED
@@ -1,238 +0,0 @@
-import pandas as pd
-import numpy as np
-from pathlib import Path
-
-import contextlib
-import fbx
-from typing import List, Union
-
-# Import custom data.
-import globals
-
-
-class MarkerData:
-    # TODO: Model is currently built for training. Add testing mode.
-    def __init__(self, fbx_file: Path):
-        """
-        Class that stores references to important nodes in an FBX file.
-        Offers utility functions to quickly load animation data.
-        :param fbx_file: `str` Path to the file to load.
-        """
-        self.time_modes = globals.get_time_modes()
-        self.marker_names = globals.get_marker_names()
-
-        self.markers = []
-        self.actor_names = []
-        self.actors = []
-
-        self.volume_dim_x = 10.
-        self.volume_dim_y = 4.
-
-        self.fbx_file = fbx_file
-        self.valid_frames = []
-
-        self.__init_scene()
-        self.__init_anim()
-        self.__init_actors()
-        self.__init_markers()
-
-    def __init_scene(self):
-        # Create an FBX manager and importer
-        manager = fbx.FbxManager.Create()
-        importer = fbx.FbxImporter.Create(manager, '')
-
-        # Import the FBX file
-        importer.Initialize(str(self.fbx_file))
-        self.scene = fbx.FbxScene.Create(manager, '')
-        importer.Import(self.scene)
-        self.root = self.scene.GetRootNode()
-        self.time_mode = self.scene.GetGlobalSettings().GetTimeMode()
-
-        # Destroy importer to remove reference to imported file.
-        # This will allow us to delete the uploaded file.
-        importer.Destroy()
-
-    def __init_anim(self):
-        # Get the animation stack and layer.
-        anim_stack = self.scene.GetCurrentAnimationStack()
-        self.anim_layer = anim_stack.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxAnimLayer.ClassId), 0)
-
-        # Find the total number of frames to expect from the local time span.
-        local_time_span = anim_stack.GetLocalTimeSpan()
-        self.num_frames = int(local_time_span.GetDuration().GetFrameCount(self.time_mode))
-
-    def __init_actors(self):
-
-        # Find all parent nodes (/System, /_Unlabeled_Markers, /Actor1, etc).
-        gen1_nodes = [self.root.GetChild(i) for i in range(self.root.GetChildCount())]
-        for gen1_node in gen1_nodes:
-            gen2_nodes = [gen1_node.GetChild(i) for i in
-                          range(gen1_node.GetChildCount())]  # Actor nodes (/Mimi/Hips, /Mimi/ARIEL, etc)
-
-            # If the first 3 marker names are children of this parent, it must be an actor.
-            if all(name in [node.GetName().split(':')[-1] for node in gen2_nodes] for name in self.marker_names[:4]):
-                self.actor_names.append(gen1_node.GetName())
-                self.actors.append(gen1_node)
-
-        self.actor_count = len(self.actors)
-        self.valid_frames = [[] for _ in range(self.actor_count)]
-
-    def __init_markers(self):
-        for actor_node in self.actors:
-            actor_markers = {}
-            for marker_name in self.marker_names:
-                for actor_idx in range(actor_node.GetChildCount()):
-                    child = actor_node.GetChild(actor_idx)
-                    # Child name might have namespaces in it like this: Vera:ARIEL
-                    # We want to match only on the actual name, so strip everything before off.
-                    child_name = child.GetName().split(':')[-1]
-                    if child_name == marker_name:
-                        actor_markers[marker_name] = child
-
-            assert len(actor_markers) == len(self.marker_names), f'{actor_node.GetName()} does not have all markers.'
-
-            self.markers.append(actor_markers)
-
-    def _check_actor(self, actor: int = 0):
-        assert 0 <= actor <= self.actor_count, f'Actor number must be between 0 and {self.actor_count - 1}. ' \
-                                               f'It is {actor}.'
-
-    def _set_valid_frames_for_actor(self, actor: int = 0):
-        self._check_actor(actor)
-
-        frames = list(range(self.num_frames))
-        for marker_name in self.marker_names:
-            marker = self.markers[actor][marker_name]
-            t_curve = marker.LclTranslation.GetCurve(self.anim_layer, 'X')
-            # If an actor was recorded but seems to have no animation curves, we set their valid frames to nothing.
-            if t_curve is None:
-                self.valid_frames[actor] = []
-                return
-
-            keys = [t_curve.KeyGet(i).GetTime().GetFrameCount(self.time_mode) for i in range(t_curve.KeyGetCount())]
-            for frame in frames:
-                if frame not in keys:
-                    with contextlib.suppress(ValueError):
-                        frames.remove(frame)
-
-        self.valid_frames[actor] = frames
-        return
-
-    def _check_valid_frames(self, actor: int = 0):
-        if not len(self.valid_frames[actor]):
-            self._set_valid_frames_for_actor(actor)
-
-    def _modify_pose(self, actor, frame) -> List[float]:
-        # Set new frame to evaluate at.
-        time = fbx.FbxTime()
-        time.SetFrame(frame)
-        # Prepare arrays for each axis.
-        x, y, z = [], [], []
-
-        # For each marker, store the x, y and z global position.
-        for n, m in self.markers[actor].items():
-            t = m.EvaluateGlobalTransform(time).GetRow(3)
-            x += [t[0] * 0.01]
-            y += [t[1] * 0.01]
-            z += [t[2] * 0.01]
-
-        # Move the point cloud to the center of the x and y axes. This will put the actor in the middle.
-        x = self.center_axis(x)
-        z = self.center_axis(z)
-
-        # Move the actor to the middle of the volume floor by adding volume_dim_x/2 to x and z.
-        x += self.volume_dim_x / 2.
-        z += self.volume_dim_x / 2.
-
-        # Squeeze the actor into the 1x1 plane for the neural network by dividing the axes.
-        x /= self.volume_dim_x
-        z /= self.volume_dim_x
-        y = np.array(y) / self.volume_dim_y
-
-        # TODO: Optionally: Add any extra modifications to the point cloud here.
-
-        # Append all values to a new array, one axis at a time.
-        # This way it will match the column names order.
-        pose = []
-        for i in range(len(x)):
-            pose += [x[i]]
-            pose += [y[i]]
-            pose += [z[i]]
-        return pose
-
-    def get_marker_by_name(self, actor: int, name: str):
-        self._check_actor(actor)
-        return self.markers[actor][name]
-
-    def get_valid_frames_for_actor(self, actor: int = 0):
-        self._check_valid_frames(actor)
-        return self.valid_frames[actor]
-
-    def print_valid_frames_stats_for_actor(self, actor: int = 0):
-        self._check_actor(actor)
-        self._check_valid_frames(actor)
-
-        len_valid = len(self.valid_frames[actor])
-        ratio = (len_valid / self.num_frames) * 100
-        print(f'Actor {self.actor_names[actor]}: Total: {self.num_frames}, valid: {len_valid}, missing: '
-              f'{self.num_frames - len_valid}, ratio: {ratio:.2f}% valid.')
-
-        return self.actor_names[actor], self.num_frames, len_valid, ratio
-
-    def columns_from_joints(self):
-        columns = []
-        for name in self.marker_names:
-            columns += [f'{name}x', f'{name}y', f'{name}z']
-
-        return columns
-
-    @staticmethod
-    def center_axis(a) -> np.array:
-        a = np.array(a)
-        _min = np.min(a)
-        _max = np.max(a)
-
-        _c = _max - _min
-        a -= _c
-        return a
-
-    def extract_translations_per_actor(self, actor: int = 0):
-        self._check_actor(actor)
-        self._check_valid_frames(actor)
-
-        poses = []
-        # Go through all valid frames for this actor.
-        # Note that these frames can be different per actor.
-        for frame in self.valid_frames[actor]:
-            # Get the centered point cloud as an array.
-            pose_at_frame = self._modify_pose(actor, frame)
-            poses.append(pose_at_frame)
-
-        return poses
-
-    def extract_all_translations(self) -> pd.DataFrame:
-
-        columns = self.columns_from_joints()
-
-        all_poses = []
-
-        for i in range(self.actor_count):
-            all_poses.extend(self.extract_translations_per_actor(i))
-
-        return pd.DataFrame(all_poses, columns=columns)
-
-    def export(self, t: str = 'csv', output_file: Path = None) -> Union[bytes, Path]:
-        # Get the dataframe with all animation data.
-        df = self.extract_all_translations()
-
-        if t == 'string':
-            return df.to_csv(index=False).encode('utf-8')
-
-        if output_file is None:
-            output_file = self.fbx_file.with_suffix('.csv')
-
-        if output_file.suffix != '.csv':
-            raise ValueError(f'{output_file} needs to be a .csv file.')
-
-        df.to_csv(output_file, index=False)
-        return output_file