Natsha committed on
Commit a628625 · 1 Parent(s): 22ebabc

Fixed the translation bug. TODO: Make sure the new animation curves are properly connected to their nodes.

Files changed (1)
  1. fbx_handler.py +144 -115
fbx_handler.py CHANGED
@@ -45,6 +45,15 @@ def make_ghost_markers(missing: int) -> np.array:
         np.zeros((missing, 1), dtype=int),  # 0
         np.random.rand(missing, 1),  # 0.0-1.0
         np.random.rand(missing, 1),  # 0.0-1.0
+        np.random.rand(missing, 1),  # 0.0-1.0
+        np.random.rand(missing, 1),  # 0.0-1.0
+        np.random.rand(missing, 1),  # 0.0-1.0
+        np.random.rand(missing, 1),  # 0.0-1.0
+        np.random.rand(missing, 1),  # 0.0-1.0
+        np.random.rand(missing, 1),  # 0.0-1.0
+        np.random.rand(missing, 1),  # 0.0-1.0
+        np.random.rand(missing, 1),  # 0.0-1.0
+        np.random.rand(missing, 1),  # 0.0-1.0
         np.random.rand(missing, 1)  # 0.0-1.0
     ])

@@ -60,15 +69,31 @@ def append_suffix(file_path: Path, suffix: str = '_INF'):
     return file_path.with_name(new_file_name)


+def append_zero(arr: np.ndarray) -> np.ndarray:
+    zeros = np.zeros((arr.shape[0], arr.shape[1], 1), dtype=float)
+    return np.concatenate((arr, zeros), axis=-1)
+
+
+def append_one(arr: np.ndarray) -> np.ndarray:
+    ones = np.ones((arr.shape[0], arr.shape[1], 1), dtype=float)
+    return np.concatenate((arr, ones), axis=-1)
+
+
 def merge_tdc(actor_classes: np.array,
               marker_classes: np.array,
               translation_vectors: np.array,
+              rotation_vectors: np.array,
+              scale_vectors: np.array,
               ordered: bool = True) -> np.array:
     # Actor and marker classes enter as shape (x, 1000), so use np.expand_dims to create an extra dimension at the end.
     # Return the concatenated array of shape (x, 1000, 5), which matches the original timeline dense cloud before
     # splitting it into sub arrays.
-    tdc = np.concatenate((np.expand_dims(actor_classes, -1), np.expand_dims(marker_classes, -1),
-                          translation_vectors), axis=2)
+
+    tdc = np.concatenate((np.expand_dims(actor_classes, -1),
+                          np.expand_dims(marker_classes, -1),
+                          append_zero(translation_vectors),
+                          append_zero(rotation_vectors),
+                          append_one(scale_vectors)), axis=2)
     if ordered:
         tdc = sort_cloud(tdc)
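With append_zero() and append_one(), each 3-component block is padded to 4 columns before concatenation, so a merged row is 2 class columns plus three 4-wide blocks, i.e. 14 columns. A minimal shape check (not part of the diff), assuming fbx_handler is importable and using illustrative array sizes:

    import numpy as np
    from fbx_handler import merge_tdc  # assumes the module is on the import path

    frames, points = 2, 1000  # illustrative; 1000 matches the dense-cloud point count used in the file
    actors = np.zeros((frames, points))
    markers = np.zeros((frames, points))
    t = np.zeros((frames, points, 3))
    r = np.zeros((frames, points, 3))
    s = np.ones((frames, points, 3))

    # ordered=False skips sort_cloud(), so only the concatenation path runs here.
    tdc = merge_tdc(actors, markers, t, r, s, ordered=False)
    assert tdc.shape == (frames, points, 14)  # 2 + (3+1) + (3+1) + (3+1)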
 
@@ -115,27 +140,6 @@ def create_keyframe(anim_curve: fbx.FbxAnimCurve, frame: int, value: float):
     return True


-def replace_keyframes_on_curve(anim_curve: fbx.FbxAnimCurve, frames: List[int], values: np.array):
-    # Check if the anim_curve is of type FbxAnimCurve
-    if not isinstance(anim_curve, fbx.FbxAnimCurve):
-        print("Input is not an FbxAnimCurve instance.")
-        return False
-
-    # Check if the frames and values lists have the same length
-    if len(frames) != len(values):
-        print("Frames and values lists have different lengths.")
-        return False
-
-    # Remove all existing keyframes
-    anim_curve.KeyClear()
-
-    # Create new keyframes with the given frames and values
-    for frame, value in zip(frames, values):
-        create_keyframe(anim_curve, frame, value)
-
-    return True
-
-
 def get_child_node_by_name(parent_node: fbx.FbxNode, name: str, ignore_namespace: bool = False) \
         -> Union[fbx.FbxNode, None]:
     for c in range(parent_node.GetChildCount()):
@@ -167,7 +171,16 @@ def timeline_cloud_to_dict(data: np.array, start_frame: int = 0) -> dict:
             if actor_class == 0 or marker_class == 0:
                 continue

-            translation_vector = data[frame, node, 2:]
+            # Just to be sure, forcing the last numbers of each array to be the correct values.
+            # Also check self.get_world_transform() for this.
+            translations = data[frame, node, 2:5] + np.array([0.0])
+            rotations = data[frame, node, 6:9] + np.array([0.0])
+            scales = data[frame, node, 10:13] + np.array([1.0])
+
+            world_matrix = fbx.FbxAMatrix()
+            world_matrix.SetT(fbx.FbxVector4(*translations))
+            world_matrix.SetR(fbx.FbxVector4(*rotations))
+            world_matrix.SetS(fbx.FbxVector4(*scales))

             # Create the actor dictionary if it doesn't exist.
             if actor_class not in result:
@@ -178,35 +191,31 @@ def timeline_cloud_to_dict(data: np.array, start_frame: int = 0) -> dict:
                 result[actor_class][marker_class] = {}

             # Add the frame number and translation vector to the node dictionary.
-            result[actor_class][marker_class][frame + start_frame] = translation_vector
+            result[actor_class][marker_class][frame + start_frame] = world_matrix

     return result


-def world_to_local_translation(node, parent_node, f):
+def world_to_local_transform(node, world_transform, frame):
     t = fbx.FbxTime()
-    t.SetFrame(f)
-    child_world_matrix = node.EvaluateGlobalTransform(t)
-    child_world_matrix = np.array([
-        [child_world_matrix.Get(i, j) for j in range(4)] for i in range(4)
-    ])
-    # Convert the world_translation vector to a homogeneous 4D vector by appending a 1
-    # world_translation_homogeneous = np.append(world_translation, 1)
+    t.SetFrame(frame)
+    if node.GetParent():
+        # Get the parent's world transform
+        parent_world_transform = node.GetParent().EvaluateGlobalTransform(t)

-    # Get the parent's world transformation matrix as a numpy array
-    parent_world_matrix = parent_node.EvaluateGlobalTransform(t)
-    parent_world_matrix = np.array([
-        [parent_world_matrix.Get(i, j) for j in range(4)] for i in range(4)
-    ])
+        # Compute the inverse of the parent's world transform
+        parent_world_transform_inv = parent_world_transform.Inverse()

-    # Compute the inverse of the parent's world transformation matrix
-    parent_world_matrix_inv = np.linalg.inv(parent_world_matrix)
+        # Multiply the inverse of the parent's world transform by the world transform to get the local transform
+        lcl = parent_world_transform_inv * world_transform
+    else:
+        # If the node doesn't have a parent, the local transform is the same as the world transform
+        lcl = world_transform

-    return np.dot(parent_world_matrix_inv, child_world_matrix)
+    return [lcl.GetT()[t] for t in range(3)], [lcl.GetR()[r] for r in range(3)], [lcl.GetS()[s] for s in range(3)]


 class FBXContainer:
-    # TODO: Model is currently built for training. Add testing mode.
     def __init__(self, fbx_file: Path,
                  volume_dims: Tuple[float] = (10., 4., 10.),
                  max_actors: int = 10,
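timeline_cloud_to_dict() now stores a full fbx.FbxAMatrix per frame, and world_to_local_transform() re-expresses such a matrix in the node's parent space (local = inverse(parent world) * world). A rough usage sketch (not part of the diff); some_marker stands in for an fbx.FbxNode from an already loaded scene:

    import fbx
    from fbx_handler import world_to_local_transform  # assumes the module is importable

    # Hypothetical world transform; in a real run this comes out of timeline_cloud_to_dict().
    world = fbx.FbxAMatrix()
    world.SetT(fbx.FbxVector4(10.0, 0.0, 5.0))
    world.SetR(fbx.FbxVector4(0.0, 90.0, 0.0))
    world.SetS(fbx.FbxVector4(1.0, 1.0, 1.0))

    # some_marker is a placeholder for a marker node taken from the scene.
    lcl_t, lcl_r, lcl_s = world_to_local_transform(some_marker, world, frame=0)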
@@ -443,7 +452,7 @@ class FBXContainer:
         z /= self.vol_z
         y = np.array(y) / self.vol_y

-        # TODO: Optionally: Add any extra modifications to the point cloud here.
+        # TODO: Optional: Add any extra modifications to the point cloud here.

         # Append all values to a new array, one axis at a time.
         # This way it will match the column names order.
@@ -592,7 +601,7 @@ class FBXContainer:

         return pd.DataFrame(all_poses, columns=columns)

-    def get_worldspace(self, m: fbx.FbxNode, time: fbx.FbxTime, apply_transform: bool = True) -> List[float]:
+    def get_world_transform(self, m: fbx.FbxNode, time: fbx.FbxTime, apply_transform: bool = True) -> List[float]:
         """
         Evaluates the world translation of the given marker at the given time,
         scales it down by scale and turns it into a vector list.
@@ -601,17 +610,23 @@ class FBXContainer:
         :param apply_transform: `bool` Whether to transform the translation or not.
         :return: Vector in the form: [tx, ty, tz].
         """
-        t = m.EvaluateGlobalTransform(time).GetT()
+        world = m.EvaluateGlobalTransform(time)
+        world = list(world.GetT()) + list(world.GetR()) + list(world.GetS())
+        # Make sure that the last numbers of each row are the correct values.
+        world[3] = 0.0
+        world[7] = 0.0
+        world[11] = 1.0
+
         if not apply_transform:
-            return [t[i] for i in range(3)]
+            return world

         # First multiply by self.scale, which turns meters to centimeters.
         # Then divide by volume dimensions, to normalize to the total area of the capture volume.
-        x = np.clip(t[0], -(self.vol_x * 0.5), self.vol_x * 0.5) * self.scale / self.vol_x
-        y = np.clip(t[1], -(self.vol_y * 0.5), self.vol_y * 0.5) * self.scale / self.vol_y
-        z = np.clip(t[2], -(self.vol_z * 0.5), self.vol_z * 0.5) * self.scale / self.vol_z
+        world[2] = np.clip(world[2], -(self.vol_x * 0.5), self.vol_x * 0.5) * self.scale / self.vol_x
+        world[3] = np.clip(world[3], -(self.vol_y * 0.5), self.vol_y * 0.5) * self.scale / self.vol_y
+        world[4] = np.clip(world[4], -(self.vol_z * 0.5), self.vol_z * 0.5) * self.scale / self.vol_z

-        return [x, y, z]
+        return world

     def is_kf_present(self, marker: fbx.FbxNode, time: fbx.FbxTime) -> bool:
         """
@@ -638,7 +653,7 @@ class FBXContainer:
         # because by adding the labeled markers after (which use classes 1-74),
         # we eventually return an array that doesn't need to be sorted anymore.
         cloud = [
-            [0, 0, *self.get_worldspace(m, time, apply_transform)]
+            [0, 0, *self.get_world_transform(m, time, apply_transform)]
             for m in self.unlabeled_markers
             if self.is_kf_present(m, time)
         ]
@@ -649,7 +664,7 @@ class FBXContainer:
             # This actor's point cloud is made up of all markers that have a keyframe at the given time.
             # For each marker, we create this row: [actor class (index+1), marker class (index+1), tx, ty, tz].
             # We use index+1 because the unlabeled markers will use index 0 for both classes.
-            [actor_idx + 1, marker_class, *self.get_worldspace(m, time, apply_transform)]
+            [actor_idx + 1, marker_class, *self.get_world_transform(m, time, apply_transform)]
             for marker_class, (marker_name, m) in enumerate(
                 self.markers[actor_idx].items(), start=1
             )
@@ -673,17 +688,17 @@ class FBXContainer:
         """
         return np.array([self.get_sc(f, apply_transform) for f in self.get_frame_range()])

-    def get_tdc(self, shuffle: bool = False,
-                r: Union[int, Tuple[int, int]] = None,
+    def get_tdc(self, r: Union[int, Tuple[int, int]] = None,
+                shuffle: bool = False,
                 apply_transform: bool = True) -> np.array:
         """
         For each frame in the frame range, collects the point cloud that is present in the file.
         Then it creates a ghost cloud of random markers that are treated as unlabeled markers,
         and adds them together to create a dense cloud whose shape is always (self.pc_size, 5).
         Optionally shuffles this dense cloud before adding it to the final list.
-        :param shuffle: If `True`, shuffles the dense point cloud of each frame.
         :param r: tuple of `int` that indicates the frame range to get. Default is None,
             resulting in the animation frame range.
+        :param shuffle: If `True`, shuffles the dense point cloud of each frame.
         :param apply_transform: `bool` Whether to transform the translation or not.
         :return: `np.array` that contains a dense point cloud for each frame,
             with a shape of (self.num_frames, self.pc_size, 5).
@@ -722,13 +737,14 @@ class FBXContainer:
         return np.array(clouds)

     def split_tdc(self, cloud: np.array = None, shuffle: bool = False, apply_transform: bool = True) \
-            -> Tuple[np.array, np.array, np.array]:
+            -> Tuple[np.array, np.array, np.array, np.array, np.array]:
         """
         Splits a timeline dense cloud with shape (self.num_frames, self.pc_size, 5) into 3 different
         arrays:
         1. A `np.array` with the actor classes as shape (self.num_frames, self.pc_size, 1).
         2. A `np.array` with the marker classes as shape (self.num_frames, self.pc_size, 1).
-        3. A `np.array` with the translation floats as shape (self.num_frames, self.pc_size, 3).
+        3. A `np.array` with the translation floats as shape (self.num_frames, self.pc_size, 4).
+        4. A `np.array` with the rotation Euler angles as shape (self.num_frames, self.pc_size, 3).
         :param cloud: `np.array` of shape (self.num_frames, self.pc_size, 5) that contains a dense point cloud
             (self.pc_size, 5) per frame in the frame range.
         :param shuffle: `bool` whether to shuffle the generated cloud if no cloud was given.
@@ -736,13 +752,15 @@ class FBXContainer:
         :return: Return tuple of `np.array` as (actor classes, marker classes, translation vectors).
         """
         if cloud is None:
-            cloud = self.get_tdc(shuffle, apply_transform=apply_transform)
+            cloud = self.get_tdc(shuffle=shuffle, apply_transform=apply_transform)

-        assert cloud.shape[1] == 1000, f"Dense cloud doesn't have enough points. {cloud.shape[1]}/1000."
-        assert cloud.shape[2] == 5, f"Dense cloud is missing columns: {cloud.shape[2]}/5."
+        if cloud.shape[1] != 1000:
+            raise ValueError(f"Dense cloud doesn't have enough points. {cloud.shape[1]}/1000.")
+        if cloud.shape[2] != 14:
+            raise ValueError(f"Dense cloud is missing columns: {cloud.shape[2]}/14.")

-        # Return np arrays as (actor classes, marker classes, translation vectors).
-        return cloud[:, :, 0], cloud[:, :, 1], cloud[:, :, -3:]
+        # Return np arrays as (actor classes, marker classes, translation vectors, rotation vectors, scale vectors).
+        return cloud[:, :, 0], cloud[:, :, 1], cloud[:, :, 2:5], cloud[:, :, 6:9], cloud[:, :, 10:13]

     def convert_class_to_actor(self, c: float = 0):
         """
@@ -841,19 +859,66 @@ class FBXContainer:
         self.remove_unlabeled_markers()
         self.remove_system()

+    def replace_animation_curves(self, node, curve_types):
+        anim_curve_dict = {
+            't': [
+                ("X", node.LclTranslation),
+                ("Y", node.LclTranslation),
+                ("Z", node.LclTranslation)
+            ],
+            'r': [
+                ("X", node.LclRotation),
+                ("Y", node.LclRotation),
+                ("Z", node.LclRotation)
+            ],
+            's': [
+                ("X", node.LclScaling),
+                ("Y", node.LclScaling),
+                ("Z", node.LclScaling)
+            ]
+        }
+
+        anim_curves = []
+        for curve_type in curve_types:
+            if curve_type in anim_curve_dict:
+                for anim_curve_name, p in anim_curve_dict[curve_type]:
+                    # Disconnect and remove existing animation curve, if any
+                    existing_anim_curve = p.GetCurve(self.anim_layer, anim_curve_name, False)
+                    # TODO: Make sure a new anim curve is properly connected.
+                    if existing_anim_curve:
+                        existing_anim_curve.KeyClear()
+                        anim_curves.append(existing_anim_curve)
+                    # p.DisconnectSrcObject(existing_anim_curve)
+                    # existing_anim_curve.Destroy()
+                    # del existing_anim_curve
+                    #
+                    # # Create a new animation curve and connect it to the node and animation layer
+                    # new_anim_curve = fbx.FbxAnimCurve.Create(self.manager, anim_curve_name)
+                    # p.ConnectSrcObject(new_anim_curve)
+                    # new_anim_curve.ConnectDstObject(self.anim_layer)
+                    #
+                    # anim_curves.append(new_anim_curve)
+
+        return anim_curves
+
     def replace_keyframes_per_marker(self, marker: fbx.FbxNode, marker_keys: dict) -> None:
-        parent = marker.GetParent()
-        for axis in ['X', 'Y', 'Z']:
-            curve = marker.LclTranslation.GetCurve(self.anim_layer, axis)
+
+        # Collect lcl transform curves.
+        curves = self.replace_animation_curves(marker, 't')
+
+        # TODO: Only set translation keys. Set rotation and scale as property values instead of curves.
+        for axis, curve in enumerate(curves):
+
             curve.KeyModifyBegin()
-            if curve is not None:
-                curve.KeyClear()

-            for frame, world_translation in marker_keys.items():
-                local_translation = world_to_local_translation(world_translation, parent)
+            # The dict has frames mapped to world matrices.
+            # The world_transform here is that full matrix, so we only need to convert this to local space.
+            for frame, world_transform in marker_keys.items():
+                # Convert world to local transform at the given frame.
+                lcl_t, lcl_r, lcl_s = world_to_local_transform(marker, world_transform, frame)
+                # Only for translations, set keyframes.
+                create_keyframe(curve, frame, lcl_t[axis])

-                for axis_idx in range(3):
-                    create_keyframe(curve, frame, local_translation[axis_idx])
             curve.KeyModifyEnd()

     def replace_keyframes_per_actor(self, actor: int, actor_keys: dict) -> None:
866
 
867
 
868
  # d = FBXContainer(Path('G:/Firestorm/mocap-ai/data/fbx/dowg/TAKE_01+1_ALL_001.fbx'))
869
- # t = fbx.FbxTime()
870
- # t.SetFrame(0)
871
- #
872
- # one = list(d.unlabeled_markers[0].EvaluateGlobalTransform(t).GetT())
873
- # # one = np.array([
874
- # # [one.Get(i, j) for j in range(4)] for i in range(4)
875
- # # ])
876
- # two = world_to_local_translation(d.unlabeled_markers[0], d.unlabeled_markers_parent, 0)[3, :3]
877
- # print(one)
878
- # print(two)
879
- # for e in zip(one, two):
880
- # print(e)
881
- # f = d.get_sc(0, False)
882
- # f = f[f[:, 1] == 1.]
883
- # print(f)
884
- # print(d.convert_class_to_actor(f[0]))
885
- # print(d.convert_class_to_marker(f[1]))
886
- # print(f[2:5])
887
- # lt = world_to_local_translation(f[2:5], d.unlabeled_markers_parent)
888
- # print(lt)
889
-
890
- # for u in sc:
891
- # print(d.convert_class_to_actor(u[0]), d.convert_class_to_marker(u[1]))
892
- # train_cloud = d.get_tdc()
893
- # train_actors, train_markers, train_X = d.split_tdc(train_cloud)
894
- # test_cloud = d.get_tdc(r=1, apply_transform=False)
895
- # for row in test_cloud:
896
- # for m in row[:5]:
897
- # t = m[2:5]
898
- # print(t)
899
- # lt = world_to_local_translation()
900
- # print()
901
- # print(row[:5, 2:5])
902
- # print(test_cloud.shape)
903
- # print(test_cloud[0, :, :2])
904
- # print(d.convert_class_to_actor(test_cloud[0, 0, 0]))
905
- # print(d.convert_class_to_marker(test_cloud[0, 0, 1]))
906
- # test_actors, test_markers, test_X = d.split_tdc(test_cloud, apply_transform=False)
907
- # Predict...
908
- # merged_preds = merge_tdc(train_actors, train_markers, test_X, ordered=False)
909
- # di = timeline_cloud_to_dict(test_cloud)
910
-
911
- # d.replace_keyframes_for_all_actors(di)
912
- # d.cleanup()
913
  # d.export_fbx(Path('G:/Firestorm/mocap-ai/data/fbx/export/TAKE_01+1_ALL_001.fbx'))
 
45
  np.zeros((missing, 1), dtype=int), # 0
46
  np.random.rand(missing, 1), # 0.0-1.0
47
  np.random.rand(missing, 1), # 0.0-1.0
48
+ np.random.rand(missing, 1), # 0.0-1.0
49
+ np.random.rand(missing, 1), # 0.0-1.0
50
+ np.random.rand(missing, 1), # 0.0-1.0
51
+ np.random.rand(missing, 1), # 0.0-1.0
52
+ np.random.rand(missing, 1), # 0.0-1.0
53
+ np.random.rand(missing, 1), # 0.0-1.0
54
+ np.random.rand(missing, 1), # 0.0-1.0
55
+ np.random.rand(missing, 1), # 0.0-1.0
56
+ np.random.rand(missing, 1), # 0.0-1.0
57
  np.random.rand(missing, 1) # 0.0-1.0
58
  ])
59
 
 
69
  return file_path.with_name(new_file_name)
70
 
71
 
72
+ def append_zero(arr: np.ndarray) -> np.ndarray:
73
+ zeros = np.zeros((arr.shape[0], arr.shape[1], 1), dtype=float)
74
+ return np.concatenate((arr, zeros), axis=-1)
75
+
76
+
77
+ def append_one(arr: np.ndarray) -> np.ndarray:
78
+ ones = np.ones((arr.shape[0], arr.shape[1], 1), dtype=float)
79
+ return np.concatenate((arr, ones), axis=-1)
80
+
81
+
82
  def merge_tdc(actor_classes: np.array,
83
  marker_classes: np.array,
84
  translation_vectors: np.array,
85
+ rotation_vectors: np.array,
86
+ scale_vectors: np.array,
87
  ordered: bool = True) -> np.array:
88
  # Actor and marker classes enter as shape (x, 1000), so use np.expand_dims to create an extra dimension at the end.
89
  # Return the concatenated array of shape (x, 1000, 5), which matches the original timeline dense cloud before
90
  # splitting it into sub arrays.
91
+
92
+ tdc = np.concatenate((np.expand_dims(actor_classes, -1),
93
+ np.expand_dims(marker_classes, -1),
94
+ append_zero(translation_vectors),
95
+ append_zero(rotation_vectors),
96
+ append_one(scale_vectors)), axis=2)
97
  if ordered:
98
  tdc = sort_cloud(tdc)
99
 
 
140
  return True
141
 
142
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
143
  def get_child_node_by_name(parent_node: fbx.FbxNode, name: str, ignore_namespace: bool = False) \
144
  -> Union[fbx.FbxNode, None]:
145
  for c in range(parent_node.GetChildCount()):
 
171
  if actor_class == 0 or marker_class == 0:
172
  continue
173
 
174
+ # Just to be sure, forcing the last numbers of each array to be the correct values.
175
+ # Also check self.get_world_transform() for this.
176
+ translations = data[frame, node, 2:5] + np.array([0.0])
177
+ rotations = data[frame, node, 6:9] + np.array([0.0])
178
+ scales = data[frame, node, 10:13] + np.array([1.0])
179
+
180
+ world_matrix = fbx.FbxAMatrix()
181
+ world_matrix.SetT(fbx.FbxVector4(*translations))
182
+ world_matrix.SetR(fbx.FbxVector4(*rotations))
183
+ world_matrix.SetS(fbx.FbxVector4(*scales))
184
 
185
  # Create the actor dictionary if it doesn't exist.
186
  if actor_class not in result:
 
191
  result[actor_class][marker_class] = {}
192
 
193
  # Add the frame number and translation vector to the node dictionary.
194
+ result[actor_class][marker_class][frame + start_frame] = world_matrix
195
 
196
  return result
197
 
198
 
199
+ def world_to_local_transform(node, world_transform, frame):
200
  t = fbx.FbxTime()
201
+ t.SetFrame(frame)
202
+ if node.GetParent():
203
+ # Get the parent's world transform
204
+ parent_world_transform = node.GetParent().EvaluateGlobalTransform(t)
 
 
 
205
 
206
+ # Compute the inverse of the parent's world transform
207
+ parent_world_transform_inv = parent_world_transform.Inverse()
 
 
 
208
 
209
+ # Multiply the inverse of the parent's world transform by the world transform to get the local transform
210
+ lcl = parent_world_transform_inv * world_transform
211
+ else:
212
+ # If the node doesn't have a parent, the local transform is the same as the world transform
213
+ lcl = world_transform
214
 
215
+ return [lcl.GetT()[t] for t in range(3)], [lcl.GetR()[r] for r in range(3)], [lcl.GetS()[s] for s in range(3)]
216
 
217
 
218
  class FBXContainer:
 
219
  def __init__(self, fbx_file: Path,
220
  volume_dims: Tuple[float] = (10., 4., 10.),
221
  max_actors: int = 10,
 
452
  z /= self.vol_z
453
  y = np.array(y) / self.vol_y
454
 
455
+ # TODO: Optional: Add any extra modifications to the point cloud here.
456
 
457
  # Append all values to a new array, one axis at a time.
458
  # This way it will match the column names order.
 
601
 
602
  return pd.DataFrame(all_poses, columns=columns)
603
 
604
+ def get_world_transform(self, m: fbx.FbxNode, time: fbx.FbxTime, apply_transform: bool = True) -> List[float]:
605
  """
606
  Evaluates the world translation of the given marker at the given time,
607
  scales it down by scale and turns it into a vector list.
 
610
  :param apply_transform: `bool` Whether to transform the translation or not.
611
  :return: Vector in the form: [tx, ty, tz].
612
  """
613
+ world = m.EvaluateGlobalTransform(time)
614
+ world = list(world.GetT()) + list(world.GetR()) + list(world.GetS())
615
+ # Make sure that the last numbers of each row are the correct values.
616
+ world[3] = 0.0
617
+ world[7] = 0.0
618
+ world[11] = 1.0
619
+
620
  if not apply_transform:
621
+ return world
622
 
623
  # First multiply by self.scale, which turns meters to centimeters.
624
  # Then divide by volume dimensions, to normalize to the total area of the capture volume.
625
+ world[2] = np.clip(world[2], -(self.vol_x * 0.5), self.vol_x * 0.5) * self.scale / self.vol_x
626
+ world[3] = np.clip(world[3], -(self.vol_y * 0.5), self.vol_y * 0.5) * self.scale / self.vol_y
627
+ world[4] = np.clip(world[4], -(self.vol_z * 0.5), self.vol_z * 0.5) * self.scale / self.vol_z
628
 
629
+ return world
630
 
631
  def is_kf_present(self, marker: fbx.FbxNode, time: fbx.FbxTime) -> bool:
632
  """
 
653
  # because by adding the labeled markers after (which use classes 1-74),
654
  # we eventually return an array that doesn't need to be sorted anymore.
655
  cloud = [
656
+ [0, 0, *self.get_world_transform(m, time, apply_transform)]
657
  for m in self.unlabeled_markers
658
  if self.is_kf_present(m, time)
659
  ]
 
664
  # This actor's point cloud is made up of all markers that have a keyframe at the given time.
665
  # For each marker, we create this row: [actor class (index+1), marker class (index+1), tx, ty, tz].
666
  # We use index+1 because the unlabeled markers will use index 0 for both classes.
667
+ [actor_idx + 1, marker_class, *self.get_world_transform(m, time, apply_transform)]
668
  for marker_class, (marker_name, m) in enumerate(
669
  self.markers[actor_idx].items(), start=1
670
  )
 
688
  """
689
  return np.array([self.get_sc(f, apply_transform) for f in self.get_frame_range()])
690
 
691
+ def get_tdc(self, r: Union[int, Tuple[int, int]] = None,
692
+ shuffle: bool = False,
693
  apply_transform: bool = True) -> np.array:
694
  """
695
  For each frame in the frame range, collects the point cloud that is present in the file.
696
  Then it creates a ghost cloud of random markers that are treated as unlabeled markers,
697
  and adds them together to create a dense cloud whose shape is always (self.pc_size, 5).
698
  Optionally shuffles this dense cloud before adding it to the final list.
 
699
  :param r: tuple of `int` that indicates the frame range to get. Default is None,
700
  resulting in the animation frame range.
701
+ :param shuffle: If `True`, shuffles the dense point cloud of each frame.
702
  :param apply_transform: `bool` Whether to transform the translation or not.
703
  :return: `np.array` that contains a dense point cloud for each frame,
704
  with a shape of (self.num_frames, self.pc_size, 5).
 
737
  return np.array(clouds)
738
 
739
  def split_tdc(self, cloud: np.array = None, shuffle: bool = False, apply_transform: bool = True) \
740
+ -> Tuple[np.array, np.array, np.array, np.array, np.array]:
741
  """
742
  Splits a timeline dense cloud with shape (self.num_frames, self.pc_size, 5) into 3 different
743
  arrays:
744
  1. A `np.array` with the actor classes as shape (self.num_frames, self.pc_size, 1).
745
  2. A `np.array` with the marker classes as shape (self.num_frames, self.pc_size, 1).
746
+ 3. A `np.array` with the translation floats as shape (self.num_frames, self.pc_size, 4).
747
+ 4. A `np.array` with the rotation Euler angles as shape (self.num_frames, self.pc_size, 3).
748
  :param cloud: `np.array` of shape (self.num_frames, self.pc_size, 5) that contains a dense point cloud
749
  (self.pc_size, 5) per frame in the frame range.
750
  :param shuffle: `bool` whether to shuffle the generated cloud if no cloud was given.
 
752
  :return: Return tuple of `np.array` as (actor classes, marker classes, translation vectors).
753
  """
754
  if cloud is None:
755
+ cloud = self.get_tdc(shuffle=shuffle, apply_transform=apply_transform)
756
 
757
+ if cloud.shape[1] != 1000:
758
+ raise ValueError(f"Dense cloud doesn't have enough points. {cloud.shape[1]}/1000.")
759
+ if cloud.shape[2] != 14:
760
+ raise ValueError(f"Dense cloud is missing columns: {cloud.shape[2]}/14.")
761
 
762
+ # Return np arrays as (actor classes, marker classes, translation vectors, rotation vectors, scale vectors).
763
+ return cloud[:, :, 0], cloud[:, :, 1], cloud[:, :, 2:5], cloud[:, :, 6:9], cloud[:, :, 10:13]
764
 
765
  def convert_class_to_actor(self, c: float = 0):
766
  """
 
859
  self.remove_unlabeled_markers()
860
  self.remove_system()
861
 
862
+ def replace_animation_curves(self, node, curve_types):
863
+ anim_curve_dict = {
864
+ 't': [
865
+ ("X", node.LclTranslation),
866
+ ("Y", node.LclTranslation),
867
+ ("Z", node.LclTranslation)
868
+ ],
869
+ 'r': [
870
+ ("X", node.LclRotation),
871
+ ("Y", node.LclRotation),
872
+ ("Z", node.LclRotation)
873
+ ],
874
+ 's': [
875
+ ("X", node.LclScaling),
876
+ ("Y", node.LclScaling),
877
+ ("Z", node.LclScaling)
878
+ ]
879
+ }
880
+
881
+ anim_curves = []
882
+ for curve_type in curve_types:
883
+ if curve_type in anim_curve_dict:
884
+ for anim_curve_name, p in anim_curve_dict[curve_type]:
885
+ # Disconnect and remove existing animation curve, if any
886
+ existing_anim_curve = p.GetCurve(self.anim_layer, anim_curve_name, False)
887
+ # TODO: Make sure a new anim curve is properly connected.
888
+ if existing_anim_curve:
889
+ existing_anim_curve.KeyClear()
890
+ anim_curves.append(existing_anim_curve)
891
+ # p.DisconnectSrcObject(existing_anim_curve)
892
+ # existing_anim_curve.Destroy()
893
+ # del existing_anim_curve
894
+ #
895
+ # # Create a new animation curve and connect it to the node and animation layer
896
+ # new_anim_curve = fbx.FbxAnimCurve.Create(self.manager, anim_curve_name)
897
+ # p.ConnectSrcObject(new_anim_curve)
898
+ # new_anim_curve.ConnectDstObject(self.anim_layer)
899
+ #
900
+ # anim_curves.append(new_anim_curve)
901
+
902
+ return anim_curves
903
+
904
  def replace_keyframes_per_marker(self, marker: fbx.FbxNode, marker_keys: dict) -> None:
905
+
906
+ # Collect lcl transform curves.
907
+ curves = self.replace_animation_curves(marker, 't')
908
+
909
+ # TODO: Only set translation keys. Set rotation and scale as property values instead of curves.
910
+ for axis, curve in enumerate(curves):
911
+
912
  curve.KeyModifyBegin()
 
 
913
 
914
+ # The dict has frames mapped to world matrices.
915
+ # The world_transform here is that full matrix, so we only need to convert this to local space.
916
+ for frame, world_transform in marker_keys.items():
917
+ # Convert world to local transform at the given frame.
918
+ lcl_t, lcl_r, lcl_s = world_to_local_transform(marker, world_transform, frame)
919
+ # Only for translations, set keyframes.
920
+ create_keyframe(curve, frame, lcl_t[axis])
921
 
 
 
922
  curve.KeyModifyEnd()
923
 
924
  def replace_keyframes_per_actor(self, actor: int, actor_keys: dict) -> None:
 
931
 
932
 
933
  # d = FBXContainer(Path('G:/Firestorm/mocap-ai/data/fbx/dowg/TAKE_01+1_ALL_001.fbx'))
934
+ # # cloud = d.get_tdc(apply_transform=False)
935
+ # actors_train, markers_train, t_train, r_train, s_train = d.split_tdc(apply_transform=True)
936
+ # actors_test, markers_test, t_test, r_test, s_test = d.split_tdc(apply_transform=False)
937
+ # # splits = d.split_tdc(apply_transform=False)
938
+ # merged = merge_tdc(actors_train, markers_train, t_test, r_test, s_test)
939
+ # pc_dict = timeline_cloud_to_dict(merged, d.start_frame)
940
+ # d.replace_keyframes_for_all_actors(pc_dict)
941
+ # # d.cleanup()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
942
  # d.export_fbx(Path('G:/Firestorm/mocap-ai/data/fbx/export/TAKE_01+1_ALL_001.fbx'))