Natsha committed on
Commit 26d5eb9 · 1 Parent(s): 4631d6a

The app now allows you to select FBX files to process.

Files changed (4)
  1. app.py +63 -5
  2. globals.py +15 -0
  3. labeler/read_fbx.py +230 -0
  4. requirements.txt +3 -1
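For context, a minimal sketch of what the new upload flow does once a file is on disk, assuming the Autodesk FBX Python SDK (the fbx module, which requirements.txt does not cover) is installed and take.fbx is a hypothetical capture file with the expected marker set:

from pathlib import Path

from labeler import read_fbx

# Hypothetical input file; any FBX containing the standard marker set should work.
fbx_path = Path('take.fbx')

# Mirrors process_file() in app.py: parse the markers and export the animation as CSV bytes.
csv_bytes = read_fbx.MarkerData(fbx_path).export(t='string')
Path('take.csv').write_bytes(csv_bytes)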
app.py CHANGED
@@ -1,8 +1,66 @@
- import fbx
+ # Import standard libs.
+ import tempfile
+ import os
+ from pathlib import Path
+
+ # Import primary libs.
  import streamlit as st

- st.title('Optical Motion Capture AI')
+ # Import custom libs.
+ from labeler import read_fbx
+
+
+ def process_file(file: Path) -> bytes:
+     fbx_content = read_fbx.MarkerData(file)
+     return fbx_content.export(t='string')
+
+
+ # Initialize session state variables if they don't exist
+ if "uploaded_files" not in st.session_state:
+     st.session_state.uploaded_files = {}
+ if "processed_files" not in st.session_state:
+     st.session_state.processed_files = {}
+
+ st.title('Optical MoCap AI Processing')
+
+ st.write('Select FBX files to upload and process. This will extract all marker animation data and turn it into a csv.')
+
+ new_uploaded_files = st.file_uploader('Select FBX files', accept_multiple_files=True, type='fbx', label_visibility='collapsed')
+
+ for uploaded_file in new_uploaded_files:
+     if uploaded_file.name not in st.session_state.processed_files.keys():
+         st.session_state.uploaded_files[uploaded_file.name] = uploaded_file
+
+ if st.session_state.uploaded_files and st.button("Process Files"):
+     progress_bar = st.progress(0)
+     # Create a temporary directory to store the newly uploaded files
+     with tempfile.TemporaryDirectory() as temp_dir:
+         incr = 1. / len(st.session_state.uploaded_files)
+
+         for idx, (name, uploaded_file) in enumerate(st.session_state.uploaded_files.items()):
+             # Save the uploaded file to the temporary directory
+             temp_path = Path(os.path.join(temp_dir, name))
+             with open(temp_path, "wb") as f:
+                 f.write(uploaded_file.getbuffer())
+             print(f'[LOAD FBX] Finished uploading {temp_path}.')
+             # Process the file and cache the resulting CSV bytes for download
+             st.session_state.processed_files[name] = process_file(temp_path)
+             progress_bar.progress((idx+1) * incr, f'Processing {name}')
+
+     st.session_state.uploaded_files = {}
+     st.experimental_rerun()
+
+ for name in list(st.session_state.processed_files.keys()):
+     new_file_name = name.replace('.fbx', '.csv')
+     if st.download_button(
+             label=f"Download {new_file_name}",
+             data=st.session_state.processed_files[name],
+             file_name=new_file_name,
+             mime='text/csv'):
+         del st.session_state.processed_files[name]
+         st.experimental_rerun()

+ if st.button('Delete cache', type='primary'):
+     st.session_state.uploaded_files = {}
+     st.session_state.processed_files = {}
+     st.experimental_rerun()
globals.py ADDED
@@ -0,0 +1,15 @@
+ def get_time_modes():
+     return ('eDefaultMode', 'eFrames120', 'eFrames100', 'eFrames60', 'eFrames50', 'eFrames48',
+             'eFrames30', 'eFrames30Drop', 'eNTSCDropFrame', 'eNTSCFullFrame', 'ePAL',
+             'eFrames24', 'eFrames1000', 'eFilmFullFrame', 'eCustom', 'eFrames96', 'eFrames72',
+             'eFrames59dot94', 'eFrames119dot88')
+
+
+ def get_marker_names():
+     return ('ARIEL', 'LFHD', 'LBHD', 'RFHD', 'RBHD', 'C7', 'T10', 'CLAV', 'STRN', 'LFSH', 'LBSH',
+             'LUPA', 'LELB', 'LIEL', 'LFRM', 'LIWR', 'LOWR', 'LIHAND', 'LOHAND', 'LTHM3', 'LTHM6',
+             'LIDX3', 'LIDX6', 'LMID0', 'LMID6', 'LRNG3', 'LRNG6', 'LPNK3', 'LPNK6', 'RFSH', 'RBSH',
+             'RUPA', 'RELB', 'RIEL', 'RFRM', 'RIWR', 'ROWR', 'RIHAND', 'ROHAND', 'RTHM3', 'RTHM6',
+             'RIDX3', 'RIDX6', 'RMID0', 'RMID6', 'RRNG3', 'RRNG6', 'RPNK3', 'RPNK6', 'LFWT', 'MFWT',
+             'RFWT', 'LBWT', 'MBWT', 'RBWT', 'LTHI', 'LKNE', 'LKNI', 'LSHN', 'LANK', 'LHEL', 'LMT5',
+             'LMT1', 'LTOE', 'RTHI', 'RKNE', 'RKNI', 'RSHN', 'RANK', 'RHEL', 'RMT5', 'RMT1', 'RTOE')
labeler/read_fbx.py ADDED
@@ -0,0 +1,230 @@
+ import pandas as pd
+ import numpy as np
+ from pathlib import Path
+
+ import contextlib
+ import fbx
+ from typing import List, Union
+
+ # Import custom data.
+ import globals
+
+
+ class MarkerData:
+     # TODO: Model is currently built for training. Add testing mode.
+     def __init__(self, fbx_file: Path):
+         """
+         Class that stores references to important nodes in an FBX file.
+         Offers utility functions to quickly load animation data.
+         :param fbx_file: `Path` to the file to load.
+         """
+         self.time_modes = globals.get_time_modes()
+         self.marker_names = globals.get_marker_names()
+
+         self.markers = []
+         self.actor_names = []
+         self.actors = []
+
+         self.volume_dim_x = 10.
+         self.volume_dim_y = 4.
+
+         self.fbx_file = fbx_file
+         self.valid_frames = []
+
+         self.__init_scene()
+         self.__init_anim()
+         self.__init_actors()
+         self.__init_markers()
+
+     def __init_scene(self):
+         # Create an FBX manager and importer
+         manager = fbx.FbxManager.Create()
+         importer = fbx.FbxImporter.Create(manager, '')
+
+         # Import the FBX file
+         importer.Initialize(str(self.fbx_file))
+         self.scene = fbx.FbxScene.Create(manager, '')
+         importer.Import(self.scene)
+         self.root = self.scene.GetRootNode()
+         self.time_mode = self.scene.GetGlobalSettings().GetTimeMode()
+
+         # Destroy importer to remove reference to imported file.
+         # This will allow us to delete the uploaded file.
+         importer.Destroy()
+
+     def __init_anim(self):
+         # Get the animation stack and layer.
+         anim_stack = self.scene.GetCurrentAnimationStack()
+         self.anim_layer = anim_stack.GetSrcObject(fbx.FbxCriteria.ObjectType(fbx.FbxAnimLayer.ClassId), 0)
+
+         # Find the total number of frames to expect from the local time span.
+         local_time_span = anim_stack.GetLocalTimeSpan()
+         self.num_frames = int(local_time_span.GetDuration().GetFrameCount(self.time_mode))
+
+     def __init_actors(self):
+
+         # Find all parent nodes (/System, /_Unlabeled_Markers, /Actor1, etc).
+         gen1_nodes = [self.root.GetChild(i) for i in range(self.root.GetChildCount())]
+         for gen1_node in gen1_nodes:
+             gen2_nodes = [gen1_node.GetChild(i) for i in
+                           range(gen1_node.GetChildCount())]  # Actor nodes (/Mimi/Hips, /Mimi/ARIEL, etc)
+
+             # If the first four marker names are children of this parent, it must be an actor.
+             if all(name in [node.GetName() for node in gen2_nodes] for name in self.marker_names[:4]):
+                 self.actor_names.append(gen1_node.GetName())
+                 self.actors.append(gen1_node)
+
+         self.actor_count = len(self.actors)
+         self.valid_frames = [[] for _ in range(self.actor_count)]
+
+     def __init_markers(self):
+         for actor_node in self.actors:
+             actor_markers = {}
+             for marker_name in self.marker_names:
+                 for actor_idx in range(actor_node.GetChildCount()):
+                     child = actor_node.GetChild(actor_idx)
+                     child_name = child.GetName()
+                     if child_name == marker_name:
+                         actor_markers[child_name] = child
+
+             assert len(actor_markers) == len(self.marker_names), f'{actor_node.GetName()} does not have all markers.'
+
+             self.markers.append(actor_markers)
+
+     def _check_actor(self, actor: int = 0):
+         assert 0 <= actor < self.actor_count, f'Actor number must be between 0 and {self.actor_count - 1}. ' \
+                                               f'It is {actor}.'
+
+     def _set_valid_frames_for_actor(self, actor: int = 0):
+         self._check_actor(actor)
+
+         frames = list(range(self.num_frames))
+         for marker_name in self.marker_names:
+             marker = self.markers[actor][marker_name]
+             t_curve = marker.LclTranslation.GetCurve(self.anim_layer, 'X')
+             keys = [t_curve.KeyGet(i).GetTime().GetFrameCount(self.time_mode) for i in range(t_curve.KeyGetCount())]
+             for frame in list(frames):  # Iterate over a copy so removing items does not skip frames.
+                 if frame not in keys:
+                     with contextlib.suppress(ValueError):
+                         frames.remove(frame)
+
+         self.valid_frames[actor] = frames
+
+     def _check_valid_frames(self, actor: int = 0):
+         if not len(self.valid_frames[actor]):
+             self._set_valid_frames_for_actor(actor)
+
+     def _modify_pose(self, actor, frame) -> List[float]:
+         # Set new frame to evaluate at.
+         time = fbx.FbxTime()
+         time.SetFrame(frame)
+         # Prepare arrays for each axis.
+         x, y, z = [], [], []
+
+         # For each marker, store the x, y and z global position.
+         for n, m in self.markers[actor].items():
+             t = m.EvaluateGlobalTransform(time).GetRow(3)
+             x += [t[0] * 0.01]
+             y += [t[1] * 0.01]
+             z += [t[2] * 0.01]
+
+         # Move the point cloud to the center of the x and z axes. This will put the actor in the middle.
+         x = self.center_axis(x)
+         z = self.center_axis(z)
+
+         # Move the actor to the middle of the volume floor by adding volume_dim_x/2 to x and z.
+         x += self.volume_dim_x / 2.
+         z += self.volume_dim_x / 2.
+
+         # Squeeze the actor into the 1x1 plane for the neural network by dividing the axes.
+         x /= self.volume_dim_x
+         z /= self.volume_dim_x
+         y = np.array(y) / self.volume_dim_y
+
+         # TODO: Optionally: Add any extra modifications to the point cloud here.
+
+         # Append all values to a new array, one axis at a time.
+         # This way it will match the column names order.
+         pose = []
+         for i in range(len(x)):
+             pose += [x[i]]
+             pose += [y[i]]
+             pose += [z[i]]
+         return pose
+
+     def get_marker_by_name(self, actor: int, name: str):
+         self._check_actor(actor)
+         return self.markers[actor][name]
+
+     def get_valid_frames_for_actor(self, actor: int = 0):
+         self._check_valid_frames(actor)
+         return self.valid_frames[actor]
+
+     def print_valid_frames_stats_for_actor(self, actor: int = 0):
+         self._check_actor(actor)
+         self._check_valid_frames(actor)
+
+         len_valid = len(self.valid_frames[actor])
+         ratio = (len_valid / self.num_frames) * 100
+         print(f'Actor {self.actor_names[actor]}: Total: {self.num_frames}, valid: {len_valid}, missing: '
+               f'{self.num_frames - len_valid}, ratio: {ratio:.2f}% valid.')
+
+         return self.actor_names[actor], self.num_frames, len_valid, ratio
+
+     def columns_from_joints(self):
+         columns = []
+         for name in self.marker_names:
+             columns += [f'{name}x', f'{name}y', f'{name}z']
+
+         return columns
+
+     @staticmethod
+     def center_axis(a) -> np.ndarray:
+         a = np.array(a)
+         _min = np.min(a)
+         _max = np.max(a)
+
+         _c = (_max + _min) / 2.  # Midpoint of the axis; subtracting it centers the point cloud on the origin.
+         a -= _c
+         return a
+
+     def extract_translations_per_actor(self, actor: int = 0):
+         self._check_actor(actor)
+         self._check_valid_frames(actor)
+
+         poses = []
+         # Go through all valid frames for this actor.
+         # Note that these frames can be different per actor.
+         for frame in self.valid_frames[actor]:
+             # Get the centered point cloud as an array.
+             pose_at_frame = self._modify_pose(actor, frame)
+             poses.append(pose_at_frame)
+
+         return poses
+
+     def extract_all_translations(self) -> pd.DataFrame:
+
+         columns = self.columns_from_joints()
+
+         all_poses = []
+
+         for i in range(self.actor_count):
+             all_poses.extend(self.extract_translations_per_actor(i))
+
+         return pd.DataFrame(all_poses, columns=columns)
+
+     def export(self, t: str = 'csv', output_file: Path = None) -> Union[bytes, Path]:
+         # Get the dataframe with all animation data.
+         df = self.extract_all_translations()
+
+         if t == 'string':
+             return df.to_csv(index=False).encode('utf-8')
+
+         if output_file is None:
+             output_file = self.fbx_file.with_suffix('.csv')
+
+         if output_file.suffix != '.csv':
+             raise ValueError(f'{output_file} needs to be a .csv file.')
+
+         df.to_csv(output_file, index=False)
+         return output_file
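As a usage note, a hedged sketch of driving MarkerData outside the Streamlit app, assuming the repository root is on the import path so that the globals module resolves, the fbx SDK is available, and session.fbx is a hypothetical capture file:

from pathlib import Path

from labeler.read_fbx import MarkerData

data = MarkerData(Path('session.fbx'))      # parses the scene, actors and markers on construction
data.print_valid_frames_stats_for_actor(0)  # reports how many frames are fully keyed for actor 0
csv_path = data.export()                    # default t='csv' writes session.csv next to the input
print(f'Wrote {csv_path}')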
requirements.txt CHANGED
@@ -1 +1,3 @@
- streamlit
+ streamlit~=1.21.0
+ pandas~=1.3.5
+ numpy~=1.21.5