kleinhe committed
Commit c3d0293 · 0 Parent(s)
This view is limited to 50 files because it contains too many changes.
Files changed (50):
  1. .gitattributes +35 -0
  2. .gitignore +4 -0
  3. LICENSE +21 -0
  4. SMPLX/__pycache__/joints2smpl.cpython-310.pyc +0 -0
  5. SMPLX/__pycache__/joints2smpl.cpython-39.pyc +0 -0
  6. SMPLX/__pycache__/read_from_npy.cpython-310.pyc +0 -0
  7. SMPLX/__pycache__/read_from_npy.cpython-311.pyc +0 -0
  8. SMPLX/__pycache__/read_from_npy.cpython-39.pyc +0 -0
  9. SMPLX/__pycache__/read_joints_from_pose.cpython-39.pyc +0 -0
  10. SMPLX/__pycache__/rotation_conversions.cpython-310.pyc +0 -0
  11. SMPLX/__pycache__/rotation_conversions.cpython-311.pyc +0 -0
  12. SMPLX/__pycache__/rotation_conversions.cpython-39.pyc +0 -0
  13. SMPLX/__pycache__/transfer_smpls.cpython-39.pyc +0 -0
  14. SMPLX/__pycache__/visual_amass.cpython-39.pyc +0 -0
  15. SMPLX/__pycache__/visualize.cpython-38.pyc +0 -0
  16. SMPLX/config_files/smpl2smplh.yaml +25 -0
  17. SMPLX/config_files/smpl2smplx.yaml +26 -0
  18. SMPLX/config_files/smplh2smpl.yaml +24 -0
  19. SMPLX/config_files/smplh2smplx.yaml +26 -0
  20. SMPLX/config_files/smplh2smplx_as.yaml +26 -0
  21. SMPLX/config_files/smplh2smplx_onepose.yaml +27 -0
  22. SMPLX/config_files/smplx2smpl.yaml +25 -0
  23. SMPLX/config_files/smplx2smplh.yaml +27 -0
  24. SMPLX/joints2smpl.py +59 -0
  25. SMPLX/read_from_npy.py +108 -0
  26. SMPLX/read_joints_from_pose.py +110 -0
  27. SMPLX/rotation_conversions.py +532 -0
  28. SMPLX/smplx/__init__.py +30 -0
  29. SMPLX/smplx/__pycache__/__init__.cpython-310.pyc +0 -0
  30. SMPLX/smplx/__pycache__/__init__.cpython-311.pyc +0 -0
  31. SMPLX/smplx/__pycache__/__init__.cpython-39.pyc +0 -0
  32. SMPLX/smplx/__pycache__/body_models.cpython-310.pyc +0 -0
  33. SMPLX/smplx/__pycache__/body_models.cpython-311.pyc +0 -0
  34. SMPLX/smplx/__pycache__/body_models.cpython-39.pyc +0 -0
  35. SMPLX/smplx/__pycache__/joint_names.cpython-39.pyc +0 -0
  36. SMPLX/smplx/__pycache__/lbs.cpython-310.pyc +0 -0
  37. SMPLX/smplx/__pycache__/lbs.cpython-311.pyc +0 -0
  38. SMPLX/smplx/__pycache__/lbs.cpython-39.pyc +0 -0
  39. SMPLX/smplx/__pycache__/utils.cpython-310.pyc +0 -0
  40. SMPLX/smplx/__pycache__/utils.cpython-311.pyc +0 -0
  41. SMPLX/smplx/__pycache__/utils.cpython-39.pyc +0 -0
  42. SMPLX/smplx/__pycache__/vertex_ids.cpython-310.pyc +0 -0
  43. SMPLX/smplx/__pycache__/vertex_ids.cpython-311.pyc +0 -0
  44. SMPLX/smplx/__pycache__/vertex_ids.cpython-39.pyc +0 -0
  45. SMPLX/smplx/__pycache__/vertex_joint_selector.cpython-310.pyc +0 -0
  46. SMPLX/smplx/__pycache__/vertex_joint_selector.cpython-311.pyc +0 -0
  47. SMPLX/smplx/__pycache__/vertex_joint_selector.cpython-39.pyc +0 -0
  48. SMPLX/smplx/body_models.py +0 -0
  49. SMPLX/smplx/joint_names.py +320 -0
  50. SMPLX/smplx/lbs.py +405 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,4 @@
+ body_models
+ results
+ weights
+ tada-extend
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2023 xin he
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
SMPLX/__pycache__/joints2smpl.cpython-310.pyc ADDED
Binary file (1.71 kB).
SMPLX/__pycache__/joints2smpl.cpython-39.pyc ADDED
Binary file (1.71 kB).
SMPLX/__pycache__/read_from_npy.cpython-310.pyc ADDED
Binary file (2.75 kB).
SMPLX/__pycache__/read_from_npy.cpython-311.pyc ADDED
Binary file (6.76 kB).
SMPLX/__pycache__/read_from_npy.cpython-39.pyc ADDED
Binary file (2.76 kB).
SMPLX/__pycache__/read_joints_from_pose.cpython-39.pyc ADDED
Binary file (4.24 kB).
SMPLX/__pycache__/rotation_conversions.cpython-310.pyc ADDED
Binary file (16.8 kB).
SMPLX/__pycache__/rotation_conversions.cpython-311.pyc ADDED
Binary file (24.7 kB).
SMPLX/__pycache__/rotation_conversions.cpython-39.pyc ADDED
Binary file (16.8 kB).
SMPLX/__pycache__/transfer_smpls.cpython-39.pyc ADDED
Binary file (4.13 kB).
SMPLX/__pycache__/visual_amass.cpython-39.pyc ADDED
Binary file (5.6 kB).
SMPLX/__pycache__/visualize.cpython-38.pyc ADDED
Binary file (3.31 kB).
 
SMPLX/config_files/smpl2smplh.yaml ADDED
@@ -0,0 +1,25 @@
+ datasets:
+   mesh_folder:
+     data_folder: 'transfer_data/meshes/smpl'
+ deformation_transfer_path: 'transfer_data/smpl2smplh_def_transfer.pkl'
+ mask_ids_fname: ''
+ summary_steps: 100
+
+ edge_fitting:
+   per_part: False
+
+ optim:
+   type: 'trust-ncg'
+   maxiters: 100
+   gtol: 1e-06
+
+ body_model:
+   model_type: "smplh"
+   # SMPL+H has no neutral model, so we have to manually select the gender
+   gender: "female"
+   # gender: "male"
+   folder: "transfer_data/body_models"
+   use_compressed: False
+   smplh:
+     betas:
+       num: 10
SMPLX/config_files/smpl2smplx.yaml ADDED
@@ -0,0 +1,26 @@
+ datasets:
+   mesh_folder:
+     data_folder: 'transfer_data/meshes/smpl'
+ deformation_transfer_path: 'transfer_data/smpl2smplx_deftrafo_setup.pkl'
+ mask_ids_fname: 'smplx_mask_ids.npy'
+ summary_steps: 100
+
+ edge_fitting:
+   per_part: False
+
+ optim:
+   type: 'trust-ncg'
+   maxiters: 100
+   gtol: 1e-06
+
+ body_model:
+   model_type: "smplx"
+   gender: "neutral"
+   folder: "transfer_data/body_models"
+   use_compressed: False
+   use_face_contour: True
+   smplx:
+     betas:
+       num: 10
+     expression:
+       num: 10
SMPLX/config_files/smplh2smpl.yaml ADDED
@@ -0,0 +1,24 @@
+ datasets:
+   mesh_folder:
+     data_folder: 'transfer_data/meshes/smplh'
+ deformation_transfer_path: 'transfer_data/smplh2smpl_def_transfer.pkl'
+ mask_ids_fname: ''
+ summary_steps: 100
+
+ edge_fitting:
+   per_part: False
+
+ optim:
+   type: 'trust-ncg'
+   maxiters: 100
+   gtol: 1e-06
+
+ body_model:
+   model_type: "smpl"
+   gender: "neutral"
+   folder: "transfer_data/body_models"
+   use_compressed: False
+   use_face_contour: True
+   smpl:
+     betas:
+       num: 10
SMPLX/config_files/smplh2smplx.yaml ADDED
@@ -0,0 +1,26 @@
+ datasets:
+   mesh_folder:
+     data_folder: 'transfer_data/meshes/smplh'
+ deformation_transfer_path: 'transfer_data/smplh2smplx_deftrafo_setup.pkl'
+ mask_ids_fname: 'smplx_mask_ids.npy'
+ summary_steps: 100
+
+ edge_fitting:
+   per_part: False
+
+ optim:
+   type: 'trust-ncg'
+   maxiters: 100
+   gtol: 1e-06
+
+ body_model:
+   model_type: "smplx"
+   gender: "neutral"
+   folder: "transfer_data/body_models"
+   use_compressed: False
+   use_face_contour: True
+   smplx:
+     betas:
+       num: 10
+     expression:
+       num: 10
SMPLX/config_files/smplh2smplx_as.yaml ADDED
@@ -0,0 +1,26 @@
+ datasets:
+   mesh_folder:
+     data_folder: 'transfer_data/meshes/amass_sample'
+ deformation_transfer_path: 'transfer_data/smplh2smplx_deftrafo_setup.pkl'
+ mask_ids_fname: 'smplx_mask_ids.npy'
+ summary_steps: 100
+
+ edge_fitting:
+   per_part: False
+
+ optim:
+   type: 'trust-ncg'
+   maxiters: 100
+   gtol: 1e-06
+
+ body_model:
+   model_type: "smplx"
+   gender: "male"
+   folder: "/data/TTA/data/body_models"
+   use_compressed: False
+   use_face_contour: True
+   smplx:
+     betas:
+       num: 10
+     expression:
+       num: 10
SMPLX/config_files/smplh2smplx_onepose.yaml ADDED
@@ -0,0 +1,27 @@
+ datasets:
+   mesh_folder:
+     data_folder: 'transfer_data/meshes/amass_onepose'
+ deformation_transfer_path: 'transfer_data/smplh2smplx_deftrafo_setup.pkl'
+ mask_ids_fname: 'smplx_mask_ids.npy'
+ summary_steps: 100
+
+ edge_fitting:
+   per_part: False
+
+ optim:
+   type: 'adam'
+   lr: 0.1
+   maxiters: 10000
+   gtol: 1e-06
+
+ body_model:
+   model_type: "smplx"
+   gender: "neutral"
+   folder: "models"
+   use_compressed: False
+   use_face_contour: True
+   smplx:
+     betas:
+       num: 10
+     expression:
+       num: 10
SMPLX/config_files/smplx2smpl.yaml ADDED
@@ -0,0 +1,25 @@
+ datasets:
+   mesh_folder:
+     data_folder: 'meshes/smplx'
+ deformation_transfer_path: 'transfer_data/smplx2smpl_deftrafo_setup.pkl'
+ mask_ids_fname: ''
+ summary_steps: 100
+
+ edge_fitting:
+   per_part: False
+
+ optim:
+   type: 'lbfgs'
+   maxiters: 200
+   gtol: 1e-06
+
+ body_model:
+   model_type: "smpl"
+   gender: "neutral"
+   ext: 'pkl'
+   folder: "transfer_data/body_models"
+   use_compressed: False
+   use_face_contour: True
+   smpl:
+     betas:
+       num: 10
SMPLX/config_files/smplx2smplh.yaml ADDED
@@ -0,0 +1,27 @@
+ datasets:
+   mesh_folder:
+     data_folder: 'meshes/smplx'
+ deformation_transfer_path: 'transfer_data/smplx2smplh_deftrafo_setup.pkl'
+ mask_ids_fname: ''
+ summary_steps: 100
+
+ edge_fitting:
+   per_part: False
+
+ optim:
+   type: 'lbfgs'
+   maxiters: 200
+   gtol: 1e-06
+
+ body_model:
+   model_type: "smplh"
+   # SMPL+H has no neutral model, so we have to manually select the gender
+   gender: "female"
+   # gender: "male"
+   ext: 'pkl'
+   folder: "transfer_data/body_models"
+   use_compressed: False
+   use_face_contour: True
+   smplh:
+     betas:
+       num: 10
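
All eight configs share one schema: a mesh source under `datasets`, a deformation-transfer pickle, optimizer settings, and the target body model. As a sanity check they parse with any standard YAML loader; a minimal sketch assuming PyYAML (the transfer script itself may use a different config system, e.g. OmegaConf):

```python
# Minimal config-loading sketch; PyYAML is an assumption of this example.
import yaml

with open("SMPLX/config_files/smpl2smplx.yaml") as f:
    cfg = yaml.safe_load(f)

# Keys mirror the YAML shown above.
print(cfg["body_model"]["model_type"])                 # smplx
print(cfg["optim"]["type"], cfg["optim"]["maxiters"])  # trust-ncg 100
print(cfg["datasets"]["mesh_folder"]["data_folder"])   # transfer_data/meshes/smpl
```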
SMPLX/joints2smpl.py ADDED
@@ -0,0 +1,59 @@
+ import torch
+ from SMPLX.visualize_joint2smpl.simplify_loc2rot import joints2smpl
+ import argparse
+ import numpy as np
+ import os
+ from tqdm import tqdm
+
+ parser = argparse.ArgumentParser(description='transfer joint3d to smpls')
+ parser.add_argument("--model_path", default="/data/TTA/data/body_models")
+ parser.add_argument('--source_path', default="/data/TTA/data/humanact12/group_000")
+ parser.add_argument("--target_path", default="/data/TTA/data/humanact_smplh/group_000")
+ parser.add_argument("--mode", default="joints", choices=["t2m", "joints"])
+ args = parser.parse_args()
+ device = "cuda"
+
+ if os.path.isdir(args.source_path):
+     os.makedirs(args.target_path, exist_ok=True)
+     files = os.listdir(args.source_path)
+     target_files = files
+ else:
+     files = [args.source_path]
+     args.source_path = ""
+
+     if args.target_path.split(".")[-1] != "npy":
+         os.makedirs(args.target_path)
+         target_files = [files[0].split("/")[-1]]
+     else:
+         target_files = [args.target_path]
+         args.target_path = ""
+
+ for i in range(len(files)):
+     curr_path = os.path.join(args.source_path, files[i])
+     target_path = os.path.join(args.target_path, target_files[i])
+     if os.path.exists(target_path):
+         continue
+
+     curr_file = np.load(curr_path)  #### [nframe, 263]
+     curr_file = torch.from_numpy(curr_file)
+
+     if args.mode == "t2m":
+         from dataset.t2m.recover_joints import recover_from_ric
+         motions = recover_from_ric(curr_file, 22)  #### [nframes, 22, 3]
+         motions = motions.detach().cpu().numpy()
+     else:
+         motions = curr_file.detach().cpu().numpy()
+
+     frames, njoints, nfeats = motions.shape
+     MINS = motions.min(axis=0).min(axis=0)
+     MAXS = motions.max(axis=0).max(axis=0)
+     height_offset = MINS[1]
+     motions[:, :, 1] -= height_offset
+     model = joints2smpl(frames, 0, True, model_path=args.model_path)
+     target, trans = model.joint2smpl(motions)
+
+     target = np.concatenate([target, trans], axis=1)
+
+     np.save(target_path, target)
+     if i % 10 == 0:
+         print("save %d npys" % (i))
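
The script's data contract, as written: each source `.npy` holds either HumanML3D-style features of shape [nframes, 263] (`--mode t2m`) or raw joint positions of shape [nframes, 22, 3] (`--mode joints`); each output `.npy` stores the fitted pose parameters concatenated with the per-frame translation along axis 1. A hypothetical invocation sketch (the paths are illustrative, not requirements):

```python
# Sketch: fabricate a joints file and run the fitting script on it.
import numpy as np
import subprocess

joints = np.zeros((60, 22, 3), dtype=np.float32)  # fake [nframes, 22, 3] motion
np.save("/tmp/demo_joints.npy", joints)

subprocess.run([
    "python", "SMPLX/joints2smpl.py",
    "--mode", "joints",
    "--source_path", "/tmp/demo_joints.npy",
    "--target_path", "/tmp/demo_out",   # output directory for the fitted .npy
    "--model_path", "body_models",      # assumed local SMPL model folder
], check=True)
```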
SMPLX/read_from_npy.py ADDED
@@ -0,0 +1,108 @@
+ import numpy as np
+ import torch
+
+ def npy2info(motions, num_shapes=10):
+     if isinstance(motions, str):
+         motions = np.load(motions)
+
+     trans = None
+     gnum = 2
+
+     if isinstance(motions, np.ndarray):
+         betas = np.zeros([motions.shape[0], num_shapes]).astype(motions.dtype)
+     else:
+         betas = torch.zeros([motions.shape[0], num_shapes], dtype=motions.dtype)
+
+     if len(motions.shape) == 3:
+         motions = motions.reshape(motions.shape[0], -1)
+
+     if motions.shape[1] in [73, 157, 166]:
+         gnum = motions[:, -1:][0]
+         motions = motions[:, :-1]
+     elif motions.shape[1] in [75, 159, 168]:
+         gnum = 2
+         trans = motions[:, -3::]
+         motions = motions[:, :-3]
+     elif motions.shape[1] in [76, 160, 169]:
+         gnum = motions[:, -1:][0]
+         trans = motions[:, -4:-1:]
+         motions = motions[:, :-4]
+     elif motions.shape[1] in [72 + num_shapes, 156 + num_shapes, 165 + num_shapes]:
+         betas = motions[:, -num_shapes::]
+         gnum = 2
+         motions = motions[:, :-num_shapes]
+     elif motions.shape[1] in [73 + num_shapes, 157 + num_shapes, 166 + num_shapes]:
+         betas = motions[:, -num_shapes::]
+         gnum = motions[:, -num_shapes-1:-num_shapes:][0]
+         motions = motions[:, :-num_shapes-1]
+     elif motions.shape[1] in [75 + num_shapes, 159 + num_shapes, 168 + num_shapes]:
+         betas = motions[:, -num_shapes::]
+         gnum = 2
+         trans = motions[:, -num_shapes-3:-num_shapes:]
+         motions = motions[:, :-num_shapes-3]
+     elif motions.shape[1] in [76 + num_shapes, 160 + num_shapes, 169 + num_shapes]:
+         betas = motions[:, -num_shapes::]
+         gnum = motions[:, -num_shapes-1:-num_shapes:][0]
+         trans = motions[:, -num_shapes-4:-num_shapes-1:]
+         motions = motions[:, :-num_shapes-4]
+
+     if gnum == 0:
+         gender = "female"
+     elif gnum == 1:
+         gender = "male"
+     else:
+         gender = "neutral"
+
+     return motions, trans, gender, betas
+
+ def info2dict(pose, trans=None, betas=None, mode="smpl", device="cuda", index=-1):
+     if isinstance(pose, np.ndarray):
+         pose = torch.from_numpy(pose)
+
+     if trans is not None and isinstance(trans, np.ndarray):
+         trans = torch.from_numpy(trans)
+
+     if betas is not None and isinstance(betas, np.ndarray):
+         betas = torch.from_numpy(betas)
+     elif betas is None:
+         betas = torch.zeros([pose.shape[0], 10])
+
+     if index != -1:
+         pose = pose[index:index+1]
+
+         if trans is not None:
+             trans = trans[index:index+1]
+
+         betas = betas[index:index+1]
+
+     if mode == "smplx":
+         inputs = {
+             "global_orient": pose[:, :3].float().to(device),
+             "body_pose": pose[:, 3:66].float().to(device),
+             "jaw_pose": pose[:, 66:69].float().to(device),
+             "leye_pose": pose[:, 69:72].float().to(device),
+             "reye_pose": pose[:, 72:75].float().to(device),
+             "left_hand_pose": pose[:, 75:120].float().to(device),
+             "right_hand_pose": pose[:, 120:].float().to(device),
+         }
+     elif mode == "smplh":
+         inputs = {
+             "global_orient": pose[:, :3].float().to(device),
+             "body_pose": pose[:, 3:66].float().to(device),
+             "left_hand_pose": pose[:, 66:111].float().to(device),
+             "right_hand_pose": pose[:, 111:].float().to(device),
+         }
+     elif mode == "smpl":
+         inputs = {
+             "global_orient": pose[:, :3].float().to(device),
+             "body_pose": pose[:, 3:].float().to(device),
+         }
+
+     if trans is not None:
+         inputs["transl"] = trans[:, :].float().to(device)
+     else:
+         print("No Translation Information")
+
+     inputs["betas"] = betas[:, :].float().to(device)
+
+     return inputs
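
`npy2info` infers the layout of a packed motion array purely from its column count (pose, optional translation, optional gender flag, optional betas), and `info2dict` splits a pose vector into the keyword arguments a SMPL/SMPL-H/SMPL-X forward pass expects. A small sketch of the round trip on a fabricated array:

```python
import numpy as np
from SMPLX.read_from_npy import npy2info, info2dict

# 75 columns = 72 SMPL pose params + 3 translation, one of the recognized widths.
packed = np.random.randn(8, 75).astype(np.float32)

pose, trans, gender, betas = npy2info(packed)
print(pose.shape, trans.shape, gender)   # (8, 72) (8, 3) neutral

inputs = info2dict(pose, trans=trans, betas=betas, mode="smpl", device="cpu")
print(sorted(inputs))  # ['betas', 'body_pose', 'global_orient', 'transl']
```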
SMPLX/read_joints_from_pose.py ADDED
@@ -0,0 +1,110 @@
+ import torch
+ import numpy as np
+ from torch import nn
+ import pickle as pkl
+ import torch.nn.functional as F
+
+ class Struct(object):
+     def __init__(self, **kwargs):
+         for key, val in kwargs.items():
+             setattr(self, key, val)
+
+
+ def to_np(array, dtype=np.float32):
+     if 'scipy.sparse' in str(type(array)):
+         array = array.todense()
+     return np.array(array, dtype=dtype)
+
+
+ class Get_Joints(nn.Module):
+     def __init__(self, path, batch_size=300) -> None:
+         super().__init__()
+         self.betas = nn.parameter.Parameter(torch.zeros([batch_size, 10], dtype=torch.float32), requires_grad=False)
+         with open(path, "rb") as f:
+             smpl_prior = pkl.load(f, encoding="latin1")
+             data_struct = Struct(**smpl_prior)
+
+         self.v_template = nn.parameter.Parameter(torch.from_numpy(to_np(data_struct.v_template)), requires_grad=False)
+         self.shapedirs = nn.parameter.Parameter(torch.from_numpy(to_np(data_struct.shapedirs)), requires_grad=False)
+         self.J_regressor = nn.parameter.Parameter(torch.from_numpy(to_np(data_struct.J_regressor)), requires_grad=False)
+         posedirs = torch.from_numpy(to_np(data_struct.posedirs))
+         num_pose_basis = posedirs.shape[-1]
+         posedirs = posedirs.reshape([-1, num_pose_basis]).permute(1, 0)
+         self.posedirs = nn.parameter.Parameter(posedirs, requires_grad=False)
+         self.parents = nn.parameter.Parameter(torch.from_numpy(to_np(data_struct.kintree_table)[0]).long(), requires_grad=False)
+         self.parents[0] = -1
+
+         self.ident = nn.parameter.Parameter(torch.eye(3), requires_grad=False)
+         self.K = nn.parameter.Parameter(torch.zeros([1, 3, 3]), requires_grad=False)
+         self.zeros = nn.parameter.Parameter(torch.zeros([1, 1]), requires_grad=False)
+
+     def blend_shapes(self, betas, shape_disps):
+         blend_shape = torch.einsum('bl,mkl->bmk', [betas, shape_disps])
+         return blend_shape
+
+     def vertices2joints(self, J_regressor, vertices):
+         return torch.einsum('bik,ji->bjk', [vertices, J_regressor])
+
+     def batch_rodrigues(
+         self,
+         rot_vecs,
+         epsilon=1e-8,
+     ):
+         batch_size = rot_vecs.shape[0]
+         angle = torch.norm(rot_vecs + epsilon, dim=1, keepdim=True)
+         rot_dir = rot_vecs / angle
+         cos = torch.unsqueeze(torch.cos(angle), dim=1)
+         sin = torch.unsqueeze(torch.sin(angle), dim=1)
+         # Bx1 arrays
+         rx, ry, rz = torch.split(rot_dir, 1, dim=1)
+         K = self.K.repeat(batch_size, 1, 1)
+         zeros = self.zeros.repeat(batch_size, 1)
+         K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1).view((batch_size, 3, 3))
+         ident = self.ident.unsqueeze(0)
+         rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
+         return rot_mat
+
+     def transform_mat(self, R, t):
+         return torch.cat([F.pad(R, [0, 0, 0, 1]),
+                           F.pad(t, [0, 0, 0, 1], value=1)], dim=2)
+
+     def batch_rigid_transform(
+         self,
+         rot_mats,
+         joints,
+         parents,
+     ):
+         joints = torch.unsqueeze(joints, dim=-1)
+
+         rel_joints = joints.clone()
+         rel_joints[:, 1:] -= joints[:, parents[1:]]
+
+         transforms_mat = self.transform_mat(
+             rot_mats.reshape(-1, 3, 3),
+             rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4)
+
+         transform_chain = [transforms_mat[:, 0]]
+         for i in range(1, parents.shape[0]):
+             # Subtract the joint location at the rest pose
+             # No need for rotation, since it's identity when at rest
+             curr_res = torch.matmul(transform_chain[parents[i]],
+                                     transforms_mat[:, i])
+             transform_chain.append(curr_res)
+
+         transforms = torch.stack(transform_chain, dim=1)
+
+         # The last column of the transformations contains the posed joints
+         posed_joints = transforms[:, :, :3, 3]
+         return posed_joints
+
+     def forward(self, pose, trans=None):
+         pose = pose.float()
+         batch = pose.shape[0]
+         betas = self.betas[:batch]
+         v_shaped = self.v_template + self.blend_shapes(betas, self.shapedirs)
+         J = self.vertices2joints(self.J_regressor, v_shaped)
+         rot_mats = self.batch_rodrigues(pose.view(-1, 3)).view([batch, -1, 3, 3])
+         J_transformed = self.batch_rigid_transform(rot_mats, J, self.parents)
+         if trans is not None:
+             J_transformed += trans.unsqueeze(dim=1)
+         return J_transformed
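
`Get_Joints` re-implements just enough of SMPL's forward pass (shape blending, joint regression, Rodrigues' formula, the rigid kinematic chain) to map axis-angle poses to posed joint locations without the full body-model stack. A usage sketch; the model path is a hypothetical location for a standard 10-beta SMPL pickle:

```python
import torch
from SMPLX.read_joints_from_pose import Get_Joints

# Path to a standard SMPL pickle is an assumption of this sketch.
model = Get_Joints("body_models/smpl/SMPL_NEUTRAL.pkl", batch_size=16)

pose = torch.zeros(16, 72)    # axis-angle pose: 24 joints x 3
trans = torch.zeros(16, 3)    # optional root translation
joints = model(pose, trans)   # -> [16, 24, 3] posed joint positions
print(joints.shape)
```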
SMPLX/rotation_conversions.py ADDED
@@ -0,0 +1,532 @@
+ # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
+ # Check PYTORCH3D_LICENCE before use
+
+ import functools
+ from typing import Optional
+
+ import torch
+ import torch.nn.functional as F
+
+
+ """
+ The transformation matrices returned from the functions in this file assume
+ the points on which the transformation will be applied are column vectors.
+ i.e. the R matrix is structured as
+     R = [
+         [Rxx, Rxy, Rxz],
+         [Ryx, Ryy, Ryz],
+         [Rzx, Rzy, Rzz],
+     ]  # (3, 3)
+ This matrix can be applied to column vectors by post multiplication
+ by the points e.g.
+     points = [[0], [1], [2]]  # (3 x 1) xyz coordinates of a point
+     transformed_points = R * points
+ To apply the same matrix to points which are row vectors, the R matrix
+ can be transposed and pre multiplied by the points:
+ e.g.
+     points = [[0, 1, 2]]  # (1 x 3) xyz coordinates of a point
+     transformed_points = points * R.transpose(1, 0)
+ """
+
+
+ def quaternion_to_matrix(quaternions):
+     """
+     Convert rotations given as quaternions to rotation matrices.
+     Args:
+         quaternions: quaternions with real part first,
+             as tensor of shape (..., 4).
+     Returns:
+         Rotation matrices as tensor of shape (..., 3, 3).
+     """
+     r, i, j, k = torch.unbind(quaternions, -1)
+     two_s = 2.0 / (quaternions * quaternions).sum(-1)
+
+     o = torch.stack(
+         (
+             1 - two_s * (j * j + k * k),
+             two_s * (i * j - k * r),
+             two_s * (i * k + j * r),
+             two_s * (i * j + k * r),
+             1 - two_s * (i * i + k * k),
+             two_s * (j * k - i * r),
+             two_s * (i * k - j * r),
+             two_s * (j * k + i * r),
+             1 - two_s * (i * i + j * j),
+         ),
+         -1,
+     )
+     return o.reshape(quaternions.shape[:-1] + (3, 3))
+
+
+ def _copysign(a, b):
+     """
+     Return a tensor where each element has the absolute value taken from the
+     corresponding element of a, with sign taken from the corresponding
+     element of b. This is like the standard copysign floating-point operation,
+     but is not careful about negative 0 and NaN.
+     Args:
+         a: source tensor.
+         b: tensor whose signs will be used, of the same shape as a.
+     Returns:
+         Tensor of the same shape as a with the signs of b.
+     """
+     signs_differ = (a < 0) != (b < 0)
+     return torch.where(signs_differ, -a, a)
+
+
+ def _sqrt_positive_part(x):
+     """
+     Returns torch.sqrt(torch.max(0, x))
+     but with a zero subgradient where x is 0.
+     """
+     ret = torch.zeros_like(x)
+     positive_mask = x > 0
+     ret[positive_mask] = torch.sqrt(x[positive_mask])
+     return ret
+
+
+ def matrix_to_quaternion(matrix):
+     """
+     Convert rotations given as rotation matrices to quaternions.
+     Args:
+         matrix: Rotation matrices as tensor of shape (..., 3, 3).
+     Returns:
+         quaternions with real part first, as tensor of shape (..., 4).
+     """
+     if matrix.size(-1) != 3 or matrix.size(-2) != 3:
+         raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")
+     m00 = matrix[..., 0, 0]
+     m11 = matrix[..., 1, 1]
+     m22 = matrix[..., 2, 2]
+     o0 = 0.5 * _sqrt_positive_part(1 + m00 + m11 + m22)
+     x = 0.5 * _sqrt_positive_part(1 + m00 - m11 - m22)
+     y = 0.5 * _sqrt_positive_part(1 - m00 + m11 - m22)
+     z = 0.5 * _sqrt_positive_part(1 - m00 - m11 + m22)
+     o1 = _copysign(x, matrix[..., 2, 1] - matrix[..., 1, 2])
+     o2 = _copysign(y, matrix[..., 0, 2] - matrix[..., 2, 0])
+     o3 = _copysign(z, matrix[..., 1, 0] - matrix[..., 0, 1])
+     return torch.stack((o0, o1, o2, o3), -1)
+
+
+ def _axis_angle_rotation(axis: str, angle):
+     """
+     Return the rotation matrices for one of the rotations about an axis
+     of which Euler angles describe, for each value of the angle given.
+     Args:
+         axis: Axis label "X" or "Y" or "Z".
+         angle: any shape tensor of Euler angles in radians
+     Returns:
+         Rotation matrices as tensor of shape (..., 3, 3).
+     """
+
+     cos = torch.cos(angle)
+     sin = torch.sin(angle)
+     one = torch.ones_like(angle)
+     zero = torch.zeros_like(angle)
+
+     if axis == "X":
+         R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos)
+     if axis == "Y":
+         R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos)
+     if axis == "Z":
+         R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one)
+
+     return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3))
+
+
+ def euler_angles_to_matrix(euler_angles, convention: str):
+     """
+     Convert rotations given as Euler angles in radians to rotation matrices.
+     Args:
+         euler_angles: Euler angles in radians as tensor of shape (..., 3).
+         convention: Convention string of three uppercase letters from
+             {"X", "Y", and "Z"}.
+     Returns:
+         Rotation matrices as tensor of shape (..., 3, 3).
+     """
+     if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3:
+         raise ValueError("Invalid input euler angles.")
+     if len(convention) != 3:
+         raise ValueError("Convention must have 3 letters.")
+     if convention[1] in (convention[0], convention[2]):
+         raise ValueError(f"Invalid convention {convention}.")
+     for letter in convention:
+         if letter not in ("X", "Y", "Z"):
+             raise ValueError(f"Invalid letter {letter} in convention string.")
+     matrices = map(_axis_angle_rotation, convention, torch.unbind(euler_angles, -1))
+     return functools.reduce(torch.matmul, matrices)
+
+
+ def _angle_from_tan(
+     axis: str, other_axis: str, data, horizontal: bool, tait_bryan: bool
+ ):
+     """
+     Extract the first or third Euler angle from the two members of
+     the matrix which are positive constant times its sine and cosine.
+     Args:
+         axis: Axis label "X" or "Y" or "Z" for the angle we are finding.
+         other_axis: Axis label "X" or "Y" or "Z" for the middle axis in the
+             convention.
+         data: Rotation matrices as tensor of shape (..., 3, 3).
+         horizontal: Whether we are looking for the angle for the third axis,
+             which means the relevant entries are in the same row of the
+             rotation matrix. If not, they are in the same column.
+         tait_bryan: Whether the first and third axes in the convention differ.
+     Returns:
+         Euler Angles in radians for each matrix in data as a tensor
+         of shape (...).
+     """
+
+     i1, i2 = {"X": (2, 1), "Y": (0, 2), "Z": (1, 0)}[axis]
+     if horizontal:
+         i2, i1 = i1, i2
+     even = (axis + other_axis) in ["XY", "YZ", "ZX"]
+     if horizontal == even:
+         return torch.atan2(data[..., i1], data[..., i2])
+     if tait_bryan:
+         return torch.atan2(-data[..., i2], data[..., i1])
+     return torch.atan2(data[..., i2], -data[..., i1])
+
+
+ def _index_from_letter(letter: str):
+     if letter == "X":
+         return 0
+     if letter == "Y":
+         return 1
+     if letter == "Z":
+         return 2
+
+
+ def matrix_to_euler_angles(matrix, convention: str):
+     """
+     Convert rotations given as rotation matrices to Euler angles in radians.
+     Args:
+         matrix: Rotation matrices as tensor of shape (..., 3, 3).
+         convention: Convention string of three uppercase letters.
+     Returns:
+         Euler angles in radians as tensor of shape (..., 3).
+     """
+     if len(convention) != 3:
+         raise ValueError("Convention must have 3 letters.")
+     if convention[1] in (convention[0], convention[2]):
+         raise ValueError(f"Invalid convention {convention}.")
+     for letter in convention:
+         if letter not in ("X", "Y", "Z"):
+             raise ValueError(f"Invalid letter {letter} in convention string.")
+     if matrix.size(-1) != 3 or matrix.size(-2) != 3:
+         raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")
+     i0 = _index_from_letter(convention[0])
+     i2 = _index_from_letter(convention[2])
+     tait_bryan = i0 != i2
+     if tait_bryan:
+         central_angle = torch.asin(
+             matrix[..., i0, i2] * (-1.0 if i0 - i2 in [-1, 2] else 1.0)
+         )
+     else:
+         central_angle = torch.acos(matrix[..., i0, i0])
+
+     o = (
+         _angle_from_tan(
+             convention[0], convention[1], matrix[..., i2], False, tait_bryan
+         ),
+         central_angle,
+         _angle_from_tan(
+             convention[2], convention[1], matrix[..., i0, :], True, tait_bryan
+         ),
+     )
+     return torch.stack(o, -1)
+
+
+ def random_quaternions(
+     n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
+ ):
+     """
+     Generate random quaternions representing rotations,
+     i.e. versors with nonnegative real part.
+     Args:
+         n: Number of quaternions in a batch to return.
+         dtype: Type to return.
+         device: Desired device of returned tensor. Default:
+             uses the current device for the default tensor type.
+         requires_grad: Whether the resulting tensor should have the gradient
+             flag set.
+     Returns:
+         Quaternions as tensor of shape (N, 4).
+     """
+     o = torch.randn((n, 4), dtype=dtype, device=device, requires_grad=requires_grad)
+     s = (o * o).sum(1)
+     o = o / _copysign(torch.sqrt(s), o[:, 0])[:, None]
+     return o
+
+
+ def random_rotations(
+     n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
+ ):
+     """
+     Generate random rotations as 3x3 rotation matrices.
+     Args:
+         n: Number of rotation matrices in a batch to return.
+         dtype: Type to return.
+         device: Device of returned tensor. Default: if None,
+             uses the current device for the default tensor type.
+         requires_grad: Whether the resulting tensor should have the gradient
+             flag set.
+     Returns:
+         Rotation matrices as tensor of shape (n, 3, 3).
+     """
+     quaternions = random_quaternions(
+         n, dtype=dtype, device=device, requires_grad=requires_grad
+     )
+     return quaternion_to_matrix(quaternions)
+
+
+ def random_rotation(
+     dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
+ ):
+     """
+     Generate a single random 3x3 rotation matrix.
+     Args:
+         dtype: Type to return
+         device: Device of returned tensor. Default: if None,
+             uses the current device for the default tensor type
+         requires_grad: Whether the resulting tensor should have the gradient
+             flag set
+     Returns:
+         Rotation matrix as tensor of shape (3, 3).
+     """
+     return random_rotations(1, dtype, device, requires_grad)[0]
+
+
+ def standardize_quaternion(quaternions):
+     """
+     Convert a unit quaternion to a standard form: one in which the real
+     part is non negative.
+     Args:
+         quaternions: Quaternions with real part first,
+             as tensor of shape (..., 4).
+     Returns:
+         Standardized quaternions as tensor of shape (..., 4).
+     """
+     return torch.where(quaternions[..., 0:1] < 0, -quaternions, quaternions)
+
+
+ def quaternion_raw_multiply(a, b):
+     """
+     Multiply two quaternions.
+     Usual torch rules for broadcasting apply.
+     Args:
+         a: Quaternions as tensor of shape (..., 4), real part first.
+         b: Quaternions as tensor of shape (..., 4), real part first.
+     Returns:
+         The product of a and b, a tensor of quaternions shape (..., 4).
+     """
+     aw, ax, ay, az = torch.unbind(a, -1)
+     bw, bx, by, bz = torch.unbind(b, -1)
+     ow = aw * bw - ax * bx - ay * by - az * bz
+     ox = aw * bx + ax * bw + ay * bz - az * by
+     oy = aw * by - ax * bz + ay * bw + az * bx
+     oz = aw * bz + ax * by - ay * bx + az * bw
+     return torch.stack((ow, ox, oy, oz), -1)
+
+
+ def quaternion_multiply(a, b):
+     """
+     Multiply two quaternions representing rotations, returning the quaternion
+     representing their composition, i.e. the versor with nonnegative real part.
+     Usual torch rules for broadcasting apply.
+     Args:
+         a: Quaternions as tensor of shape (..., 4), real part first.
+         b: Quaternions as tensor of shape (..., 4), real part first.
+     Returns:
+         The product of a and b, a tensor of quaternions of shape (..., 4).
+     """
+     ab = quaternion_raw_multiply(a, b)
+     return standardize_quaternion(ab)
+
+
+ def quaternion_invert(quaternion):
+     """
+     Given a quaternion representing rotation, get the quaternion representing
+     its inverse.
+     Args:
+         quaternion: Quaternions as tensor of shape (..., 4), with real part
+             first, which must be versors (unit quaternions).
+     Returns:
+         The inverse, a tensor of quaternions of shape (..., 4).
+     """
+
+     return quaternion * quaternion.new_tensor([1, -1, -1, -1])
+
+
+ def quaternion_apply(quaternion, point):
+     """
+     Apply the rotation given by a quaternion to a 3D point.
+     Usual torch rules for broadcasting apply.
+     Args:
+         quaternion: Tensor of quaternions, real part first, of shape (..., 4).
+         point: Tensor of 3D points of shape (..., 3).
+     Returns:
+         Tensor of rotated points of shape (..., 3).
+     """
+     if point.size(-1) != 3:
+         raise ValueError(f"Points are not in 3D, {point.shape}.")
+     real_parts = point.new_zeros(point.shape[:-1] + (1,))
+     point_as_quaternion = torch.cat((real_parts, point), -1)
+     out = quaternion_raw_multiply(
+         quaternion_raw_multiply(quaternion, point_as_quaternion),
+         quaternion_invert(quaternion),
+     )
+     return out[..., 1:]
+
+
+ def axis_angle_to_matrix(axis_angle):
+     """
+     Convert rotations given as axis/angle to rotation matrices.
+     Args:
+         axis_angle: Rotations given as a vector in axis angle form,
+             as a tensor of shape (..., 3), where the magnitude is
+             the angle turned anticlockwise in radians around the
+             vector's direction.
+     Returns:
+         Rotation matrices as tensor of shape (..., 3, 3).
+     """
+     return quaternion_to_matrix(axis_angle_to_quaternion(axis_angle))
+
+
+ def matrix_to_axis_angle(matrix):
+     """
+     Convert rotations given as rotation matrices to axis/angle.
+     Args:
+         matrix: Rotation matrices as tensor of shape (..., 3, 3).
+     Returns:
+         Rotations given as a vector in axis angle form, as a tensor
+         of shape (..., 3), where the magnitude is the angle
+         turned anticlockwise in radians around the vector's
+         direction.
+     """
+     return quaternion_to_axis_angle(matrix_to_quaternion(matrix))
+
+
+ def axis_angle_to_quaternion(axis_angle):
+     """
+     Convert rotations given as axis/angle to quaternions.
+     Args:
+         axis_angle: Rotations given as a vector in axis angle form,
+             as a tensor of shape (..., 3), where the magnitude is
+             the angle turned anticlockwise in radians around the
+             vector's direction.
+     Returns:
+         quaternions with real part first, as tensor of shape (..., 4).
+     """
+     angles = torch.norm(axis_angle, p=2, dim=-1, keepdim=True)
+     half_angles = 0.5 * angles
+     eps = 1e-6
+     small_angles = angles.abs() < eps
+     sin_half_angles_over_angles = torch.empty_like(angles)
+     sin_half_angles_over_angles[~small_angles] = (
+         torch.sin(half_angles[~small_angles]) / angles[~small_angles]
+     )
+     # for x small, sin(x/2) is about x/2 - (x/2)^3/6
+     # so sin(x/2)/x is about 1/2 - (x*x)/48
+     sin_half_angles_over_angles[small_angles] = (
+         0.5 - (angles[small_angles] * angles[small_angles]) / 48
+     )
+     quaternions = torch.cat(
+         [torch.cos(half_angles), axis_angle * sin_half_angles_over_angles], dim=-1
+     )
+     return quaternions
+
+
+ def quaternion_to_axis_angle(quaternions):
+     """
+     Convert rotations given as quaternions to axis/angle.
+     Args:
+         quaternions: quaternions with real part first,
+             as tensor of shape (..., 4).
+     Returns:
+         Rotations given as a vector in axis angle form, as a tensor
+         of shape (..., 3), where the magnitude is the angle
+         turned anticlockwise in radians around the vector's
+         direction.
+     """
+     norms = torch.norm(quaternions[..., 1:], p=2, dim=-1, keepdim=True)
+     half_angles = torch.atan2(norms, quaternions[..., :1])
+     angles = 2 * half_angles
+     eps = 1e-6
+     small_angles = angles.abs() < eps
+     sin_half_angles_over_angles = torch.empty_like(angles)
+     sin_half_angles_over_angles[~small_angles] = (
+         torch.sin(half_angles[~small_angles]) / angles[~small_angles]
+     )
+     # for x small, sin(x/2) is about x/2 - (x/2)^3/6
+     # so sin(x/2)/x is about 1/2 - (x*x)/48
+     sin_half_angles_over_angles[small_angles] = (
+         0.5 - (angles[small_angles] * angles[small_angles]) / 48
+     )
+     return quaternions[..., 1:] / sin_half_angles_over_angles
+
+
+ def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor:
+     """
+     Converts 6D rotation representation by Zhou et al. [1] to rotation matrix
+     using Gram--Schmidt orthogonalisation per Section B of [1].
+     Args:
+         d6: 6D rotation representation, of size (*, 6)
+     Returns:
+         batch of rotation matrices of size (*, 3, 3)
+     [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
+     On the Continuity of Rotation Representations in Neural Networks.
+     IEEE Conference on Computer Vision and Pattern Recognition, 2019.
+     Retrieved from http://arxiv.org/abs/1812.07035
+     """
+
+     a1, a2 = d6[..., :3], d6[..., 3:]
+     b1 = F.normalize(a1, dim=-1)
+     b2 = a2 - (b1 * a2).sum(-1, keepdim=True) * b1
+     b2 = F.normalize(b2, dim=-1)
+     b3 = torch.cross(b1, b2, dim=-1)
+     return torch.stack((b1, b2, b3), dim=-2)
+
+
+ def matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor:
+     """
+     Converts rotation matrices to 6D rotation representation by Zhou et al. [1]
+     by dropping the last row. Note that 6D representation is not unique.
+     Args:
+         matrix: batch of rotation matrices of size (*, 3, 3)
+     Returns:
+         6D rotation representation, of size (*, 6)
+     [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
+     On the Continuity of Rotation Representations in Neural Networks.
+     IEEE Conference on Computer Vision and Pattern Recognition, 2019.
+     Retrieved from http://arxiv.org/abs/1812.07035
+     """
+     return matrix[..., :2, :].clone().reshape(*matrix.size()[:-2], 6)
+
+
+ def canonicalize_smplh(poses, trans=None):
+     bs, nframes, njoints = poses.shape[:3]
+
+     global_orient = poses[:, :, 0]
+
+     # first global rotations
+     rot2d = matrix_to_axis_angle(global_orient[:, 0])
+     # rot2d[:, :2] = 0  # Remove the rotation along the vertical axis
+     rot2d = axis_angle_to_matrix(rot2d)
+
+     # Rotate the global rotation to eliminate Z rotations
+     global_orient = torch.einsum("ikj,imkl->imjl", rot2d, global_orient)
+
+     # Construct canonicalized version of x
+     xc = torch.cat((global_orient[:, :, None], poses[:, :, 1:]), dim=2)
+
+     if trans is not None:
+         vel = trans[:, 1:] - trans[:, :-1]
+         # Turn the translation as well
+         vel = torch.einsum("ikj,ilk->ilj", rot2d, vel)
+         trans = torch.cat((torch.zeros(bs, 1, 3, device=vel.device),
+                            torch.cumsum(vel, 1)), 1)
+         return xc, trans
+     else:
+         return xc
+
SMPLX/smplx/__init__.py ADDED
@@ -0,0 +1,30 @@
+ # -*- coding: utf-8 -*-
+
+ # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+ # holder of all proprietary rights on this computer program.
+ # You can only use this computer program if you have closed
+ # a license agreement with MPG or you get the right to use the computer
+ # program from someone who is authorized to grant you that right.
+ # Any use of the computer program without a valid license is prohibited and
+ # liable to prosecution.
+ #
+ # Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+ # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+ # for Intelligent Systems. All rights reserved.
+ #
+ # Contact: ps-license@tuebingen.mpg.de
+
+ from .body_models import (
+     create,
+     SMPL,
+     SMPLH,
+     SMPLX,
+     MANO,
+     FLAME,
+     build_layer,
+     SMPLLayer,
+     SMPLHLayer,
+     SMPLXLayer,
+     MANOLayer,
+     FLAMELayer,
+ )
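
The re-exported `create` factory is the usual entry point, dispatching on `model_type`; a hedged sketch, assuming the repo root is on PYTHONPATH and SMPL-X model files live under a local `body_models` directory (the same folder name the `.gitignore` above excludes):

```python
import torch
from SMPLX import smplx

# 'body_models' with the standard smplx/SMPLX_NEUTRAL.npz layout is assumed.
model = smplx.create("body_models", model_type="smplx", gender="neutral",
                     use_face_contour=True)
output = model(betas=torch.zeros(1, 10), return_verts=True)
print(output.vertices.shape)  # -> torch.Size([1, 10475, 3]) for SMPL-X
print(output.joints.shape)
```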
SMPLX/smplx/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (375 Bytes).
SMPLX/smplx/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (531 Bytes).
SMPLX/smplx/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (373 Bytes).
SMPLX/smplx/__pycache__/body_models.cpython-310.pyc ADDED
Binary file (62.7 kB).
SMPLX/smplx/__pycache__/body_models.cpython-311.pyc ADDED
Binary file (106 kB).
SMPLX/smplx/__pycache__/body_models.cpython-39.pyc ADDED
Binary file (62.1 kB).
SMPLX/smplx/__pycache__/joint_names.cpython-39.pyc ADDED
Binary file (4.2 kB).
SMPLX/smplx/__pycache__/lbs.cpython-310.pyc ADDED
Binary file (11.2 kB).
SMPLX/smplx/__pycache__/lbs.cpython-311.pyc ADDED
Binary file (17.2 kB).
SMPLX/smplx/__pycache__/lbs.cpython-39.pyc ADDED
Binary file (11.2 kB).
SMPLX/smplx/__pycache__/utils.cpython-310.pyc ADDED
Binary file (4.4 kB).
SMPLX/smplx/__pycache__/utils.cpython-311.pyc ADDED
Binary file (7.36 kB).
SMPLX/smplx/__pycache__/utils.cpython-39.pyc ADDED
Binary file (4.51 kB).
SMPLX/smplx/__pycache__/vertex_ids.cpython-310.pyc ADDED
Binary file (1.15 kB).
SMPLX/smplx/__pycache__/vertex_ids.cpython-311.pyc ADDED
Binary file (1.55 kB).
SMPLX/smplx/__pycache__/vertex_ids.cpython-39.pyc ADDED
Binary file (934 Bytes).
SMPLX/smplx/__pycache__/vertex_joint_selector.cpython-310.pyc ADDED
Binary file (1.68 kB).
SMPLX/smplx/__pycache__/vertex_joint_selector.cpython-311.pyc ADDED
Binary file (3 kB).
SMPLX/smplx/__pycache__/vertex_joint_selector.cpython-39.pyc ADDED
Binary file (1.67 kB).
 
SMPLX/smplx/body_models.py ADDED
The diff for this file is too large to render.
 
SMPLX/smplx/joint_names.py ADDED
@@ -0,0 +1,320 @@
+ # -*- coding: utf-8 -*-
+
+ # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
+ # holder of all proprietary rights on this computer program.
+ # You can only use this computer program if you have closed
+ # a license agreement with MPG or you get the right to use the computer
+ # program from someone who is authorized to grant you that right.
+ # Any use of the computer program without a valid license is prohibited and
+ # liable to prosecution.
+ #
+ # Copyright©2019 Max-Planck-Gesellschaft zur Förderung
+ # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
+ # for Intelligent Systems. All rights reserved.
+ #
+ # Contact: ps-license@tuebingen.mpg.de
+
+ import numpy as np
+
+ JOINT_NAMES = [
+     "pelvis",
+     "left_hip",
+     "right_hip",
+     "spine1",
+     "left_knee",
+     "right_knee",
+     "spine2",
+     "left_ankle",
+     "right_ankle",
+     "spine3",
+     "left_foot",
+     "right_foot",
+     "neck",
+     "left_collar",
+     "right_collar",
+     "head",
+     "left_shoulder",
+     "right_shoulder",
+     "left_elbow",
+     "right_elbow",
+     "left_wrist",
+     "right_wrist",
+     "jaw",
+     "left_eye_smplhf",
+     "right_eye_smplhf",
+     "left_index1",
+     "left_index2",
+     "left_index3",
+     "left_middle1",
+     "left_middle2",
+     "left_middle3",
+     "left_pinky1",
+     "left_pinky2",
+     "left_pinky3",
+     "left_ring1",
+     "left_ring2",
+     "left_ring3",
+     "left_thumb1",
+     "left_thumb2",
+     "left_thumb3",
+     "right_index1",
+     "right_index2",
+     "right_index3",
+     "right_middle1",
+     "right_middle2",
+     "right_middle3",
+     "right_pinky1",
+     "right_pinky2",
+     "right_pinky3",
+     "right_ring1",
+     "right_ring2",
+     "right_ring3",
+     "right_thumb1",
+     "right_thumb2",
+     "right_thumb3",
+     "nose",
+     "right_eye",
+     "left_eye",
+     "right_ear",
+     "left_ear",
+     "left_big_toe",
+     "left_small_toe",
+     "left_heel",
+     "right_big_toe",
+     "right_small_toe",
+     "right_heel",
+     "left_thumb",
+     "left_index",
+     "left_middle",
+     "left_ring",
+     "left_pinky",
+     "right_thumb",
+     "right_index",
+     "right_middle",
+     "right_ring",
+     "right_pinky",
+     "right_eye_brow1",
+     "right_eye_brow2",
+     "right_eye_brow3",
+     "right_eye_brow4",
+     "right_eye_brow5",
+     "left_eye_brow5",
+     "left_eye_brow4",
+     "left_eye_brow3",
+     "left_eye_brow2",
+     "left_eye_brow1",
+     "nose1",
+     "nose2",
+     "nose3",
+     "nose4",
+     "right_nose_2",
+     "right_nose_1",
+     "nose_middle",
+     "left_nose_1",
+     "left_nose_2",
+     "right_eye1",
+     "right_eye2",
+     "right_eye3",
+     "right_eye4",
+     "right_eye5",
+     "right_eye6",
+     "left_eye4",
+     "left_eye3",
+     "left_eye2",
+     "left_eye1",
+     "left_eye6",
+     "left_eye5",
+     "right_mouth_1",
+     "right_mouth_2",
+     "right_mouth_3",
+     "mouth_top",
+     "left_mouth_3",
+     "left_mouth_2",
+     "left_mouth_1",
+     "left_mouth_5",  # 59 in OpenPose output
+     "left_mouth_4",  # 58 in OpenPose output
+     "mouth_bottom",
+     "right_mouth_4",
+     "right_mouth_5",
+     "right_lip_1",
+     "right_lip_2",
+     "lip_top",
+     "left_lip_2",
+     "left_lip_1",
+     "left_lip_3",
+     "lip_bottom",
+     "right_lip_3",
+     # Face contour
+     "right_contour_1",
+     "right_contour_2",
+     "right_contour_3",
+     "right_contour_4",
+     "right_contour_5",
+     "right_contour_6",
+     "right_contour_7",
+     "right_contour_8",
+     "contour_middle",
+     "left_contour_8",
+     "left_contour_7",
+     "left_contour_6",
+     "left_contour_5",
+     "left_contour_4",
+     "left_contour_3",
+     "left_contour_2",
+     "left_contour_1",
+ ]
+
+
+ SMPLH_JOINT_NAMES = [
+     "pelvis",
+     "left_hip",
+     "right_hip",
+     "spine1",
+     "left_knee",
+     "right_knee",
+     "spine2",
+     "left_ankle",
+     "right_ankle",
+     "spine3",
+     "left_foot",
+     "right_foot",
+     "neck",
+     "left_collar",
+     "right_collar",
+     "head",
+     "left_shoulder",
+     "right_shoulder",
+     "left_elbow",
+     "right_elbow",
+     "left_wrist",
+     "right_wrist",
+     "left_index1",
+     "left_index2",
+     "left_index3",
+     "left_middle1",
+     "left_middle2",
+     "left_middle3",
+     "left_pinky1",
+     "left_pinky2",
+     "left_pinky3",
+     "left_ring1",
+     "left_ring2",
+     "left_ring3",
+     "left_thumb1",
+     "left_thumb2",
+     "left_thumb3",
+     "right_index1",
+     "right_index2",
+     "right_index3",
+     "right_middle1",
+     "right_middle2",
+     "right_middle3",
+     "right_pinky1",
+     "right_pinky2",
+     "right_pinky3",
+     "right_ring1",
+     "right_ring2",
+     "right_ring3",
+     "right_thumb1",
+     "right_thumb2",
+     "right_thumb3",
+     "nose",
+     "right_eye",
+     "left_eye",
+     "right_ear",
+     "left_ear",
+     "left_big_toe",
+     "left_small_toe",
+     "left_heel",
+     "right_big_toe",
+     "right_small_toe",
+     "right_heel",
+     "left_thumb",
+     "left_index",
+     "left_middle",
+     "left_ring",
+     "left_pinky",
+     "right_thumb",
+     "right_index",
+     "right_middle",
+     "right_ring",
+     "right_pinky",
+ ]
+
+ SMPL_JOINT_NAMES = [
+     "pelvis",
+     "left_hip",
+     "right_hip",
+     "spine1",
+     "left_knee",
+     "right_knee",
+     "spine2",
+     "left_ankle",
+     "right_ankle",
+     "spine3",
+     "left_foot",
+     "right_foot",
+     "neck",
+     "left_collar",
+     "right_collar",
+     "head",
+     "left_shoulder",
+     "right_shoulder",
+     "left_elbow",
+     "right_elbow",
+     "left_wrist",
+     "right_wrist",
+     "left_hand",
+     "right_hand",
+ ]
+
+
+ class Body:
+     """
+     Class for storing a single body pose.
+     """
+
+     def __init__(self, joints, joint_names):
+         assert joints.ndim > 1
+         assert joints.shape[0] == len(joint_names)
+         self.joints = {}
+         for i, j in enumerate(joint_names):
+             self.joints[j] = joints[i]
+
+     @staticmethod
+     def from_smpl(joints):
+         """
+         Create a Body object from SMPL joints.
+         """
+         return Body(joints, SMPL_JOINT_NAMES)
+
+     @staticmethod
+     def from_smplh(joints):
+         """
+         Create a Body object from SMPLH joints.
+         """
+         return Body(joints, SMPLH_JOINT_NAMES)
+
+     def _as(self, joint_names):
+         """
+         Return the joints as an array ordered by the given joint names,
+         zero-filling any joint this body does not have.
+         """
+         joint_list = []
+         for j in joint_names:
+             if j not in self.joints:
+                 joint_list.append(np.zeros_like(self.joints["spine1"]))
+             else:
+                 joint_list.append(self.joints[j])
+         return np.stack(joint_list, axis=0)
+
+     def as_smpl(self):
+         """
+         Convert the body to SMPL joints.
+         """
+         return self._as(SMPL_JOINT_NAMES)
+
+     def as_smplh(self):
+         """
+         Convert the body to SMPLH joints.
+         """
+         return self._as(SMPLH_JOINT_NAMES)
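
`Body` is a thin re-indexing helper: it stores joints by name and re-emits them in another skeleton's order, zero-filling any joint the source skeleton lacks. For example:

```python
import numpy as np
from SMPLX.smplx.joint_names import Body, SMPL_JOINT_NAMES

smpl_joints = np.zeros((len(SMPL_JOINT_NAMES), 3))  # (24, 3) dummy skeleton

body = Body.from_smpl(smpl_joints)
smplh_joints = body.as_smplh()  # (73, 3); finger/face/foot joints absent
                                # from SMPL come back zero-filled
print(smplh_joints.shape)
```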
SMPLX/smplx/lbs.py ADDED
@@ -0,0 +1,405 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
4
+ # holder of all proprietary rights on this computer program.
5
+ # You can only use this computer program if you have closed
6
+ # a license agreement with MPG or you get the right to use the computer
7
+ # program from someone who is authorized to grant you that right.
8
+ # Any use of the computer program without a valid license is prohibited and
9
+ # liable to prosecution.
10
+ #
11
+ # Copyright©2019 Max-Planck-Gesellschaft zur Förderung
12
+ # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
13
+ # for Intelligent Systems. All rights reserved.
14
+ #
15
+ # Contact: ps-license@tuebingen.mpg.de
16
+
17
+ from __future__ import absolute_import
18
+ from __future__ import print_function
19
+ from __future__ import division
20
+
21
+ from typing import Tuple, List
22
+ import numpy as np
23
+
24
+ import torch
25
+ import torch.nn.functional as F
26
+
27
+ from .utils import rot_mat_to_euler, Tensor
28
+
29
+
+ def find_dynamic_lmk_idx_and_bcoords(
+     vertices: Tensor,
+     pose: Tensor,
+     dynamic_lmk_faces_idx: Tensor,
+     dynamic_lmk_b_coords: Tensor,
+     neck_kin_chain: List[int],
+     pose2rot: bool = True,
+ ) -> Tuple[Tensor, Tensor]:
+     ''' Compute the faces and barycentric coordinates for the dynamic landmarks
+
+     To do so, we first compute the rotation of the neck around the y-axis
+     and then use a pre-computed look-up table to find the faces and the
+     barycentric coordinates that will be used.
+
+     Special thanks to Soubhik Sanyal (soubhik.sanyal@tuebingen.mpg.de)
+     for providing the original TensorFlow implementation and for the LUT.
+
+     Parameters
+     ----------
+     vertices: torch.tensor BxVx3, dtype = torch.float32
+         The tensor of input vertices
+     pose: torch.tensor Bx(Jx3), dtype = torch.float32
+         The current pose of the body model
+     dynamic_lmk_faces_idx: torch.tensor L, dtype = torch.long
+         The look-up table from neck rotation to faces
+     dynamic_lmk_b_coords: torch.tensor Lx3, dtype = torch.float32
+         The look-up table from neck rotation to barycentric coordinates
+     neck_kin_chain: list
+         A python list that contains the indices of the joints that form the
+         kinematic chain of the neck.
+
+     Returns
+     -------
+     dyn_lmk_faces_idx: torch.tensor, dtype = torch.long
+         A tensor of size BxL that contains the indices of the faces that
+         will be used to compute the current dynamic landmarks.
+     dyn_lmk_b_coords: torch.tensor, dtype = torch.float32
+         A tensor of size BxLx3 that contains the barycentric coordinates
+         that will be used to compute the current dynamic landmarks.
+     '''
+
+     dtype = vertices.dtype
+     batch_size = vertices.shape[0]
+
+     if pose2rot:
+         aa_pose = torch.index_select(pose.view(batch_size, -1, 3), 1,
+                                      neck_kin_chain)
+         rot_mats = batch_rodrigues(
+             aa_pose.view(-1, 3)).view(batch_size, -1, 3, 3)
+     else:
+         rot_mats = torch.index_select(
+             pose.view(batch_size, -1, 3, 3), 1, neck_kin_chain)
+
+     rel_rot_mat = torch.eye(
+         3, device=vertices.device, dtype=dtype).unsqueeze_(dim=0).repeat(
+             batch_size, 1, 1)
+     for idx in range(len(neck_kin_chain)):
+         rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat)
+
+     y_rot_angle = torch.round(
+         torch.clamp(-rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi,
+                     max=39)).to(dtype=torch.long)
+     neg_mask = y_rot_angle.lt(0).to(dtype=torch.long)
+     mask = y_rot_angle.lt(-39).to(dtype=torch.long)
+     neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle)
+     y_rot_angle = (neg_mask * neg_vals +
+                    (1 - neg_mask) * y_rot_angle)
+
+     dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx,
+                                            0, y_rot_angle)
+     dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords,
+                                           0, y_rot_angle)
+
+     return dyn_lmk_faces_idx, dyn_lmk_b_coords
+
+
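The folding of the signed yaw angle into a LUT row index is the least obvious step above; the hypothetical standalone snippet below repeats just that arithmetic for a few sample angles (yaw_to_lut_row is an illustrative name, not part of the module):

import torch

def yaw_to_lut_row(y_deg: torch.Tensor) -> torch.Tensor:
    # Same arithmetic as above: clamp to 39 degrees, keep non-negative
    # yaw in rows 0..39, and fold negative yaw into rows 39..78.
    y = torch.round(torch.clamp(y_deg, max=39)).to(torch.long)
    neg_mask = y.lt(0).to(torch.long)
    mask = y.lt(-39).to(torch.long)
    neg_vals = mask * 78 + (1 - mask) * (39 - y)
    return neg_mask * neg_vals + (1 - neg_mask) * y

print(yaw_to_lut_row(torch.tensor([0.0, 20.0, 50.0, -10.0, -60.0])))
# -> tensor([ 0, 20, 39, 49, 78])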
+ def vertices2landmarks(
+     vertices: Tensor,
+     faces: Tensor,
+     lmk_faces_idx: Tensor,
+     lmk_bary_coords: Tensor
+ ) -> Tensor:
+     ''' Calculates landmarks by barycentric interpolation
+
+     Parameters
+     ----------
+     vertices: torch.tensor BxVx3, dtype = torch.float32
+         The tensor of input vertices
+     faces: torch.tensor Fx3, dtype = torch.long
+         The faces of the mesh
+     lmk_faces_idx: torch.tensor L, dtype = torch.long
+         The tensor with the indices of the faces used to calculate the
+         landmarks.
+     lmk_bary_coords: torch.tensor Lx3, dtype = torch.float32
+         The tensor of barycentric coordinates that are used to interpolate
+         the landmarks
+
+     Returns
+     -------
+     landmarks: torch.tensor BxLx3, dtype = torch.float32
+         The coordinates of the landmarks for each mesh in the batch
+     '''
+     # Extract the indices of the vertices for each face
+     # BxLx3
+     batch_size, num_verts = vertices.shape[:2]
+     device = vertices.device
+
+     lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1).to(torch.long)).view(
+         batch_size, -1, 3)
+     # The '.to(torch.long)' is added to make the trace work in C++;
+     # otherwise you get a runtime error in C++:
+     # 'index_select(): Expected dtype int32 or int64 for index'
+
+     lmk_faces += torch.arange(
+         batch_size, dtype=torch.long, device=device).view(-1, 1, 1) * num_verts
+
+     lmk_vertices = vertices.view(-1, 3)[lmk_faces].view(
+         batch_size, -1, 3, 3)
+
+     landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords])
+     return landmarks
+
+
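As a quick sanity check of the barycentric interpolation, equal weights on a single triangle should return its centroid; a minimal sketch, assuming the function above is in scope:

import torch

verts = torch.tensor([[[0., 0., 0.],
                       [1., 0., 0.],
                       [0., 1., 0.]]])            # B=1, V=3
faces = torch.tensor([[0, 1, 2]])                 # one triangle
lmk_faces_idx = torch.tensor([[0]])               # B x L
lmk_bary = torch.tensor([[[1/3, 1/3, 1/3]]])      # B x L x 3

print(vertices2landmarks(verts, faces, lmk_faces_idx, lmk_bary))
# centroid: tensor([[[0.3333, 0.3333, 0.0000]]])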
+ def lbs(
+     betas: Tensor,
+     pose: Tensor,
+     v_template: Tensor,
+     shapedirs: Tensor,
+     posedirs: Tensor,
+     J_regressor: Tensor,
+     parents: Tensor,
+     lbs_weights: Tensor,
+     pose2rot: bool = True,
+ ) -> Tuple[Tensor, Tensor]:
+     ''' Performs Linear Blend Skinning with the given shape and pose parameters
+
+     Parameters
+     ----------
+     betas : torch.tensor BxNB
+         The tensor of shape parameters
+     pose : torch.tensor Bx(J + 1) * 3
+         The pose parameters in axis-angle format
+     v_template : torch.tensor BxVx3
+         The template mesh that will be deformed
+     shapedirs : torch.tensor Vx3xNB
+         The tensor of PCA shape displacements
+     posedirs : torch.tensor Px(V * 3)
+         The pose PCA coefficients
+     J_regressor : torch.tensor JxV
+         The regressor array that is used to calculate the joints from
+         the position of the vertices
+     parents: torch.tensor J
+         The array that describes the kinematic tree for the model
+     lbs_weights: torch.tensor N x V x (J + 1)
+         The linear blend skinning weights that represent how much the
+         rotation matrix of each part affects each vertex
+     pose2rot: bool, optional
+         Flag on whether to convert the input pose tensor to rotation
+         matrices. The default value is True. If False, then the pose tensor
+         should already contain rotation matrices and have a size of
+         Bx(J + 1)x9
+
+     Returns
+     -------
+     verts: torch.tensor BxVx3
+         The vertices of the mesh after applying the shape and pose
+         displacements.
+     joints: torch.tensor BxJx3
+         The joints of the model
+     '''
+
+     batch_size = max(betas.shape[0], pose.shape[0])
+     device, dtype = betas.device, betas.dtype
+
+     # 1. Add shape contribution
+     v_shaped = v_template + blend_shapes(betas, shapedirs)
+
+     # 2. Get the rest-pose joints
+     # NxJx3 array
+     J = vertices2joints(J_regressor, v_shaped)
+
+     # 3. Add pose blend shapes
+     # N x J x 3 x 3
+     ident = torch.eye(3, dtype=dtype, device=device)
+     if pose2rot:
+         rot_mats = batch_rodrigues(pose.view(-1, 3)).view(
+             [batch_size, -1, 3, 3])
+
+         pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])
+         # (N x P) x (P x V * 3) -> N x V x 3
+         pose_offsets = torch.matmul(
+             pose_feature, posedirs).view(batch_size, -1, 3)
+     else:
+         pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident
+         rot_mats = pose.view(batch_size, -1, 3, 3)
+
+         pose_offsets = torch.matmul(pose_feature.view(batch_size, -1),
+                                     posedirs).view(batch_size, -1, 3)
+
+     v_posed = pose_offsets + v_shaped
+     # 4. Get the global joint location
+     J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)
+
+     # 5. Do skinning:
+     # W is N x V x (J + 1)
+     W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1])
+     # (N x V x (J + 1)) x (N x (J + 1) x 16)
+     num_joints = J_regressor.shape[0]
+     T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \
+         .view(batch_size, -1, 4, 4)
+
+     homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1],
+                                dtype=dtype, device=device)
+     v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2)
+     v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1))
+
+     verts = v_homo[:, :, :3, 0]
+
+     return verts, J_transformed
+
+
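A shape-checking sketch for lbs with toy dimensions (all inputs random and hypothetical: B=1 batch, V=4 vertices, J=2 joints including the root, NB=3 betas; P follows from the J-1 non-root rotations):

import torch

B, V, J, NB = 1, 4, 2, 3
betas = torch.zeros(B, NB)
pose = torch.zeros(B, J * 3)                 # axis-angle, rest pose
v_template = torch.rand(V, 3)
shapedirs = torch.rand(V, 3, NB)
posedirs = torch.rand((J - 1) * 9, V * 3)
J_regressor = torch.rand(J, V)
parents = torch.tensor([-1, 0])
lbs_weights = torch.softmax(torch.rand(V, J), dim=-1)

verts, joints = lbs(betas, pose, v_template, shapedirs,
                    posedirs, J_regressor, parents, lbs_weights)
print(verts.shape, joints.shape)  # torch.Size([1, 4, 3]) torch.Size([1, 2, 3])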
+ def vertices2joints(J_regressor: Tensor, vertices: Tensor) -> Tensor:
+     ''' Calculates the 3D joint locations from the vertices
+
+     Parameters
+     ----------
+     J_regressor : torch.tensor JxV
+         The regressor array that is used to calculate the joints from the
+         position of the vertices
+     vertices : torch.tensor BxVx3
+         The tensor of mesh vertices
+
+     Returns
+     -------
+     torch.tensor BxJx3
+         The location of the joints
+     '''
+
+     return torch.einsum('bik,ji->bjk', [vertices, J_regressor])
+
+
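The einsum here is just a batched matrix product; a quick equivalence check, assuming the function above is in scope:

import torch

J_regressor = torch.rand(5, 10)          # J x V
vertices = torch.rand(2, 10, 3)          # B x V x 3
a = vertices2joints(J_regressor, vertices)
b = J_regressor @ vertices               # (J x V) broadcast against (B x V x 3)
print(a.shape, torch.allclose(a, b))     # torch.Size([2, 5, 3]) True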
+ def blend_shapes(betas: Tensor, shape_disps: Tensor) -> Tensor:
+     ''' Calculates the per vertex displacement due to the blend shapes
+
+     Parameters
+     ----------
+     betas : torch.tensor Bx(num_betas)
+         Blend shape coefficients
+     shape_disps: torch.tensor Vx3x(num_betas)
+         Blend shapes
+
+     Returns
+     -------
+     torch.tensor BxVx3
+         The per-vertex displacement due to shape deformation
+     '''
+
+     # Displacement[b, m, k] = sum_{l} betas[b, l] * shape_disps[m, k, l]
+     # i.e. Multiply each shape displacement by its corresponding beta and
+     # then sum them.
+     blend_shape = torch.einsum('bl,mkl->bmk', [betas, shape_disps])
+     return blend_shape
+
+
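Likewise, blend_shapes can be checked against an explicit weighted sum over the beta axis (a minimal sketch with made-up sizes):

import torch

betas = torch.rand(2, 4)                  # B x num_betas
shape_disps = torch.rand(10, 3, 4)        # V x 3 x num_betas
out = blend_shapes(betas, shape_disps)
# Each displacement basis weighted by its beta, then summed:
ref = (shape_disps.unsqueeze(0) * betas.view(2, 1, 1, 4)).sum(-1)
print(out.shape, torch.allclose(out, ref))  # torch.Size([2, 10, 3]) True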
+ def batch_rodrigues(
+     rot_vecs: Tensor,
+     epsilon: float = 1e-8,
+ ) -> Tensor:
+     ''' Calculates the rotation matrices for a batch of rotation vectors
+
+     Parameters
+     ----------
+     rot_vecs: torch.tensor Nx3
+         array of N axis-angle vectors
+
+     Returns
+     -------
+     R: torch.tensor Nx3x3
+         The rotation matrices for the given axis-angle parameters
+     '''
+
+     batch_size = rot_vecs.shape[0]
+     device, dtype = rot_vecs.device, rot_vecs.dtype
+
+     # Use epsilon to avoid a division by zero for zero-length vectors
+     angle = torch.norm(rot_vecs + epsilon, dim=1, keepdim=True)
+     rot_dir = rot_vecs / angle
+
+     cos = torch.unsqueeze(torch.cos(angle), dim=1)
+     sin = torch.unsqueeze(torch.sin(angle), dim=1)
+
+     # Bx1 arrays
+     rx, ry, rz = torch.split(rot_dir, 1, dim=1)
+
+     # Build the skew-symmetric cross-product matrix K for each axis
+     zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
+     K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
+         .view((batch_size, 3, 3))
+
+     # Rodrigues' formula: R = I + sin(angle) * K + (1 - cos(angle)) * K^2
+     ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
+     rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
+     return rot_mat
+
+
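A worked example: an axis-angle vector of pi/2 about the z-axis should rotate the x unit vector onto the y-axis (minimal sketch, assuming batch_rodrigues is in scope):

import math
import torch

aa = torch.tensor([[0.0, 0.0, math.pi / 2]])   # 90 degrees about z
R = batch_rodrigues(aa)
print(R[0] @ torch.tensor([1.0, 0.0, 0.0]))    # ~tensor([0., 1., 0.])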
+ def transform_mat(R: Tensor, t: Tensor) -> Tensor:
+     ''' Creates a batch of transformation matrices
+
+     Args:
+         - R: Bx3x3 array of a batch of rotation matrices
+         - t: Bx3x1 array of a batch of translation vectors
+     Returns:
+         - T: Bx4x4 Transformation matrix
+     '''
+     # No padding left or right, only add an extra row
+     return torch.cat([F.pad(R, [0, 0, 0, 1]),
+                       F.pad(t, [0, 0, 0, 1], value=1)], dim=2)
+
+
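For instance, an identity rotation with translation (1, 2, 3) yields the expected homogeneous matrix (minimal sketch, assuming the function above is in scope):

import torch

R = torch.eye(3).unsqueeze(0)               # B x 3 x 3
t = torch.tensor([[[1.0], [2.0], [3.0]]])   # B x 3 x 1
print(transform_mat(R, t)[0])
# tensor([[1., 0., 0., 1.],
#         [0., 1., 0., 2.],
#         [0., 0., 1., 3.],
#         [0., 0., 0., 1.]])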
+ def batch_rigid_transform(
+     rot_mats: Tensor,
+     joints: Tensor,
+     parents: Tensor,
+     dtype=torch.float32
+ ) -> Tuple[Tensor, Tensor]:
+     """
+     Applies a batch of rigid transformations to the joints
+
+     Parameters
+     ----------
+     rot_mats : torch.tensor BxNx3x3
+         Tensor of rotation matrices
+     joints : torch.tensor BxNx3
+         Locations of joints
+     parents : torch.tensor N
+         The kinematic tree of each object
+     dtype : torch.dtype, optional:
+         The data type of the created tensors, the default is torch.float32
+
+     Returns
+     -------
+     posed_joints : torch.tensor BxNx3
+         The locations of the joints after applying the pose rotations
+     rel_transforms : torch.tensor BxNx4x4
+         The relative (with respect to the root joint) rigid transformations
+         for all the joints
+     """
+
+     joints = torch.unsqueeze(joints, dim=-1)
+
+     # Express each joint relative to its parent; no rotation is needed at
+     # the rest pose, since all the relative rotations are the identity
+     rel_joints = joints.clone()
+     rel_joints[:, 1:] -= joints[:, parents[1:]]
+
+     transforms_mat = transform_mat(
+         rot_mats.reshape(-1, 3, 3),
+         rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4)
+
+     transform_chain = [transforms_mat[:, 0]]
+     for i in range(1, parents.shape[0]):
+         # Compose the transform of joint i with that of its parent,
+         # walking down the kinematic chain
+         curr_res = torch.matmul(transform_chain[parents[i]],
+                                 transforms_mat[:, i])
+         transform_chain.append(curr_res)
+
+     transforms = torch.stack(transform_chain, dim=1)
+
+     # The last column of the transformations contains the posed joints
+     posed_joints = transforms[:, :, :3, 3]
+
+     joints_homogen = F.pad(joints, [0, 0, 0, 1])
+
+     # Subtract the transformed rest-pose joint location so that the
+     # transforms are expressed relative to the rest pose
+     rel_transforms = transforms - F.pad(
+         torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0])
+
+     return posed_joints, rel_transforms
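Finally, a two-joint sanity check for batch_rigid_transform (a hypothetical toy chain, using batch_rodrigues from above): rotating the root by 90 degrees about z should swing a child that rests one unit along x onto the y-axis:

import math
import torch

rot = batch_rodrigues(torch.tensor([[0.0, 0.0, math.pi / 2],
                                    [0.0, 0.0, 0.0]])).view(1, 2, 3, 3)
joints = torch.tensor([[[0.0, 0.0, 0.0],    # root at the origin
                        [1.0, 0.0, 0.0]]])  # child one unit along x
parents = torch.tensor([-1, 0])

posed, rel = batch_rigid_transform(rot, joints, parents)
print(posed[0, 1])   # ~tensor([0., 1., 0.]), the child swung onto the y-axis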