
Commit c285d88

Added BVH to exponential map conversion
1 parent df48d93 commit c285d88

26 files changed: +7326 -0 lines changed

data_processing/bvh2features.py

Lines changed: 86 additions & 0 deletions
@@ -0,0 +1,86 @@
# This code was written by Simon Alexanderson
# and is released here: https://github.com/simonalexanderson/PyMO

import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline

from argparse import ArgumentParser

import glob
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)

from pymo.parsers import BVHParser
from pymo.data import Joint, MocapData
from pymo.preprocessing import *
from pymo.viz_tools import *
from pymo.writers import *

import joblib as jl


def extract_joint_angles(bvh_dir, files, dest_dir, pipeline_dir, fps):
    p = BVHParser()

    data_all = list()
    for f in files:
        ff = os.path.join(bvh_dir, f + '.bvh')
        print(ff)
        data_all.append(p.parse(ff))

    data_pipe = Pipeline([
        ('dwnsampl', DownSampler(tgt_fps=fps, keep_all=False)),
        ('root', RootTransformer('hip_centric')),
        ('mir', Mirror(axis='X', append=True)),
        ('jtsel', JointSelector(['Spine', 'Spine1', 'Spine2', 'Spine3', 'Neck', 'Neck1', 'Head', 'RightShoulder', 'RightArm', 'RightForeArm', 'RightHand', 'LeftShoulder', 'LeftArm', 'LeftForeArm', 'LeftHand'], include_root=True)),
        ('exp', MocapParameterizer('expmap')),
        ('cnst', ConstantsRemover()),
        ('np', Numpyfier())
    ])

    out_data = data_pipe.fit_transform(data_all)

    # the data pipe appends the mirrored files to the end
    assert len(out_data) == 2 * len(files)

    jl.dump(data_pipe, os.path.join(pipeline_dir, 'data_pipe.sav'))

    fi = 0
    for f in files:
        ff = os.path.join(dest_dir, f)
        print(ff)
        np.savez(ff + ".npz", clips=out_data[fi])
        np.savez(ff + "_mirrored.npz", clips=out_data[len(files) + fi])
        fi = fi + 1


if __name__ == '__main__':

    # Set up the parameter parser
    parser = ArgumentParser(add_help=False)
    parser.add_argument('--bvh_dir', '-orig', default="./data/",
                        help="Path where original motion files (in BVH format) are stored")
    parser.add_argument('--dest_dir', '-dest', default="./processed/",
                        help="Path where extracted motion features will be stored")
    parser.add_argument('--pipeline_dir', '-pipe', default="./utils/",
                        help="Path where the motion data processing pipeline will be stored")

    params = parser.parse_args()

    files = []
    # Go over all BVH files
    print("Going to pre-process the following motion files:")
    for r, d, f in os.walk(params.bvh_dir):
        for file in f:
            print(file)
            if '.bvh' in file:
                ff = os.path.join(r, file)
                basename = os.path.splitext(os.path.basename(ff))[0]
                files.append(basename)

    extract_joint_angles(params.bvh_dir, files, params.dest_dir, params.pipeline_dir, fps=20)
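
For reference, a minimal usage sketch (not part of the commit; the clip name and default paths are assumptions) of running the script and inspecting the resulting exponential-map features:

# Hypothetical usage sketch: paths and the clip name are assumptions, not part of this commit.
#   python data_processing/bvh2features.py -orig ./data/ -dest ./processed/ -pipe ./utils/
import numpy as np
clip = np.load('./processed/NaturalTalking_002.npz')['clips']
print(clip.shape)  # (n_frames, n_channels): expmap features for the selected joints at 20 fps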

data_processing/features2bvh.py

Lines changed: 37 additions & 0 deletions
@@ -0,0 +1,37 @@
# This code was written by Simon Alexanderson
# and is released here: https://github.com/simonalexanderson/PyMO


import os
import sys
import numpy as np

module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)

from pymo.parsers import BVHParser
from pymo.data import Joint, MocapData
from pymo.preprocessing import *
from pymo.viz_tools import *
from pymo.writers import *

import joblib as jl

# load the data pipeline fitted by bvh2features.py
pipeline = jl.load('processed/data_pipe.sav')

def feat2bvh(feat_file, bvh_file):
    features = np.load(feat_file)['clips']
    print(features.shape)

    # transform the data back to its original shape
    # note: in a real scenario this is usually done with predicted data
    # note: some transformations (such as transforming to joint positions) are not invertible
    bvh_data = pipeline.inverse_transform([features])

    # Test: write some of it to file for visualization in Blender or MotionBuilder
    writer = BVHWriter()
    with open(bvh_file, 'w') as f:
        writer.write(bvh_data[0], f)

feat2bvh("processed/NaturalTalking_002.npz", 'processed/converted2.bvh')
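
The same helper can be pointed at any feature file written by bvh2features.py, or at model predictions saved in the same .npz format. Note that bvh2features.py saves data_pipe.sav under --pipeline_dir (./utils/ by default) while this script loads it from processed/, so the pipeline file may need to be copied or the path adjusted. A hedged example, reusing the mirrored clip name produced by the extraction script:

# Hypothetical usage: the mirrored file name follows bvh2features.py's naming convention.
feat2bvh('processed/NaturalTalking_002_mirrored.npz', 'processed/converted_mirrored.bvh')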

data_processing/pymo/__init__.py

Whitespace-only changes.

data_processing/pymo/data.py

Lines changed: 53 additions & 0 deletions
@@ -0,0 +1,53 @@
import numpy as np

class Joint():
    def __init__(self, name, parent=None, children=None):
        self.name = name
        self.parent = parent
        self.children = children

class MocapData():
    def __init__(self):
        self.skeleton = {}
        self.values = None
        self.channel_names = []
        self.framerate = 0.0
        self.root_name = ''

    def traverse(self, j=None):
        stack = [self.root_name]
        while stack:
            joint = stack.pop()
            yield joint
            for c in self.skeleton[joint]['children']:
                stack.append(c)

    def clone(self):
        import copy
        new_data = MocapData()
        new_data.skeleton = copy.deepcopy(self.skeleton)
        new_data.values = copy.deepcopy(self.values)
        new_data.channel_names = copy.deepcopy(self.channel_names)
        new_data.root_name = copy.deepcopy(self.root_name)
        new_data.framerate = copy.deepcopy(self.framerate)
        return new_data

    def get_all_channels(self):
        '''Returns all of the channels parsed from the file as a 2D numpy array'''

        frames = [f[1] for f in self.values]
        return np.asarray([[channel[2] for channel in frame] for frame in frames])

    def get_skeleton_tree(self):
        # NOTE: incomplete; only the root joint is created and nothing is returned
        tree = []
        root_key = [j for j in self.skeleton if self.skeleton[j]['parent'] is None][0]

        root_joint = Joint(root_key)

    def get_empty_channels(self):
        # TODO
        pass

    def get_constant_channels(self):
        # TODO
        pass
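
A minimal sketch of how MocapData is typically consumed once a BVH file has been parsed (the file path below is a placeholder, not part of the commit):

# Hypothetical usage: 'example.bvh' is a placeholder path.
from pymo.parsers import BVHParser
mocap = BVHParser().parse('example.bvh')   # returns a MocapData instance
for joint in mocap.traverse():             # generator walking the skeleton from the root
    print(joint, '->', mocap.skeleton[joint]['parent'])
backup = mocap.clone()                     # deep copy that can be modified independently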

data_processing/pymo/features.py

Lines changed: 43 additions & 0 deletions
@@ -0,0 +1,43 @@
'''
A set of mocap feature extraction functions

Created by Omid Alemi | Nov 17 2017

'''
import numpy as np
import pandas as pd
import peakutils
import matplotlib.pyplot as plt

def get_foot_contact_idxs(signal, t=0.02, min_dist=120):
    up_idxs = peakutils.indexes(signal, thres=t/max(signal), min_dist=min_dist)
    down_idxs = peakutils.indexes(-signal, thres=t/min(signal), min_dist=min_dist)

    return [up_idxs, down_idxs]


def create_foot_contact_signal(mocap_track, col_name, start=1, t=0.02, min_dist=120):
    signal = mocap_track.values[col_name].values
    idxs = get_foot_contact_idxs(signal, t, min_dist)

    step_signal = []

    c = start
    for f in range(len(signal)):
        if f in idxs[1]:
            c = 0
        elif f in idxs[0]:
            c = 1

        step_signal.append(c)

    return step_signal

def plot_foot_up_down(mocap_track, col_name, t=0.02, min_dist=120):

    signal = mocap_track.values[col_name].values
    idxs = get_foot_contact_idxs(signal, t, min_dist)

    plt.plot(mocap_track.values.index, signal)
    plt.plot(mocap_track.values.index[idxs[0]], signal[idxs[0]], 'ro')
    plt.plot(mocap_track.values.index[idxs[1]], signal[idxs[1]], 'go')
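
A hedged sketch of combining these helpers, assuming mocap_track is a parsed MocapData instance whose values DataFrame contains a foot position or velocity column (the column name below is a placeholder):

# Hypothetical usage: the column name is a placeholder, not a guaranteed channel name.
contact = create_foot_contact_signal(mocap_track, 'RightFoot_Yposition')
plot_foot_up_down(mocap_track, 'RightFoot_Yposition')
plt.show()
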
Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
var dataBuffer = `$$DATA$$`;

start(dataBuffer);
