Update create_graph.py
create_graph.py  CHANGED  (+63 -64)
@@ -73,71 +73,58 @@ def get_motion_reps_tensor(motion_tensor, smplx_model, pose_fps=30, device='cuda'):
-# return_joints=True,
-# leye_pose=torch.zeros(bs * n, 3).to(device),
-# reye_pose=torch.zeros(bs * n, 3).to(device),
-# )
-# joints = output["joints"].detach().cpu().numpy().reshape(n, 127, 3)[:, :55, :]
-# dt = 1 / pose_fps
-# init_vel = (joints[1:2] - joints[0:1]) / dt
-# middle_vel = (joints[2:] - joints[:-2]) / (2 * dt)
-# final_vel = (joints[-1:] - joints[-2:-1]) / dt
-# vel = np.concatenate([init_vel, middle_vel, final_vel], axis=0)
-# position = joints
-# rot_matrices = rc.axis_angle_to_matrix(gt_motion_tensor.reshape(1, n, 55, 3))[0]
-# rot6d = rc.matrix_to_rotation_6d(rot_matrices).reshape(n, 55, 6).cpu().numpy()
+def get_motion_reps(motion, smplx_model, pose_fps=30):
+    gt_motion_tensor = motion["poses"]
+    n = gt_motion_tensor.shape[0]
+    bs = 1
+    gt_motion_tensor = torch.from_numpy(gt_motion_tensor).float().to(device).unsqueeze(0)
+    gt_motion_tensor_reshaped = gt_motion_tensor.reshape(bs * n, 165)
+    output = smplx_model(
+        betas=torch.zeros(bs * n, 300).to(device),
+        transl=torch.zeros(bs * n, 3).to(device),
+        expression=torch.zeros(bs * n, 100).to(device),
+        jaw_pose=torch.zeros(bs * n, 3).to(device),
+        global_orient=torch.zeros(bs * n, 3).to(device),
+        body_pose=gt_motion_tensor_reshaped[:, 3:21 * 3 + 3],
+        left_hand_pose=gt_motion_tensor_reshaped[:, 25 * 3:40 * 3],
+        right_hand_pose=gt_motion_tensor_reshaped[:, 40 * 3:55 * 3],
+        return_joints=True,
+        leye_pose=torch.zeros(bs * n, 3).to(device),
+        reye_pose=torch.zeros(bs * n, 3).to(device),
+    )
+    joints = output["joints"].detach().cpu().numpy().reshape(n, 127, 3)[:, :55, :]
+    dt = 1 / pose_fps
+    init_vel = (joints[1:2] - joints[0:1]) / dt
+    middle_vel = (joints[2:] - joints[:-2]) / (2 * dt)
+    final_vel = (joints[-1:] - joints[-2:-1]) / dt
+    vel = np.concatenate([init_vel, middle_vel, final_vel], axis=0)
+    position = joints
+    rot_matrices = rc.axis_angle_to_matrix(gt_motion_tensor.reshape(1, n, 55, 3))[0]
+    rot6d = rc.matrix_to_rotation_6d(rot_matrices).reshape(n, 55, 6).cpu().numpy()
+
+    init_vel = (motion["poses"][1:2] - motion["poses"][0:1]) / dt
+    middle_vel = (motion["poses"][2:] - motion["poses"][:-2]) / (2 * dt)
+    final_vel = (motion["poses"][-1:] - motion["poses"][-2:-1]) / dt
+    angular_velocity = np.concatenate([init_vel, middle_vel, final_vel], axis=0).reshape(n, 55, 3)
+
+    rep15d = np.concatenate([
+        position,
+        vel,
+        rot6d,
+        angular_velocity],
+        axis=2
+    ).reshape(n, 55*15)
+    return {
+        "position": position,
+        "velocity": vel,
+        "rotation": rot6d,
+        "axis_angle": motion["poses"],
+        "angular_velocity": angular_velocity,
+        "rep15d": rep15d,
+        "trans": motion["trans"]
+    }
 
-def create_graph(json_path):
+def create_graph(json_path, smplx_model):
     fps = 30
     data_meta = json.load(open(json_path, "r"))
     graph = igraph.Graph(directed=True)
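For context, the added get_motion_reps packs four per-joint streams into one 15-dimensional feature per joint: 3D position, 3D velocity, 6D rotation, and 3D angular velocity (3 + 3 + 6 + 3 = 15), so each clip yields an (n, 55*15) rep15d array. Velocities use a forward difference at the first frame, central differences in the middle, and a backward difference at the last frame. A minimal shape-only sketch of that arithmetic with dummy arrays; the finite_diff helper and the random inputs are illustrative, not part of the repo:

import numpy as np

n, J, pose_fps = 120, 55, 30            # frames, joints, frame rate (the diff's defaults)
dt = 1.0 / pose_fps

# Stand-ins for what get_motion_reps derives from the SMPL-X forward pass.
position = np.random.randn(n, J, 3)     # joint positions
rot6d = np.random.randn(n, J, 6)        # 6D rotation per joint
axis_angle = np.random.randn(n, J, 3)   # motion["poses"] reshaped to (n, 55, 3)

def finite_diff(x, dt):
    """Forward difference at the first frame, central in between, backward at the last."""
    init = (x[1:2] - x[0:1]) / dt
    middle = (x[2:] - x[:-2]) / (2 * dt)
    final = (x[-1:] - x[-2:-1]) / dt
    return np.concatenate([init, middle, final], axis=0)

velocity = finite_diff(position, dt)            # (n, 55, 3)
angular_velocity = finite_diff(axis_angle, dt)  # (n, 55, 3)

rep15d = np.concatenate([position, velocity, rot6d, angular_velocity], axis=2).reshape(n, J * 15)
assert rep15d.shape == (n, 825)                 # 55 joints x (3 + 3 + 6 + 3) features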
@@ -148,7 +135,7 @@ def create_graph(json_path):
         motion_path = os.path.join(data_item['motion_path'], data_item['video_id'] + ".npz")
         video_id = data_item.get("video_id", "")
         motion = np.load(motion_path, allow_pickle=True)
-        motion_reps = get_motion_reps(motion)
+        motion_reps = get_motion_reps(motion, smplx_model)
         position = motion_reps['position']
         velocity = motion_reps['velocity']
         trans = motion_reps['trans']
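The only change in this hunk is threading the SMPL-X model into the per-clip call. A hypothetical single-clip usage inside the same script, assuming smplx_model and device are already set up as in the __main__ hunk below and an .npz containing 'poses' of shape (n, 165) plus 'trans'; the path is a placeholder, not a real dataset entry:

import numpy as np

motion_path = "./datasets/motion/example_clip.npz"   # hypothetical; real paths come from the data_meta JSON

motion = np.load(motion_path, allow_pickle=True)     # expects "poses" (n, 165) and "trans" arrays
motion_reps = get_motion_reps(motion, smplx_model)   # model is now passed in explicitly

print(motion_reps["rep15d"].shape)                   # (n, 825) = 55 joints x 15 features
print(motion_reps["position"].shape)                 # (n, 55, 3)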
@@ -432,9 +419,21 @@ if __name__ == '__main__':
     json_path = args.json_save_path
     graph_path = args.graph_save_path
 
+    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+    smplx_model = smplx.create(
+        "./emage/smplx_models/",
+        model_type='smplx',
+        gender='NEUTRAL_2020',
+        use_face_contour=False,
+        num_betas=300,
+        num_expression_coeffs=100,
+        ext='npz',
+        use_pca=False,
+    ).to(device).eval()
+
     # single_test
     # graph = create_graph('/content/drive/MyDrive/003_Codes/TANGO/datasets/data_json/show_oliver_test/Abortion_Laws_-_Last_Week_Tonight_with_John_Oliver_HBO-DRauXXz6t0Y.webm.json')
-    graph = create_graph(json_path)
+    graph = create_graph(json_path, smplx_model)
     graph = create_edges(graph)
     # pool_path = "/content/drive/MyDrive/003_Codes/TANGO-JointEmbedding/datasets/oliver_test/show-oliver-test.pkl"
     # graph = igraph.Graph.Read_Pickle(fname=pool_path)
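Taken together, the __main__ changes amount to: build one SMPL-X model, pass it into create_graph, then connect the nodes. A rough usage sketch of that flow, assuming it runs inside this script so create_graph and create_edges are in scope; the paths and the final write_pickle call are illustrative assumptions, not shown in the diff:

import smplx
import torch

# Hypothetical paths; the script normally takes these from argparse
# (args.json_save_path, args.graph_save_path).
json_path = "./datasets/data_json/show_oliver_test.json"
graph_path = "./datasets/graphs/show_oliver_test.pkl"

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Build the SMPL-X body model once and reuse it for every clip, mirroring the diff.
smplx_model = smplx.create(
    "./emage/smplx_models/",
    model_type="smplx",
    gender="NEUTRAL_2020",
    use_face_contour=False,
    num_betas=300,
    num_expression_coeffs=100,
    ext="npz",
    use_pca=False,
).to(device).eval()

graph = create_graph(json_path, smplx_model)   # nodes built from each clip's motion reps
graph = create_edges(graph)                    # transition edges between nodes
graph.write_pickle(fname=graph_path)           # saving step assumed; not shown in the diff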