Update create_graph.py

create_graph.py  CHANGED  (+60 -60)
@@ -73,69 +73,69 @@ def get_motion_reps_tensor(motion_tensor, smplx_model, pose_fps=30, device='cuda'):
 
 
 
-def get_motion_reps(motion, smplx_model=smplx_model, pose_fps=30):
-    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-    smplx_model = smplx.create(
-        "./emage/smplx_models/",
-        model_type='smplx',
-        gender='NEUTRAL_2020',
-        use_face_contour=False,
-        num_betas=300,
-        num_expression_coeffs=100,
-        ext='npz',
-        use_pca=False,
-    ).to(device).eval()
-    print("warning, smplx model is created inside fn for gradio")
+# def get_motion_reps(motion, smplx_model=smplx_model, pose_fps=30):
+#     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+#     smplx_model = smplx.create(
+#         "./emage/smplx_models/",
+#         model_type='smplx',
+#         gender='NEUTRAL_2020',
+#         use_face_contour=False,
+#         num_betas=300,
+#         num_expression_coeffs=100,
+#         ext='npz',
+#         use_pca=False,
+#     ).to(device).eval()
+#     print("warning, smplx model is created inside fn for gradio")
 
-    gt_motion_tensor = motion["poses"]
-    n = gt_motion_tensor.shape[0]
-    bs = 1
-    gt_motion_tensor = torch.from_numpy(gt_motion_tensor).float().to(device).unsqueeze(0)
-    gt_motion_tensor_reshaped = gt_motion_tensor.reshape(bs * n, 165)
-    output = smplx_model(
-        betas=torch.zeros(bs * n, 300).to(device),
-        transl=torch.zeros(bs * n, 3).to(device),
-        expression=torch.zeros(bs * n, 100).to(device),
-        jaw_pose=torch.zeros(bs * n, 3).to(device),
-        global_orient=torch.zeros(bs * n, 3).to(device),
-        body_pose=gt_motion_tensor_reshaped[:, 3:21 * 3 + 3],
-        left_hand_pose=gt_motion_tensor_reshaped[:, 25 * 3:40 * 3],
-        right_hand_pose=gt_motion_tensor_reshaped[:, 40 * 3:55 * 3],
-        return_joints=True,
-        leye_pose=torch.zeros(bs * n, 3).to(device),
-        reye_pose=torch.zeros(bs * n, 3).to(device),
-    )
-    joints = output["joints"].detach().cpu().numpy().reshape(n, 127, 3)[:, :55, :]
-    dt = 1 / pose_fps
-    init_vel = (joints[1:2] - joints[0:1]) / dt
-    middle_vel = (joints[2:] - joints[:-2]) / (2 * dt)
-    final_vel = (joints[-1:] - joints[-2:-1]) / dt
-    vel = np.concatenate([init_vel, middle_vel, final_vel], axis=0)
-    position = joints
-    rot_matrices = rc.axis_angle_to_matrix(gt_motion_tensor.reshape(1, n, 55, 3))[0]
-    rot6d = rc.matrix_to_rotation_6d(rot_matrices).reshape(n, 55, 6).cpu().numpy()
+#     gt_motion_tensor = motion["poses"]
+#     n = gt_motion_tensor.shape[0]
+#     bs = 1
+#     gt_motion_tensor = torch.from_numpy(gt_motion_tensor).float().to(device).unsqueeze(0)
+#     gt_motion_tensor_reshaped = gt_motion_tensor.reshape(bs * n, 165)
+#     output = smplx_model(
+#         betas=torch.zeros(bs * n, 300).to(device),
+#         transl=torch.zeros(bs * n, 3).to(device),
+#         expression=torch.zeros(bs * n, 100).to(device),
+#         jaw_pose=torch.zeros(bs * n, 3).to(device),
+#         global_orient=torch.zeros(bs * n, 3).to(device),
+#         body_pose=gt_motion_tensor_reshaped[:, 3:21 * 3 + 3],
+#         left_hand_pose=gt_motion_tensor_reshaped[:, 25 * 3:40 * 3],
+#         right_hand_pose=gt_motion_tensor_reshaped[:, 40 * 3:55 * 3],
+#         return_joints=True,
+#         leye_pose=torch.zeros(bs * n, 3).to(device),
+#         reye_pose=torch.zeros(bs * n, 3).to(device),
+#     )
+#     joints = output["joints"].detach().cpu().numpy().reshape(n, 127, 3)[:, :55, :]
+#     dt = 1 / pose_fps
+#     init_vel = (joints[1:2] - joints[0:1]) / dt
+#     middle_vel = (joints[2:] - joints[:-2]) / (2 * dt)
+#     final_vel = (joints[-1:] - joints[-2:-1]) / dt
+#     vel = np.concatenate([init_vel, middle_vel, final_vel], axis=0)
+#     position = joints
+#     rot_matrices = rc.axis_angle_to_matrix(gt_motion_tensor.reshape(1, n, 55, 3))[0]
+#     rot6d = rc.matrix_to_rotation_6d(rot_matrices).reshape(n, 55, 6).cpu().numpy()
 
-    init_vel = (motion["poses"][1:2] - motion["poses"][0:1]) / dt
-    middle_vel = (motion["poses"][2:] - motion["poses"][:-2]) / (2 * dt)
-    final_vel = (motion["poses"][-1:] - motion["poses"][-2:-1]) / dt
-    angular_velocity = np.concatenate([init_vel, middle_vel, final_vel], axis=0).reshape(n, 55, 3)
+#     init_vel = (motion["poses"][1:2] - motion["poses"][0:1]) / dt
+#     middle_vel = (motion["poses"][2:] - motion["poses"][:-2]) / (2 * dt)
+#     final_vel = (motion["poses"][-1:] - motion["poses"][-2:-1]) / dt
+#     angular_velocity = np.concatenate([init_vel, middle_vel, final_vel], axis=0).reshape(n, 55, 3)
 
-    rep15d = np.concatenate([
-        position,
-        vel,
-        rot6d,
-        angular_velocity],
-        axis=2
-    ).reshape(n, 55*15)
-    return {
-        "position": position,
-        "velocity": vel,
-        "rotation": rot6d,
-        "axis_angle": motion["poses"],
-        "angular_velocity": angular_velocity,
-        "rep15d": rep15d,
-        "trans": motion["trans"]
-    }
+#     rep15d = np.concatenate([
+#         position,
+#         vel,
+#         rot6d,
+#         angular_velocity],
+#         axis=2
+#     ).reshape(n, 55*15)
+#     return {
+#         "position": position,
+#         "velocity": vel,
+#         "rotation": rot6d,
+#         "axis_angle": motion["poses"],
+#         "angular_velocity": angular_velocity,
+#         "rep15d": rep15d,
+#         "trans": motion["trans"]
+#     }
 
 def create_graph(json_path):
     fps = 30
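For context, the hunk header shows that a tensor-based variant, get_motion_reps_tensor(motion_tensor, smplx_model, pose_fps=30, device='cuda'), stays in the file, while the NumPy-based get_motion_reps is now commented out. The commented-out helper builds a 15-value-per-joint feature over 55 SMPL-X joints: 3D position, 3D linear velocity from finite differences at pose_fps, a 6D rotation, and 3D angular velocity, concatenated into an (n, 55*15) "rep15d" array. Below is a minimal, self-contained NumPy sketch of just that finite-difference and concatenation scheme, using random stand-in arrays instead of SMPL-X output; the helper name finite_diff and the dummy shapes are illustrative only and not part of the repository.

import numpy as np

def finite_diff(x, dt):
    # Same scheme as the commented-out helper: forward difference for the first
    # frame, central differences for interior frames, backward difference at the end.
    init = (x[1:2] - x[0:1]) / dt
    middle = (x[2:] - x[:-2]) / (2 * dt)
    final = (x[-1:] - x[-2:-1]) / dt
    return np.concatenate([init, middle, final], axis=0)

n, num_joints, pose_fps = 120, 55, 30
dt = 1 / pose_fps
position = np.random.randn(n, num_joints, 3)     # stand-in for SMPL-X joint positions
rot6d = np.random.randn(n, num_joints, 6)        # stand-in for 6D joint rotations
axis_angle = np.random.randn(n, num_joints, 3)   # stand-in for per-joint axis-angle poses

velocity = finite_diff(position, dt)             # (n, 55, 3)
angular_velocity = finite_diff(axis_angle, dt)   # (n, 55, 3)

rep15d = np.concatenate([position, velocity, rot6d, angular_velocity], axis=2).reshape(n, num_joints * 15)
print(rep15d.shape)  # (120, 825)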