H-Liu1997 committed on
Commit: 91a5a0d
Parent(s): e0d5890

Update SMPLer-X/app.py

Files changed (1):
1. SMPLer-X/app.py (+134, -134)
SMPLer-X/app.py CHANGED
@@ -1,135 +1,135 @@
- import os
- import shutil
- import argparse
- import sys
- import re
- import json
- import numpy as np
- import os.path as osp
- from pathlib import Path
- import cv2
- import torch
- import math
- from tqdm import tqdm
- from huggingface_hub import hf_hub_download
- try:
-     import mmpose
- except:
-     os.system('pip install ./main/transformer_utils')
- # hf_hub_download(repo_id="caizhongang/SMPLer-X", filename="smpler_x_h32.pth.tar", local_dir="/home/user/app/pretrained_models")
- # /home/user/.pyenv/versions/3.9.19/lib/python3.9/site-packages/torchgeometry/core/conversions.py
- # os.system('cp -rf ./assets/conversions.py /content/myenv/lib/python3.10/site-packages/torchgeometry/core/conversions.py')
-
- def extract_frame_number(file_name):
-     match = re.search(r'(\d{5})', file_name)
-     if match:
-         return int(match.group(1))
-     return None
-
- def merge_npz_files(npz_files, output_file):
-     npz_files = sorted(npz_files, key=lambda x: extract_frame_number(os.path.basename(x)))
-     merged_data = {}
-     for file in npz_files:
-         data = np.load(file)
-         for key in data.files:
-             if key not in merged_data:
-                 merged_data[key] = []
-             merged_data[key].append(data[key])
-     for key in merged_data:
-         merged_data[key] = np.stack(merged_data[key], axis=0)
-     np.savez(output_file, **merged_data)
-
- def npz_to_npz(pkl_path, npz_path):
-     # Load the pickle file
-     pkl_example = np.load(pkl_path, allow_pickle=True)
-     n = pkl_example["expression"].shape[0] # Assuming this is the batch size
-     full_pose = np.concatenate([pkl_example["global_orient"], pkl_example["body_pose"], pkl_example["jaw_pose"], pkl_example["leye_pose"], pkl_example["reye_pose"], pkl_example["left_hand_pose"], pkl_example["right_hand_pose"]], axis=1)
-     # print(full_pose.shape)
-     np.savez(npz_path,
-              betas=np.zeros(300),
-              poses=full_pose.reshape(n, -1),
-              expressions=np.zeros((n, 100)),
-              trans=pkl_example["transl"].reshape(n, -1),
-              model='smplx2020',
-              gender='neutral',
-              mocap_frame_rate=30,
-              )
-
- def get_json(root_dir, output_dir):
-     clips = []
-     dirs = os.listdir(root_dir)
-     all_length = 0
-     for dir in dirs:
-         if not dir.endswith(".mp4"): continue
-         video_id = dir[:-4]
-         root = root_dir
-         try:
-             length = np.load(os.path.join(root, video_id+".npz"), allow_pickle=True)["poses"].shape[0]
-             all_length += length
-         except:
-             print("cant open ", dir)
-             continue
-         clip = {
-             "video_id": video_id,
-             "video_path": root[1:],
-             # "audio_path": root,
-             "motion_path": root[1:],
-             "mode": "test",
-             "start_idx": 0,
-             "end_idx": length
-         }
-         clips.append(clip)
-     if all_length < 1:
-         print(f"skip due to total frames is less than 1500 for {root_dir}")
-         return 0
-     else:
-         with open(output_dir, 'w') as f:
-             json.dump(clips, f, indent=4)
-     return all_length
-
-
- def infer(video_input, in_threshold, num_people, render_mesh, inferer, OUT_FOLDER):
-     os.system(f'rm -rf {OUT_FOLDER}/smplx/*')
-     multi_person = num_people
-     cap = cv2.VideoCapture(video_input)
-     video_name = video_input.split("/")[-1]
-     success = 1
-     frame = 0
-     while success:
-         success, original_img = cap.read()
-         if not success:
-             break
-         frame += 1
-         _, _, _ = inferer.infer(original_img, in_threshold, frame, multi_person, not(render_mesh))
-     cap.release()
-     npz_files = [os.path.join(OUT_FOLDER, 'smplx', x) for x in os.listdir(os.path.join(OUT_FOLDER, 'smplx'))]
-
-     merge_npz_files(npz_files, os.path.join(OUT_FOLDER, video_name.replace(".mp4", ".npz")))
-     os.system(f'rm -r {OUT_FOLDER}/smplx')
-     npz_to_npz(os.path.join(OUT_FOLDER, video_name.replace(".mp4", ".npz")), os.path.join(OUT_FOLDER, video_name.replace(".mp4", ".npz")))
-     source = video_input
-     destination = os.path.join(OUT_FOLDER, video_name.replace('.mp4', '.npz')).replace('.npz', '.mp4')
-     shutil.copy(source, destination)
-
- if __name__ == "__main__":
-     parser = argparse.ArgumentParser()
-     parser.add_argument("--video_folder_path", type=str, default="")
-     parser.add_argument("--data_save_path", type=str, default="")
-     parser.add_argument("--json_save_path", type=str, default="")
-     args = parser.parse_args()
-     video_folder = args.video_folder_path
-
-     DEFAULT_MODEL='smpler_x_s32'
-     OUT_FOLDER = args.data_save_path
-     os.makedirs(OUT_FOLDER, exist_ok=True)
-     num_gpus = 1 if torch.cuda.is_available() else -1
-     index = torch.cuda.current_device()
-     from main.inference import Inferer
-     inferer = Inferer(DEFAULT_MODEL, num_gpus, OUT_FOLDER)
-
-     for video_input in tqdm(os.listdir(video_folder)):
-         if not video_input.endswith(".mp4"):
-             continue
-         infer(os.path.join(video_folder, video_input), 0.5, False, False, inferer, OUT_FOLDER)
-     get_json(OUT_FOLDER, args.json_save_path)
-
+ import os
+ import shutil
+ import argparse
+ import sys
+ import re
+ import json
+ import numpy as np
+ import os.path as osp
+ from pathlib import Path
+ import cv2
+ import torch
+ import math
+ from tqdm import tqdm
+ from huggingface_hub import hf_hub_download
+ try:
+     import mmpose
+ except:
+     os.system('pip install ./main/transformer_utils')
+ # hf_hub_download(repo_id="caizhongang/SMPLer-X", filename="smpler_x_h32.pth.tar", local_dir="/home/user/app/pretrained_models")
+ # /home/user/.pyenv/versions/3.9.19/lib/python3.9/site-packages/torchgeometry/core/conversions.py
+ os.system('cp -rf ./assets/conversions.py /home/user/.pyenv/versions/3.9.20/lib/python3.9/site-packages/torchgeometry/core/conversions.py')
+
+ def extract_frame_number(file_name):
+     match = re.search(r'(\d{5})', file_name)
+     if match:
+         return int(match.group(1))
+     return None
+
+ def merge_npz_files(npz_files, output_file):
+     npz_files = sorted(npz_files, key=lambda x: extract_frame_number(os.path.basename(x)))
+     merged_data = {}
+     for file in npz_files:
+         data = np.load(file)
+         for key in data.files:
+             if key not in merged_data:
+                 merged_data[key] = []
+             merged_data[key].append(data[key])
+     for key in merged_data:
+         merged_data[key] = np.stack(merged_data[key], axis=0)
+     np.savez(output_file, **merged_data)
+
+ def npz_to_npz(pkl_path, npz_path):
+     # Load the pickle file
+     pkl_example = np.load(pkl_path, allow_pickle=True)
+     n = pkl_example["expression"].shape[0] # Assuming this is the batch size
+     full_pose = np.concatenate([pkl_example["global_orient"], pkl_example["body_pose"], pkl_example["jaw_pose"], pkl_example["leye_pose"], pkl_example["reye_pose"], pkl_example["left_hand_pose"], pkl_example["right_hand_pose"]], axis=1)
+     # print(full_pose.shape)
+     np.savez(npz_path,
+              betas=np.zeros(300),
+              poses=full_pose.reshape(n, -1),
+              expressions=np.zeros((n, 100)),
+              trans=pkl_example["transl"].reshape(n, -1),
+              model='smplx2020',
+              gender='neutral',
+              mocap_frame_rate=30,
+              )
+
+ def get_json(root_dir, output_dir):
+     clips = []
+     dirs = os.listdir(root_dir)
+     all_length = 0
+     for dir in dirs:
+         if not dir.endswith(".mp4"): continue
+         video_id = dir[:-4]
+         root = root_dir
+         try:
+             length = np.load(os.path.join(root, video_id+".npz"), allow_pickle=True)["poses"].shape[0]
+             all_length += length
+         except:
+             print("cant open ", dir)
+             continue
+         clip = {
+             "video_id": video_id,
+             "video_path": root[1:],
+             # "audio_path": root,
+             "motion_path": root[1:],
+             "mode": "test",
+             "start_idx": 0,
+             "end_idx": length
+         }
+         clips.append(clip)
+     if all_length < 1:
+         print(f"skip due to total frames is less than 1500 for {root_dir}")
+         return 0
+     else:
+         with open(output_dir, 'w') as f:
+             json.dump(clips, f, indent=4)
+     return all_length
+
+
+ def infer(video_input, in_threshold, num_people, render_mesh, inferer, OUT_FOLDER):
+     os.system(f'rm -rf {OUT_FOLDER}/smplx/*')
+     multi_person = num_people
+     cap = cv2.VideoCapture(video_input)
+     video_name = video_input.split("/")[-1]
+     success = 1
+     frame = 0
+     while success:
+         success, original_img = cap.read()
+         if not success:
+             break
+         frame += 1
+         _, _, _ = inferer.infer(original_img, in_threshold, frame, multi_person, not(render_mesh))
+     cap.release()
+     npz_files = [os.path.join(OUT_FOLDER, 'smplx', x) for x in os.listdir(os.path.join(OUT_FOLDER, 'smplx'))]
+
+     merge_npz_files(npz_files, os.path.join(OUT_FOLDER, video_name.replace(".mp4", ".npz")))
+     os.system(f'rm -r {OUT_FOLDER}/smplx')
+     npz_to_npz(os.path.join(OUT_FOLDER, video_name.replace(".mp4", ".npz")), os.path.join(OUT_FOLDER, video_name.replace(".mp4", ".npz")))
+     source = video_input
+     destination = os.path.join(OUT_FOLDER, video_name.replace('.mp4', '.npz')).replace('.npz', '.mp4')
+     shutil.copy(source, destination)
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--video_folder_path", type=str, default="")
+     parser.add_argument("--data_save_path", type=str, default="")
+     parser.add_argument("--json_save_path", type=str, default="")
+     args = parser.parse_args()
+     video_folder = args.video_folder_path
+
+     DEFAULT_MODEL='smpler_x_s32'
+     OUT_FOLDER = args.data_save_path
+     os.makedirs(OUT_FOLDER, exist_ok=True)
+     num_gpus = 1 if torch.cuda.is_available() else -1
+     index = torch.cuda.current_device()
+     from main.inference import Inferer
+     inferer = Inferer(DEFAULT_MODEL, num_gpus, OUT_FOLDER)
+
+     for video_input in tqdm(os.listdir(video_folder)):
+         if not video_input.endswith(".mp4"):
+             continue
+         infer(os.path.join(video_folder, video_input), 0.5, False, False, inferer, OUT_FOLDER)
+     get_json(OUT_FOLDER, args.json_save_path)
+
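
The line this commit enables copies the patched conversions.py from ./assets over torchgeometry's installed copy, using a hard-coded pyenv path (Python 3.9.20). Below is a minimal sketch, not part of the committed script, of the same copy step with the destination derived from the installed package instead of a fixed path; it assumes torchgeometry is importable in the target environment.

import os.path as osp
import shutil

import torchgeometry

# Locate the installed torchgeometry package and overwrite its core/conversions.py
# with the patched copy shipped in ./assets (same intent as the hard-coded cp above).
dst = osp.join(osp.dirname(torchgeometry.__file__), 'core', 'conversions.py')
shutil.copyfile('./assets/conversions.py', dst)

The script itself is driven by the three flags defined under __main__, e.g. python SMPLer-X/app.py --video_folder_path <videos> --data_save_path <out> --json_save_path <out>/test.json, where the bracketed paths are placeholders.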