77Licensed under Apache 2.0
88"""
99
10- import sys
1110import cv2
1211import os
1312import numpy as np
1615from tqdm import tqdm
1716import copy
1817
19- sys .path .append (os .getcwd ())
20-
2118# Auto-download checkpoint files if missing
2219from fmpose3d .lib .checkpoint .download_checkpoints import ensure_checkpoints
2320ensure_checkpoints ()
2825
2926args = parse_args ().parse ()
3027os .environ ["CUDA_VISIBLE_DEVICES" ] = args .gpu
31- if getattr (args , 'model_path' , '' ):
32- import importlib .util
33- import pathlib
34- model_abspath = os .path .abspath (args .model_path )
35- module_name = pathlib .Path (model_abspath ).stem
36- spec = importlib .util .spec_from_file_location (module_name , model_abspath )
37- module = importlib .util .module_from_spec (spec )
38- assert spec .loader is not None
39- spec .loader .exec_module (module )
40- CFM = getattr (module , 'Model' )
41-
28+
29+ from fmpose3d .models import get_model
30+ CFM = get_model (args .model_type )
31+
4232from fmpose3d .common .camera import *
4333
4434import matplotlib
5040matplotlib .rcParams ['pdf.fonttype' ] = 42
5141matplotlib .rcParams ['ps.fonttype' ] = 42
5242
53- def show2Dpose (kps , img ):
54- connections = [[0 , 1 ], [1 , 2 ], [2 , 3 ], [0 , 4 ], [4 , 5 ],
55- [5 , 6 ], [0 , 7 ], [7 , 8 ], [8 , 9 ], [9 , 10 ],
56- [8 , 11 ], [11 , 12 ], [12 , 13 ], [8 , 14 ], [14 , 15 ], [15 , 16 ]]
43+ # Shared skeleton definition so 2D/3D segment colors match
44+ SKELETON_CONNECTIONS = [
45+ [0 , 1 ], [1 , 2 ], [2 , 3 ], [0 , 4 ], [4 , 5 ],
46+ [5 , 6 ], [0 , 7 ], [7 , 8 ], [8 , 9 ], [9 , 10 ],
47+ [8 , 11 ], [11 , 12 ], [12 , 13 ], [8 , 14 ], [14 , 15 ], [15 , 16 ]
48+ ]
49+ # LR mask for skeleton segments: True -> left color, False -> right color
50+ SKELETON_LR = np .array (
51+ [0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 0 , 0 , 0 ],
52+ dtype = bool ,
53+ )
5754
58- LR = np .array ([0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 0 , 0 , 0 ], dtype = bool )
55+ def show2Dpose (kps , img ):
56+ connections = SKELETON_CONNECTIONS
57+ LR = SKELETON_LR
5958
6059 lcolor = (255 , 0 , 0 )
6160 rcolor = (0 , 0 , 255 )
61+ # lcolor = (240, 176, 0)
62+ # rcolor = (240, 176, 0)
63+
6264 thickness = 3
6365
6466 for j ,c in enumerate (connections ):
@@ -67,8 +69,8 @@ def show2Dpose(kps, img):
6769 start = list (start )
6870 end = list (end )
6971 cv2 .line (img , (start [0 ], start [1 ]), (end [0 ], end [1 ]), lcolor if LR [j ] else rcolor , thickness )
70- cv2 .circle (img , (start [0 ], start [1 ]), thickness = - 1 , color = (0 , 255 , 0 ), radius = 3 )
71- cv2 .circle (img , (end [0 ], end [1 ]), thickness = - 1 , color = (0 , 255 , 0 ), radius = 3 )
72+ # cv2.circle(img, (start[0], start[1]), thickness=-1, color=(0, 255, 0), radius=3)
73+ # cv2.circle(img, (end[0], end[1]), thickness=-1, color=(0, 255, 0), radius=3)
7274
7375 return img
7476
@@ -77,11 +79,13 @@ def show3Dpose(vals, ax):
7779
7880 lcolor = (0 ,0 ,1 )
7981 rcolor = (1 ,0 ,0 )
80-
81- I = np .array ( [0 , 0 , 1 , 4 , 2 , 5 , 0 , 7 , 8 , 8 , 14 , 15 , 11 , 12 , 8 , 9 ])
82- J = np .array ( [1 , 4 , 2 , 5 , 3 , 6 , 7 , 8 , 14 , 11 , 15 , 16 , 12 , 13 , 9 , 10 ])
83-
84- LR = np .array ([0 , 1 , 0 , 1 , 0 , 1 , 0 , 0 , 0 , 1 , 0 , 0 , 1 , 1 , 0 , 0 ], dtype = bool )
82+ # lcolor=(0/255, 176/255, 240/255)
83+ # rcolor=(0/255, 176/255, 240/255)
84+
85+
86+ I = np .array ([c [0 ] for c in SKELETON_CONNECTIONS ])
87+ J = np .array ([c [1 ] for c in SKELETON_CONNECTIONS ])
88+ LR = SKELETON_LR
8589
8690 for i in np .arange ( len (I ) ):
8791 x , y , z = [np .array ( [vals [I [i ], j ], vals [J [i ], j ]] ) for j in range (3 )]
@@ -199,7 +203,8 @@ def get_3D_pose_from_image(args, keypoints, i, img, model, output_dir):
199203
200204 input_2D = input_2D [np .newaxis , :, :, :, :]
201205
202- input_2D = torch .from_numpy (input_2D .astype ('float32' )).cuda ()
206+ device = torch .device ("cuda" if torch .cuda .is_available () else "cpu" )
207+ input_2D = torch .from_numpy (input_2D .astype ('float32' )).to (device )
203208
204209 N = input_2D .size (0 )
205210
@@ -215,10 +220,10 @@ def euler_sample(c_2d, y_local, steps, model_3d):
215220
216221 ## estimation
217222
218- y = torch .randn (input_2D .size (0 ), input_2D .size (2 ), input_2D .size (3 ), 3 ). cuda ( )
223+ y = torch .randn (input_2D .size (0 ), input_2D .size (2 ), input_2D .size (3 ), 3 , device = device )
219224 output_3D_non_flip = euler_sample (input_2D [:, 0 ], y , steps = args .sample_steps , model_3d = model )
220225
221- y_flip = torch .randn (input_2D .size (0 ), input_2D .size (2 ), input_2D .size (3 ), 3 ). cuda ( )
226+ y_flip = torch .randn (input_2D .size (0 ), input_2D .size (2 ), input_2D .size (3 ), 3 , device = device )
222227 output_3D_flip = euler_sample (input_2D [:, 1 ], y_flip , steps = args .sample_steps , model_3d = model )
223228
224229 output_3D_flip [:, :, :, 0 ] *= - 1
@@ -266,14 +271,16 @@ def get_pose3D(path, output_dir, type='image'):
266271 # args.type = type
267272
268273 ## Reload
274+ device = torch .device ("cuda" if torch .cuda .is_available () else "cpu" )
275+
269276 model = {}
270- model ['CFM' ] = CFM (args ).cuda ( )
277+ model ['CFM' ] = CFM (args ).to ( device )
271278
272279 # if args.reload:
273280 model_dict = model ['CFM' ].state_dict ()
274- model_path = args .saved_model_path
281+ model_path = args .model_weights_path
275282 print (model_path )
276- pre_dict = torch .load (model_path )
283+ pre_dict = torch .load (model_path , map_location = device , weights_only = True )
277284 for name , key in model_dict .items ():
278285 model_dict [name ] = pre_dict [name ]
279286 model ['CFM' ].load_state_dict (model_dict )
@@ -336,7 +343,7 @@ def get_pose3D(path, output_dir, type='image'):
336343 ## save
337344 output_dir_pose = output_dir + 'pose/'
338345 os .makedirs (output_dir_pose , exist_ok = True )
339- plt .savefig (output_dir_pose + str (('%04d' % i )) + '_pose.jpg ' , dpi = 200 , bbox_inches = 'tight' )
346+ plt .savefig (output_dir_pose + str (('%04d' % i )) + '_pose.png ' , dpi = 200 , bbox_inches = 'tight' )
340347
341348
342349if __name__ == "__main__" :