import torch


def subsample_tensor(tensor, original_fps=30, target_fps=25):
    """Subsample frames along the first axis to convert original_fps to target_fps."""
    # Total number of original frames
    total_frames = tensor.shape[0]
    # Calculate the new number of frames at the target frame rate
    new_frames = int(total_frames * (target_fps / original_fps))
    # Create evenly spaced indices for subsampling
    indices = torch.linspace(0, total_frames - 1, new_frames).long()
    # Select the corresponding frames
    subsampled_tensor = tensor[indices]
    return subsampled_tensor


def get_render(body_model_loaded, body_trans, body_orient, body_pose, output_path, text='', colors=None):
    """Run the SMPL forward pass for one or two bodies and render the result to output_path."""
    from renderer.utils import run_smpl_fwd_vertices

    if colors is None:
        colors = []
    vertices_list = []
    # Wrap single-body inputs in lists so the one- and two-body cases share the same code path
    if not isinstance(body_trans, list):
        body_trans = [body_trans]
    if not isinstance(body_orient, list):
        body_orient = [body_orient]
    if not isinstance(body_pose, list):
        body_pose = [body_pose]

    for trans, orient, pose in zip(body_trans, body_orient, body_pose):
        vertices = run_smpl_fwd_vertices(body_model_loaded, trans, orient, pose)
        vertices = vertices.vertices
        # vertices = subsample_tensor(vertices, original_fps=30, target_fps=25)
        vertices = vertices.detach().cpu().numpy()
        vertices_list.append(vertices)

    # Initialise the renderer
    from renderer.humor import HumorRenderer
    fps = 30.0
    imw = 720  # 480
    imh = 540  # 360
    renderer = HumorRenderer(fps=fps, imw=imw, imh=imh)

    # Render a body pair when two vertex sequences are given, otherwise a single body
    if len(vertices_list) == 2:
        renderer(vertices_list, output_path, render_pair=True, fps=fps, colors=colors)
    else:
        renderer(vertices_list[0], output_path, render_pair=False, fps=fps, colors=colors)
    return output_path
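

# A minimal, self-contained sanity check for subsample_tensor (an illustrative
# sketch, not part of the original module): it needs only torch, and assumes a
# dummy vertex tensor of shape [frames, 6890, 3] stands in for real SMPL output.
if __name__ == "__main__":
    dummy_vertices = torch.randn(30, 6890, 3)  # one second of 30 fps vertex data
    subsampled = subsample_tensor(dummy_vertices, original_fps=30, target_fps=25)
    # 30 frames * (25 / 30) -> 25 frames after subsampling
    print(subsampled.shape)  # torch.Size([25, 6890, 3])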