From 0367d7a6228b99b86a1b142aa027e13301b65a79 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 25 Feb 2026 11:38:11 +0100 Subject: [PATCH 01/13] WIP --- .../assets/articulation/articulation.py | 107 +++++++++++++++++- .../assets/rigid_object/rigid_object.py | 76 +++++++++++++ .../assets/articulation/articulation.py | 67 +++++++++++ .../deformable_object/deformable_object.py | 32 ++++++ .../assets/rigid_object/rigid_object.py | 36 ++++++ .../rigid_object_collection.py | 36 ++++++ .../assets/surface_gripper/surface_gripper.py | 27 +++++ 7 files changed, 378 insertions(+), 3 deletions(-) diff --git a/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py b/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py index bc0c21b7005..3efd0b32841 100644 --- a/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py +++ b/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py @@ -471,6 +471,7 @@ def write_root_link_pose_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.set_root_link_pose_to_sim_index, @@ -526,6 +527,7 @@ def write_root_link_pose_to_sim_mask( """ if env_mask is None: env_mask = self._ALL_ENV_MASK + self.assert_shape_and_dtype_mask(root_pose, (env_mask,), wp.transformf) wp.launch( shared_kernels.set_root_link_pose_to_sim_mask, @@ -582,6 +584,7 @@ def write_root_com_pose_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. # Note: we are doing a single launch for faster performance. 
Prior versions would call # write_root_link_pose_to_sim after this. @@ -647,6 +650,7 @@ def write_root_com_pose_to_sim_mask( """ if env_mask is None: env_mask = self._ALL_ENV_MASK + self.assert_shape_and_dtype_mask(root_pose, (env_mask,), wp.transformf) wp.launch( shared_kernels.set_root_com_pose_to_sim_mask, dim=root_pose.shape[0], @@ -762,6 +766,7 @@ def write_root_com_velocity_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.set_root_com_velocity_to_sim_index, @@ -813,6 +818,7 @@ def write_root_com_velocity_to_sim_mask( """ if env_mask is None: env_mask = self._ALL_ENV_MASK + self.assert_shape_and_dtype_mask(root_velocity, (env_mask,), wp.spatial_vectorf) wp.launch( shared_kernels.set_root_com_velocity_to_sim_mask, dim=root_velocity.shape[0], @@ -863,6 +869,7 @@ def write_root_link_velocity_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. # Note: we are doing a single launch for faster performance. Prior versions would do multiple launches. 
wp.launch( @@ -922,6 +929,7 @@ def write_root_link_velocity_to_sim_mask( """ if env_mask is None: env_mask = self._ALL_ENV_MASK + self.assert_shape_and_dtype_mask(root_velocity, (env_mask,), wp.spatial_vectorf) wp.launch( shared_kernels.set_root_link_velocity_to_sim_mask, dim=root_velocity.shape[0], @@ -1004,6 +1012,7 @@ def write_joint_position_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(position, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -1058,6 +1067,7 @@ def write_joint_position_to_sim_mask( env_mask = self._ALL_ENV_MASK if joint_mask is None: joint_mask = self._ALL_JOINT_MASK + self.assert_shape_and_dtype_mask(position, (env_mask, joint_mask), wp.float32) wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -1110,6 +1120,7 @@ def write_joint_velocity_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(velocity, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( articulation_kernels.write_joint_vel_data_index, @@ -1154,6 +1165,7 @@ def write_joint_velocity_to_sim_mask( env_mask = self._ALL_ENV_MASK if joint_mask is None: joint_mask = self._ALL_JOINT_MASK + self.assert_shape_and_dtype_mask(velocity, (env_mask, joint_mask), wp.float32) wp.launch( articulation_kernels.write_joint_vel_data_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -1216,6 +1228,7 @@ def write_joint_stiffness_to_sim_index( device=self.device, ) else: + self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, dim=(env_ids.shape[0], joint_ids.shape[0]), @@ -1272,6 +1285,7 @@ def write_joint_stiffness_to_sim_mask( device=self.device, ) else: + self.assert_shape_and_dtype_mask(stiffness, (env_mask, joint_mask), wp.float32) wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -1329,6 +1343,7 @@ def write_joint_damping_to_sim_index( device=self.device, ) else: + self.assert_shape_and_dtype(damping, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, dim=(env_ids.shape[0], joint_ids.shape[0]), @@ -1385,6 +1400,7 @@ def write_joint_damping_to_sim_mask( device=self.device, ) else: + self.assert_shape_and_dtype_mask(damping, (env_mask, joint_mask), wp.float32) wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -1435,6 +1451,7 @@ def write_joint_position_limit_to_sim_index( # Note: we are doing a single launch for faster performance. Prior versions would do this in multiple launches. 
if isinstance(limits, float): raise ValueError("Joint position limits must be a tensor or array, not a float.") + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.vec2f) wp.launch( articulation_kernels.write_joint_limit_data_to_buffer_index, dim=(env_ids.shape[0], joint_ids.shape[0]), @@ -1497,6 +1514,7 @@ def write_joint_position_limit_to_sim_mask( clamped_defaults = wp.zeros(1, dtype=wp.int32, device=self.device) if isinstance(limits, float): raise ValueError("Joint position limits must be a tensor or array, not a float.") + self.assert_shape_and_dtype_mask(limits, (env_mask, joint_mask), wp.vec2f) wp.launch( articulation_kernels.write_joint_limit_data_to_buffer_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -1572,6 +1590,7 @@ def write_joint_velocity_limit_to_sim_index( device=self.device, ) else: + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, dim=(env_ids.shape[0], joint_ids.shape[0]), @@ -1632,6 +1651,7 @@ def write_joint_velocity_limit_to_sim_mask( device=self.device, ) else: + self.assert_shape_and_dtype_mask(limits, (env_mask, joint_mask), wp.float32) wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -1692,6 +1712,7 @@ def write_joint_effort_limit_to_sim_index( device=self.device, ) else: + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, dim=(env_ids.shape[0], joint_ids.shape[0]), @@ -1751,6 +1772,7 @@ def write_joint_effort_limit_to_sim_mask( device=self.device, ) else: + self.assert_shape_and_dtype_mask(limits, (env_mask, joint_mask), wp.float32) wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -1810,6 +1832,7 @@ def write_joint_armature_to_sim_index( device=self.device, ) else: + 
self.assert_shape_and_dtype(armature, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, dim=(env_ids.shape[0], joint_ids.shape[0]), @@ -1870,6 +1893,7 @@ def write_joint_armature_to_sim_mask( device=self.device, ) else: + self.assert_shape_and_dtype_mask(armature, (env_mask, joint_mask), wp.float32) wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -1927,6 +1951,7 @@ def write_joint_friction_coefficient_to_sim_index( device=self.device, ) else: + self.assert_shape_and_dtype(joint_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, dim=(env_ids.shape[0], joint_ids.shape[0]), @@ -1984,6 +2009,7 @@ def write_joint_friction_coefficient_to_sim_mask( device=self.device, ) else: + self.assert_shape_and_dtype_mask(joint_friction_coeff, (env_mask, joint_mask), wp.float32) wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -2028,6 +2054,7 @@ def set_masses_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2071,6 +2098,7 @@ def set_masses_mask( env_mask = self._ALL_ENV_MASK if body_mask is None: body_mask = self._ALL_BODY_MASK + self.assert_shape_and_dtype_mask(masses, (env_mask, body_mask), wp.float32) wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], body_mask.shape[0]), @@ -2109,13 +2137,15 @@ def set_coms_index( aligned with the body frame. Args: - coms: Center of mass position of all bodies. Shape is (len(env_ids), len(body_ids), 3). 
+ coms: Center of mass position of all bodies. Shape is (len(env_ids), len(body_ids), 3). In warp + the expected shape is (num_instances, num_bodies), with dtype wp.vec3f. body_ids: Body indices. If None, then all bodies are used. env_ids: Environment indices. If None, then all indices are used. """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.vec3f) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_body_com_position_to_buffer_index, @@ -2155,7 +2185,8 @@ def set_coms_mask( aligned with the body frame. Args: - coms: Center of mass position of all bodies. Shape is (num_instances, num_bodies, 3). + coms: Center of mass position of all bodies. Shape is (num_instances, num_bodies). In warp + the expected shape is (num_instances, num_bodies), with dtype wp.vec3f. body_mask: Body mask. If None, then all bodies are used. Shape is (num_bodies,). env_mask: Environment mask. If None, then all the instances are updated. Shape is (num_instances,). """ @@ -2164,6 +2195,7 @@ def set_coms_mask( env_mask = self._ALL_ENV_MASK if body_mask is None: body_mask = self._ALL_BODY_MASK + self.assert_shape_and_dtype_mask(coms, (env_mask, body_mask), wp.vec3f) wp.launch( shared_kernels.write_body_com_position_to_buffer_mask, dim=(env_mask.shape[0], body_mask.shape[0]), @@ -2197,13 +2229,15 @@ def set_inertias_index( However, to allow graphed pipelines, the mask method must be used. Args: - inertias: Inertias of all bodies. Shape is (len(env_ids), len(body_ids), 9). + inertias: Inertias of all bodies. Shape is (len(env_ids), len(body_ids), 9). In warp + the expected shape is (num_instances, num_bodies, 9), with dtype wp.float32. body_ids: The body indices to set the inertias for. Defaults to None (all bodies). env_ids: The environment indices to set the inertias for. 
Defaults to None (all environments). """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_body_inertia_to_buffer_index, @@ -2290,6 +2324,7 @@ def set_joint_position_target_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2331,6 +2366,7 @@ def set_joint_position_target_mask( env_mask = self._ALL_ENV_MASK if joint_mask is None: joint_mask = self._ALL_JOINT_MASK + self.assert_shape_and_dtype_mask(target, (env_mask, joint_mask), wp.float32) wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -2373,6 +2409,7 @@ def set_joint_velocity_target_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2415,6 +2452,7 @@ def set_joint_velocity_target_mask( env_mask = self._ALL_ENV_MASK if joint_mask is None: joint_mask = self._ALL_JOINT_MASK + self.assert_shape_and_dtype_mask(target, (env_mask, joint_mask), wp.float32) wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -2457,6 +2495,7 @@ def set_joint_effort_target_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2498,6 +2537,7 @@ def set_joint_effort_target_mask( env_mask = self._ALL_ENV_MASK if joint_mask is None: joint_mask = self._ALL_JOINT_MASK + self.assert_shape_and_dtype_mask(target, (env_mask, joint_mask), wp.float32) wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -3136,6 +3176,67 @@ def write_spatial_tendon_properties_to_sim_mask( """ raise NotImplementedError() + def assert_shape_and_dtype( + self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type + ) -> None: + """Assert the shape and dtype of a tensor or warp array. + + Args: + tensor: The tensor or warp array to assert the shape of. Floats are skipped. + shape: The shape to assert. + dtype: The warp dtype to assert. 
+ """ + if __debug__: + if isinstance(tensor, wp.array): + assert tensor.dtype == dtype, f"Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"Shape mismatch: {tensor.shape} != {shape}" + if isinstance(tensor, torch.Tensor): + if isinstance(dtype, wp.float32): + offset = () + elif isinstance(dtype, wp.vec2f): + offset = (2,) + elif isinstance(dtype, wp.vec3f): + offset = (3,) + elif isinstance(dtype, wp.transformf): + offset = (7,) + elif isinstance(dtype, wp.spatial_vectorf): + offset = (6,) + else: + raise ValueError(f"Unsupported dtype: {dtype}") + assert tensor.shape == (*shape, *offset), f"Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + + def assert_shape_and_dtype_mask( + self, tensor: float | torch.Tensor | wp.array, masks: tuple[wp.array, ...], dtype: type + ) -> None: + """Assert the shape of a tensor or warp array against mask dimensions. + + Args: + tensor: The tensor or warp array to assert the shape of. Floats are skipped. + masks: Tuple of mask arrays whose shape[0] dimensions form the expected shape. + dtype: The warp dtype to assert. + """ + if __debug__: + if isinstance(tensor, float): + return + shape = tuple(m.shape[0] for m in masks) + if isinstance(tensor, wp.array): + assert tensor.dtype == dtype, f"Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"Shape mismatch: {tensor.shape} != {shape}" + if isinstance(tensor, torch.Tensor): + if isinstance(dtype, wp.float32): + offset = () + elif isinstance(dtype, wp.vec2f): + offset = (2,) + elif isinstance(dtype, wp.vec3f): + offset = (3,) + elif isinstance(dtype, wp.transformf): + offset = (7,) + elif isinstance(dtype, wp.spatial_vectorf): + offset = (6,) + else: + raise ValueError(f"Unsupported dtype: {dtype}") + assert tensor.shape == (*shape, *offset), f"Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + """ Internal helper. 
""" diff --git a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py index e8d66059c42..7210dd9104e 100644 --- a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py +++ b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py @@ -332,6 +332,7 @@ def write_root_link_pose_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.set_root_link_pose_to_sim_index, @@ -377,6 +378,7 @@ def write_root_link_pose_to_sim_mask( """ if env_mask is None: env_mask = self._ALL_ENV_MASK + self.assert_shape_and_dtype_mask(root_pose, (env_mask,), wp.transformf) wp.launch( shared_kernels.set_root_link_pose_to_sim_mask, @@ -423,6 +425,7 @@ def write_root_com_pose_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. # Note: we are doing a single launch for faster performance. Prior versions would call # write_root_link_pose_to_sim after this. @@ -478,6 +481,7 @@ def write_root_com_pose_to_sim_mask( """ if env_mask is None: env_mask = self._ALL_ENV_MASK + self.assert_shape_and_dtype_mask(root_pose, (env_mask,), wp.transformf) wp.launch( shared_kernels.set_root_com_pose_to_sim_mask, dim=root_pose.shape[0], @@ -533,6 +537,7 @@ def write_root_com_velocity_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.set_root_com_velocity_to_sim_index, @@ -584,6 +589,7 @@ def write_root_com_velocity_to_sim_mask( """ if env_mask is None: env_mask = self._ALL_ENV_MASK + self.assert_shape_and_dtype_mask(root_velocity, (env_mask,), wp.spatial_vectorf) wp.launch( shared_kernels.set_root_com_velocity_to_sim_mask, dim=root_velocity.shape[0], @@ -635,6 +641,7 @@ def write_root_link_velocity_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. # Note: we are doing a single launch for faster performance. Prior versions would do multiple launches. wp.launch( @@ -691,6 +698,7 @@ def write_root_link_velocity_to_sim_mask( """ if env_mask is None: env_mask = self._ALL_ENV_MASK + self.assert_shape_and_dtype_mask(root_velocity, (env_mask,), wp.spatial_vectorf) wp.launch( shared_kernels.set_root_link_velocity_to_sim_mask, dim=root_velocity.shape[0], @@ -746,6 +754,7 @@ def set_masses_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -789,6 +798,7 @@ def set_masses_mask( env_mask = self._ALL_ENV_MASK if body_mask is None: body_mask = self._ALL_BODY_MASK + self.assert_shape_and_dtype_mask(masses, (env_mask, body_mask), wp.float32) wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], body_mask.shape[0]), @@ -834,6 +844,7 @@ def set_coms_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.vec3f) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_body_com_position_to_buffer_index, @@ -882,6 +893,7 @@ def set_coms_mask( env_mask = self._ALL_ENV_MASK if body_mask is None: body_mask = self._ALL_BODY_MASK + self.assert_shape_and_dtype_mask(coms, (env_mask, body_mask), wp.vec3f) wp.launch( shared_kernels.write_body_com_position_to_buffer_mask, dim=(env_mask.shape[0], body_mask.shape[0]), @@ -922,6 +934,7 @@ def set_inertias_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_body_inertia_to_buffer_index, @@ -981,6 +994,69 @@ def set_inertias_mask( # tell the physics engine that some of the body properties have been updated SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) + """ + Validation. + """ + + def assert_shape_and_dtype( + self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type + ) -> None: + """Assert the shape and dtype of a tensor or warp array. + + Args: + tensor: The tensor or warp array to assert the shape of. Floats are skipped. 
+ shape: The shape to assert. + dtype: The warp dtype to assert. + """ + if __debug__: + if isinstance(tensor, float): + return + if isinstance(tensor, wp.array): + assert tensor.dtype == dtype, f"Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"Shape mismatch: {tensor.shape} != {shape}" + if isinstance(tensor, torch.Tensor): + if dtype == wp.float32: + offset = () + elif dtype == wp.vec3f: + offset = (3,) + elif dtype == wp.transformf: + offset = (7,) + elif dtype == wp.spatial_vectorf: + offset = (6,) + else: + raise ValueError(f"Unsupported dtype: {dtype}") + assert tensor.shape == (*shape, *offset), f"Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + + def assert_shape_and_dtype_mask( + self, tensor: float | torch.Tensor | wp.array, masks: tuple[wp.array, ...], dtype: type + ) -> None: + """Assert the shape of a tensor or warp array against mask dimensions. + + Args: + tensor: The tensor or warp array to assert the shape of. Floats are skipped. + masks: Tuple of mask arrays whose shape[0] dimensions form the expected shape. + dtype: The warp dtype to assert. + """ + if __debug__: + if isinstance(tensor, float): + return + shape = tuple(m.shape[0] for m in masks) + if isinstance(tensor, wp.array): + assert tensor.dtype == dtype, f"Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"Shape mismatch: {tensor.shape} != {shape}" + if isinstance(tensor, torch.Tensor): + if dtype == wp.float32: + offset = () + elif dtype == wp.vec3f: + offset = (3,) + elif dtype == wp.transformf: + offset = (7,) + elif dtype == wp.spatial_vectorf: + offset = (6,) + else: + raise ValueError(f"Unsupported dtype: {dtype}") + assert tensor.shape == (*shape, *offset), f"Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + """ Internal helper. 
""" diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py index f101be869f1..3c897223d3a 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py @@ -445,6 +445,7 @@ def write_root_link_pose_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0]), wp.transformf) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.set_root_link_pose_to_sim, @@ -531,6 +532,7 @@ def write_root_com_pose_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0]), wp.transformf) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. # Note: we are doing a single launch for faster performance. Prior versions would call # write_root_link_pose_to_sim after this. @@ -681,6 +683,7 @@ def write_root_com_velocity_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0]), wp.spatial_vectorf) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.set_root_com_velocity_to_sim, @@ -770,6 +773,7 @@ def write_root_link_velocity_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0]), wp.spatial_vectorf) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. # Note: we are doing a single launch for faster performance. Prior versions would do multiple launches. 
wp.launch( @@ -889,6 +893,7 @@ def write_joint_position_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(position, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -978,6 +983,7 @@ def write_joint_velocity_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(velocity, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( articulation_kernels.write_joint_vel_data, @@ -1064,6 +1070,7 @@ def write_joint_stiffness_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. if isinstance(stiffness, float): wp.launch( @@ -1160,6 +1167,7 @@ def write_joint_damping_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(damping, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. if isinstance(damping, float): wp.launch( @@ -1247,6 +1255,7 @@ def write_joint_position_limit_to_sim_index( Args: limits: Joint limits. Shape is (len(env_ids), len(joint_ids), 2) or (num_instances, num_joints, 2). + In warp the expected shape is (num_instances, num_joints), with dtype wp.vec2f. joint_ids: Joint indices. If None, then all joints are used. 
env_ids: Environment indices. If None, then all indices are used. full_data: Whether to expect full data. Defaults to False. @@ -1257,6 +1266,7 @@ def write_joint_position_limit_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.vec2f) clamped_defaults = wp.zeros(1, dtype=wp.int32, device=self.device) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. @@ -1367,6 +1377,7 @@ def write_joint_velocity_limit_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. if isinstance(limits, float): wp.launch( @@ -1470,6 +1481,7 @@ def write_joint_effort_limit_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. if isinstance(limits, float): wp.launch( @@ -1569,6 +1581,7 @@ def write_joint_armature_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(armature, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
if isinstance(armature, float): wp.launch( @@ -1683,6 +1696,11 @@ def write_joint_friction_coefficient_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(joint_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + if joint_dynamic_friction_coeff is not None: + self.assert_shape_and_dtype(joint_dynamic_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + if joint_viscous_friction_coeff is not None: + self.assert_shape_and_dtype(joint_viscous_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) # Get the friction properties from the simulation. friction_props = wp.clone(self.root_view.get_dof_friction_properties(), device=self.device) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. @@ -1795,6 +1813,7 @@ def write_joint_dynamic_friction_coefficient_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(joint_dynamic_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) # Get the friction properties from the simulation. friction_props = wp.clone(self.root_view.get_dof_friction_properties(), device=self.device) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. @@ -1887,6 +1906,7 @@ def write_joint_viscous_friction_coefficient_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(joint_viscous_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) # Get the friction properties from the simulation. friction_props = wp.clone(self.root_view.get_dof_friction_properties(), device=self.device) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
@@ -1980,6 +2000,7 @@ def set_masses_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2061,6 +2082,7 @@ def set_coms_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0], 7), wp.transformf) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_body_com_pose_to_buffer, @@ -2147,6 +2169,7 @@ def set_inertias_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_body_inertia_to_buffer, @@ -2229,6 +2252,7 @@ def set_joint_position_target_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2309,6 +2333,7 @@ def set_joint_velocity_target_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2389,6 +2414,7 @@ def set_joint_effort_target_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2474,6 +2500,7 @@ def set_fixed_tendon_stiffness_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) + self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2561,6 +2588,7 @@ def set_fixed_tendon_damping_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) + self.assert_shape_and_dtype(damping, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2648,6 +2676,7 @@ def set_fixed_tendon_limit_stiffness_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) + self.assert_shape_and_dtype(limit_stiffness, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2735,6 +2764,7 @@ def set_fixed_tendon_position_limit_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) + self.assert_shape_and_dtype(limit, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2822,6 +2852,7 @@ def set_fixed_tendon_rest_length_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) + self.assert_shape_and_dtype(rest_length, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2909,6 +2940,7 @@ def set_fixed_tendon_offset_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) + self.assert_shape_and_dtype(offset, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2983,6 +3015,7 @@ def write_fixed_tendon_properties_to_sim_index( """ # resolve indices env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(env_ids, (env_ids.shape[0]), wp.int32) # Write fixed tendon properties to the simulation. 
self.root_view.set_fixed_tendon_properties( self.data.fixed_tendon_stiffness, @@ -3046,6 +3079,7 @@ def set_spatial_tendon_stiffness_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) + self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -3133,6 +3167,7 @@ def set_spatial_tendon_damping_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) + self.assert_shape_and_dtype(damping, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -3221,6 +3256,7 @@ def set_spatial_tendon_limit_stiffness_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) + self.assert_shape_and_dtype(limit_stiffness, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -3308,6 +3344,7 @@ def set_spatial_tendon_offset_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) + self.assert_shape_and_dtype(offset, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -3383,6 +3420,7 @@ def write_spatial_tendon_properties_to_sim_index( env_ids = self._ALL_INDICES elif isinstance(env_ids, list): env_ids = wp.array(env_ids, dtype=wp.int32, device=self.device) + self.assert_shape_and_dtype(env_ids, (env_ids.shape[0]), wp.int32) # Write spatial tendon properties to the simulation. self.root_view.set_spatial_tendon_properties( self.data.spatial_tendon_stiffness, @@ -4170,6 +4208,35 @@ def _resolve_spatial_tendon_ids( return self._ALL_SPATIAL_TENDON_INDICES return spatial_tendon_ids + def assert_shape_and_dtype( + self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type + ) -> None: + """Assert the shape and dtype of a tensor or warp array. + + Args: + tensor: The tensor or warp array to assert the shape of. Floats are skipped. + shape: The shape to assert. + dtype: The warp dtype to assert. + """ + if __debug__: + if isinstance(tensor, wp.array): + assert tensor.dtype == dtype, f"Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"Shape mismatch: {tensor.shape} != {shape}" + if isinstance(tensor, torch.Tensor): + if isinstance(dtype, wp.float32): + offset = () + elif isinstance(dtype, wp.vec2f): + offset = (2,) + elif isinstance(dtype, wp.vec3f): + offset = (3,) + elif isinstance(dtype, wp.transformf): + offset = (7,) + elif isinstance(dtype, wp.spatial_vectorf): + offset = (6,) + else: + raise ValueError(f"Unsupported dtype: {dtype}") + assert tensor.shape == (*shape, *offset), f"Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + """ Deprecated methods. 
""" diff --git a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py index 1e18790c854..fe65c821250 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py @@ -237,6 +237,7 @@ def write_nodal_pos_to_sim_index( """ # resolve env_ids env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(nodal_pos, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec3f) # convert torch to warp if needed if isinstance(nodal_pos, torch.Tensor): nodal_pos = wp.from_torch(nodal_pos.contiguous(), dtype=wp.vec3f) @@ -297,6 +298,7 @@ def write_nodal_velocity_to_sim_index( """ # resolve env_ids env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(nodal_vel, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec3f) # convert torch to warp if needed if isinstance(nodal_vel, torch.Tensor): nodal_vel = wp.from_torch(nodal_vel.contiguous(), dtype=wp.vec3f) @@ -361,6 +363,7 @@ def write_nodal_kinematic_target_to_sim_index( """ # resolve env_ids env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(targets, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec4f) # convert torch to warp if needed, ensuring 2D (num_envs, V, 4) -> (num_envs, V) vec4f if isinstance(targets, torch.Tensor): if targets.dim() == 2: @@ -402,6 +405,35 @@ def write_nodal_kinematic_target_to_sim_mask( env_ids = self._ALL_INDICES self.write_nodal_kinematic_target_to_sim_index(targets, env_ids=env_ids, full_data=True) + def assert_shape_and_dtype( + self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type + ) -> None: + """Assert the shape and dtype of a tensor or warp array. + + Args: + tensor: The tensor or warp array to assert the shape of. Floats are skipped. + shape: The shape to assert. 
+ dtype: The warp dtype to assert. + """ + if __debug__: + if isinstance(tensor, float): + return + if isinstance(tensor, wp.array): + assert tensor.dtype == dtype, f"Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"Shape mismatch: {tensor.shape} != {shape}" + if isinstance(tensor, torch.Tensor): + if isinstance(dtype, wp.float32): + offset = () + elif isinstance(dtype, wp.vec3f): + offset = (3,) + elif isinstance(dtype, wp.vec4f): + offset = (4,) + elif isinstance(dtype, vec6f): + offset = (6,) + else: + raise ValueError(f"Unsupported dtype: {dtype}") + assert tensor.shape == (*shape, *offset), f"Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + """ Operations - Deprecated wrappers. """ diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py index 825128c8b30..d14af08ac6c 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py @@ -332,6 +332,7 @@ def write_root_link_pose_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf) wp.launch( shared_kernels.set_root_link_pose_to_sim, dim=env_ids.shape[0], @@ -412,6 +413,7 @@ def write_root_com_pose_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf) wp.launch( shared_kernels.set_root_com_pose_to_sim, dim=env_ids.shape[0], @@ -499,6 +501,7 @@ def write_root_com_velocity_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf) wp.launch( shared_kernels.set_root_com_velocity_to_sim, dim=env_ids.shape[0], @@ -588,6 +591,7 @@ def write_root_link_velocity_to_sim_index( 
""" # resolve all indices env_ids = self._resolve_env_ids(env_ids) + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf) # Access body_com_pose_b and root_link_pose_w properties to ensure they are current. wp.launch( shared_kernels.set_root_link_velocity_to_sim, @@ -683,6 +687,7 @@ def set_masses_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -766,6 +771,7 @@ def set_coms_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0], 7), wp.transformf) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_body_com_pose_to_buffer, @@ -848,6 +854,7 @@ def set_inertias_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_single_body_inertia_to_buffer, @@ -1049,6 +1056,35 @@ def _invalidate_initialize_callback(self, event): # set all existing views to None to invalidate them self._root_view = None + def assert_shape_and_dtype( + self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type + ) -> None: + """Assert the shape and dtype of a tensor or warp array. + + Args: + tensor: The tensor or warp array to assert the shape of. Floats are skipped. + shape: The shape to assert. + dtype: The warp dtype to assert. 
+ """ + if __debug__: + if isinstance(tensor, float): + return + if isinstance(tensor, wp.array): + assert tensor.dtype == dtype, f"Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"Shape mismatch: {tensor.shape} != {shape}" + if isinstance(tensor, torch.Tensor): + if isinstance(dtype, wp.float32): + offset = () + elif isinstance(dtype, wp.vec3f): + offset = (3,) + elif isinstance(dtype, wp.transformf): + offset = (7,) + elif isinstance(dtype, wp.spatial_vectorf): + offset = (6,) + else: + raise ValueError(f"Unsupported dtype: {dtype}") + assert tensor.shape == (*shape, *offset), f"Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + @property def root_physx_view(self) -> physx.RigidBodyView: """Deprecated property. Please use :attr:`root_view` instead.""" diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py index 0c2e37d94c7..76ff213330d 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py @@ -419,6 +419,7 @@ def write_body_link_pose_to_sim_index( """ env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(body_poses, (env_ids.shape[0], body_ids.shape[0]), wp.transformf) wp.launch( shared_kernels.set_body_link_pose_to_sim, dim=(env_ids.shape[0], body_ids.shape[0]), @@ -511,6 +512,7 @@ def write_body_com_pose_to_sim_index( """ env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(body_poses, (env_ids.shape[0], body_ids.shape[0]), wp.transformf) wp.launch( shared_kernels.set_body_com_pose_to_sim, dim=(env_ids.shape[0], body_ids.shape[0]), @@ -607,6 +609,7 @@ def write_body_com_velocity_to_sim_index( """ env_ids = 
self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(body_velocities, (env_ids.shape[0], body_ids.shape[0]), wp.spatial_vectorf) wp.launch( shared_kernels.set_body_com_velocity_to_sim, dim=(env_ids.shape[0], body_ids.shape[0]), @@ -707,6 +710,7 @@ def write_body_link_velocity_to_sim_index( """ env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(body_velocities, (env_ids.shape[0], body_ids.shape[0]), wp.spatial_vectorf) # Access body_com_pose_b and body_link_pose_w to ensure they are current. wp.launch( shared_kernels.set_body_link_velocity_to_sim, @@ -810,6 +814,7 @@ def set_masses_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -891,6 +896,7 @@ def set_coms_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0], 7), wp.transformf) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_body_com_pose_to_buffer, @@ -975,6 +981,7 @@ def set_inertias_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) + self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_body_inertia_to_buffer, @@ -1350,6 +1357,35 @@ def _on_prim_deletion(self, prim_path: str) -> None: self._clear_callbacks() return + def assert_shape_and_dtype( + self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type + ) -> None: + """Assert the shape and dtype of a tensor or warp array. + + Args: + tensor: The tensor or warp array to assert the shape of. Floats are skipped. + shape: The shape to assert. + dtype: The warp dtype to assert. + """ + if __debug__: + if isinstance(tensor, float): + return + if isinstance(tensor, wp.array): + assert tensor.dtype == dtype, f"Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"Shape mismatch: {tensor.shape} != {shape}" + if isinstance(tensor, torch.Tensor): + if isinstance(dtype, wp.float32): + offset = () + elif isinstance(dtype, wp.vec3f): + offset = (3,) + elif isinstance(dtype, wp.transformf): + offset = (7,) + elif isinstance(dtype, wp.spatial_vectorf): + offset = (6,) + else: + raise ValueError(f"Unsupported dtype: {dtype}") + assert tensor.shape == (*shape, *offset), f"Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + """ Deprecated properties and methods. 
""" diff --git a/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py index 28da4654454..29ff19149fa 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py @@ -165,6 +165,7 @@ def set_grippers_command_index( """ if env_ids is None: env_ids = self._ALL_INDICES + self.assert_shape_and_dtype(states, (env_ids.shape[0],), wp.float32) # Convert torch input to warp if isinstance(states, torch.Tensor): @@ -233,6 +234,7 @@ def update_gripper_properties_index( (retry_interval, self._retry_interval), ]: if prop_data is not None: + self.assert_shape_and_dtype(prop_data, (env_ids.shape[0],), wp.float32) wp.launch( write_scalar_at_indices, dim=env_ids.shape[0], @@ -419,6 +421,31 @@ def reset(self, indices: torch.Tensor | None = None) -> None: env_ids = self._resolve_env_ids(indices) self.reset_index(env_ids) + def assert_shape_and_dtype( + self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type + ) -> None: + """Assert the shape and dtype of a tensor or warp array. + + Args: + tensor: The tensor or warp array to assert the shape of. Floats are skipped. + shape: The shape to assert. + dtype: The warp dtype to assert. + """ + if __debug__: + if isinstance(tensor, float): + return + if isinstance(tensor, wp.array): + assert tensor.dtype == dtype, f"Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"Shape mismatch: {tensor.shape} != {shape}" + if isinstance(tensor, torch.Tensor): + if isinstance(dtype, wp.float32): + offset = () + elif isinstance(dtype, wp.int32): + offset = () + else: + raise ValueError(f"Unsupported dtype: {dtype}") + assert tensor.shape == (*shape, *offset), f"Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + """ Initialization. 
""" From 8c42e936b9fb435f5147b4f4b5ed4dbe9a1d2bc5 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 25 Feb 2026 12:57:26 +0100 Subject: [PATCH 02/13] WIP --- .../assets/articulation/articulation.py | 137 ++++++++++-------- .../assets/rigid_object/rigid_object.py | 85 ++++++----- .../assets/articulation/articulation.py | 95 ++++++------ .../deformable_object/deformable_object.py | 31 ++-- .../assets/rigid_object/rigid_object.py | 39 ++--- .../rigid_object_collection.py | 39 ++--- .../assets/surface_gripper/surface_gripper.py | 35 +++-- 7 files changed, 254 insertions(+), 207 deletions(-) diff --git a/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py b/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py index 3efd0b32841..26b7afd62b9 100644 --- a/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py +++ b/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py @@ -471,7 +471,7 @@ def write_root_link_pose_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf) + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.set_root_link_pose_to_sim_index, @@ -527,7 +527,7 @@ def write_root_link_pose_to_sim_mask( """ if env_mask is None: env_mask = self._ALL_ENV_MASK - self.assert_shape_and_dtype_mask(root_pose, (env_mask,), wp.transformf) + self.assert_shape_and_dtype_mask(root_pose, (env_mask,), wp.transformf, "root_pose") wp.launch( shared_kernels.set_root_link_pose_to_sim_mask, @@ -584,7 +584,7 @@ def write_root_com_pose_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf) + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. # Note: we are doing a single launch for faster performance. Prior versions would call # write_root_link_pose_to_sim after this. @@ -650,7 +650,7 @@ def write_root_com_pose_to_sim_mask( """ if env_mask is None: env_mask = self._ALL_ENV_MASK - self.assert_shape_and_dtype_mask(root_pose, (env_mask,), wp.transformf) + self.assert_shape_and_dtype_mask(root_pose, (env_mask,), wp.transformf, "root_pose") wp.launch( shared_kernels.set_root_com_pose_to_sim_mask, dim=root_pose.shape[0], @@ -766,7 +766,7 @@ def write_root_com_velocity_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf) + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.set_root_com_velocity_to_sim_index, @@ -818,7 +818,7 @@ def write_root_com_velocity_to_sim_mask( """ if env_mask is None: env_mask = self._ALL_ENV_MASK - self.assert_shape_and_dtype_mask(root_velocity, (env_mask,), wp.spatial_vectorf) + self.assert_shape_and_dtype_mask(root_velocity, (env_mask,), wp.spatial_vectorf, "root_velocity") wp.launch( shared_kernels.set_root_com_velocity_to_sim_mask, dim=root_velocity.shape[0], @@ -869,7 +869,7 @@ def write_root_link_velocity_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf) + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. # Note: we are doing a single launch for faster performance. Prior versions would do multiple launches. wp.launch( @@ -929,7 +929,7 @@ def write_root_link_velocity_to_sim_mask( """ if env_mask is None: env_mask = self._ALL_ENV_MASK - self.assert_shape_and_dtype_mask(root_velocity, (env_mask,), wp.spatial_vectorf) + self.assert_shape_and_dtype_mask(root_velocity, (env_mask,), wp.spatial_vectorf, "root_velocity") wp.launch( shared_kernels.set_root_link_velocity_to_sim_mask, dim=root_velocity.shape[0], @@ -1012,7 +1012,7 @@ def write_joint_position_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(position, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(position, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "position") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -1067,7 +1067,7 @@ def write_joint_position_to_sim_mask( env_mask = self._ALL_ENV_MASK if joint_mask is None: joint_mask = self._ALL_JOINT_MASK - self.assert_shape_and_dtype_mask(position, (env_mask, joint_mask), wp.float32) + self.assert_shape_and_dtype_mask(position, (env_mask, joint_mask), wp.float32, "position") wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -1120,7 +1120,7 @@ def write_joint_velocity_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(velocity, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(velocity, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "velocity") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( articulation_kernels.write_joint_vel_data_index, @@ -1165,7 +1165,7 @@ def write_joint_velocity_to_sim_mask( env_mask = self._ALL_ENV_MASK if joint_mask is None: joint_mask = self._ALL_JOINT_MASK - self.assert_shape_and_dtype_mask(velocity, (env_mask, joint_mask), wp.float32) + self.assert_shape_and_dtype_mask(velocity, (env_mask, joint_mask), wp.float32, "velocity") wp.launch( articulation_kernels.write_joint_vel_data_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -1228,7 +1228,7 @@ def write_joint_stiffness_to_sim_index( device=self.device, ) else: - self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "stiffness") wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, dim=(env_ids.shape[0], joint_ids.shape[0]), @@ -1285,7 +1285,7 @@ def write_joint_stiffness_to_sim_mask( device=self.device, ) else: - self.assert_shape_and_dtype_mask(stiffness, (env_mask, joint_mask), 
wp.float32) + self.assert_shape_and_dtype_mask(stiffness, (env_mask, joint_mask), wp.float32, "stiffness") wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -1343,7 +1343,7 @@ def write_joint_damping_to_sim_index( device=self.device, ) else: - self.assert_shape_and_dtype(damping, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(damping, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "damping") wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, dim=(env_ids.shape[0], joint_ids.shape[0]), @@ -1400,7 +1400,7 @@ def write_joint_damping_to_sim_mask( device=self.device, ) else: - self.assert_shape_and_dtype_mask(damping, (env_mask, joint_mask), wp.float32) + self.assert_shape_and_dtype_mask(damping, (env_mask, joint_mask), wp.float32, "damping") wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -1451,7 +1451,7 @@ def write_joint_position_limit_to_sim_index( # Note: we are doing a single launch for faster performance. Prior versions would do this in multiple launches. 
if isinstance(limits, float): raise ValueError("Joint position limits must be a tensor or array, not a float.") - self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.vec2f) + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.vec2f, "limits") wp.launch( articulation_kernels.write_joint_limit_data_to_buffer_index, dim=(env_ids.shape[0], joint_ids.shape[0]), @@ -1514,7 +1514,7 @@ def write_joint_position_limit_to_sim_mask( clamped_defaults = wp.zeros(1, dtype=wp.int32, device=self.device) if isinstance(limits, float): raise ValueError("Joint position limits must be a tensor or array, not a float.") - self.assert_shape_and_dtype_mask(limits, (env_mask, joint_mask), wp.vec2f) + self.assert_shape_and_dtype_mask(limits, (env_mask, joint_mask), wp.vec2f, "limits") wp.launch( articulation_kernels.write_joint_limit_data_to_buffer_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -1590,7 +1590,7 @@ def write_joint_velocity_limit_to_sim_index( device=self.device, ) else: - self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "limits") wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, dim=(env_ids.shape[0], joint_ids.shape[0]), @@ -1651,7 +1651,7 @@ def write_joint_velocity_limit_to_sim_mask( device=self.device, ) else: - self.assert_shape_and_dtype_mask(limits, (env_mask, joint_mask), wp.float32) + self.assert_shape_and_dtype_mask(limits, (env_mask, joint_mask), wp.float32, "limits") wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -1712,7 +1712,7 @@ def write_joint_effort_limit_to_sim_index( device=self.device, ) else: - self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "limits") wp.launch( 
shared_kernels.write_2d_data_to_buffer_with_indices, dim=(env_ids.shape[0], joint_ids.shape[0]), @@ -1772,7 +1772,7 @@ def write_joint_effort_limit_to_sim_mask( device=self.device, ) else: - self.assert_shape_and_dtype_mask(limits, (env_mask, joint_mask), wp.float32) + self.assert_shape_and_dtype_mask(limits, (env_mask, joint_mask), wp.float32, "limits") wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -1832,7 +1832,7 @@ def write_joint_armature_to_sim_index( device=self.device, ) else: - self.assert_shape_and_dtype(armature, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(armature, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "armature") wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, dim=(env_ids.shape[0], joint_ids.shape[0]), @@ -1893,7 +1893,7 @@ def write_joint_armature_to_sim_mask( device=self.device, ) else: - self.assert_shape_and_dtype_mask(armature, (env_mask, joint_mask), wp.float32) + self.assert_shape_and_dtype_mask(armature, (env_mask, joint_mask), wp.float32, "armature") wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -1951,7 +1951,7 @@ def write_joint_friction_coefficient_to_sim_index( device=self.device, ) else: - self.assert_shape_and_dtype(joint_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(joint_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_friction_coeff") wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, dim=(env_ids.shape[0], joint_ids.shape[0]), @@ -2009,7 +2009,7 @@ def write_joint_friction_coefficient_to_sim_mask( device=self.device, ) else: - self.assert_shape_and_dtype_mask(joint_friction_coeff, (env_mask, joint_mask), wp.float32) + self.assert_shape_and_dtype_mask(joint_friction_coeff, (env_mask, joint_mask), wp.float32, "joint_friction_coeff") wp.launch( 
shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -2054,7 +2054,7 @@ def set_masses_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32, "masses") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2098,7 +2098,7 @@ def set_masses_mask( env_mask = self._ALL_ENV_MASK if body_mask is None: body_mask = self._ALL_BODY_MASK - self.assert_shape_and_dtype_mask(masses, (env_mask, body_mask), wp.float32) + self.assert_shape_and_dtype_mask(masses, (env_mask, body_mask), wp.float32, "masses") wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], body_mask.shape[0]), @@ -2145,7 +2145,7 @@ def set_coms_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.vec3f) + self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.vec3f, "coms") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_body_com_position_to_buffer_index, @@ -2195,7 +2195,7 @@ def set_coms_mask( env_mask = self._ALL_ENV_MASK if body_mask is None: body_mask = self._ALL_BODY_MASK - self.assert_shape_and_dtype_mask(coms, (env_mask, body_mask), wp.vec3f) + self.assert_shape_and_dtype_mask(coms, (env_mask, body_mask), wp.vec3f, "coms") wp.launch( shared_kernels.write_body_com_position_to_buffer_mask, dim=(env_mask.shape[0], body_mask.shape[0]), @@ -2237,7 +2237,7 @@ def set_inertias_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32) + self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32, "inertias") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_body_inertia_to_buffer_index, @@ -2281,6 +2281,9 @@ def set_inertias_mask( env_mask = self._ALL_ENV_MASK if body_mask is None: body_mask = self._ALL_BODY_MASK + self.assert_shape_and_dtype_mask( + inertias, (env_mask, body_mask), wp.float32, "inertias", trailing_dims=(9,) + ) wp.launch( shared_kernels.write_body_inertia_to_buffer_mask, dim=(env_mask.shape[0], body_mask.shape[0]), @@ -2324,7 +2327,7 @@ def set_joint_position_target_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "target") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2366,7 +2369,7 @@ def set_joint_position_target_mask( env_mask = self._ALL_ENV_MASK if joint_mask is None: joint_mask = self._ALL_JOINT_MASK - self.assert_shape_and_dtype_mask(target, (env_mask, joint_mask), wp.float32) + self.assert_shape_and_dtype_mask(target, (env_mask, joint_mask), wp.float32, "target") wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -2409,7 +2412,7 @@ def set_joint_velocity_target_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "target") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2452,7 +2455,7 @@ def set_joint_velocity_target_mask( env_mask = self._ALL_ENV_MASK if joint_mask is None: joint_mask = self._ALL_JOINT_MASK - self.assert_shape_and_dtype_mask(target, (env_mask, joint_mask), wp.float32) + self.assert_shape_and_dtype_mask(target, (env_mask, joint_mask), wp.float32, "target") wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -2495,7 +2498,7 @@ def set_joint_effort_target_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "target") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2537,7 +2540,7 @@ def set_joint_effort_target_mask( env_mask = self._ALL_ENV_MASK if joint_mask is None: joint_mask = self._ALL_JOINT_MASK - self.assert_shape_and_dtype_mask(target, (env_mask, joint_mask), wp.float32) + self.assert_shape_and_dtype_mask(target, (env_mask, joint_mask), wp.float32, "target") wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -3177,7 +3180,7 @@ def write_spatial_tendon_properties_to_sim_mask( raise NotImplementedError() def assert_shape_and_dtype( - self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type + self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type, name: str = "" ) -> None: """Assert the shape and dtype of a tensor or warp array. @@ -3185,28 +3188,38 @@ def assert_shape_and_dtype( tensor: The tensor or warp array to assert the shape of. Floats are skipped. shape: The shape to assert. dtype: The warp dtype to assert. + name: Optional parameter name for error messages. 
""" if __debug__: - if isinstance(tensor, wp.array): - assert tensor.dtype == dtype, f"Dtype mismatch: {tensor.dtype} != {dtype}" - assert tensor.shape == shape, f"Shape mismatch: {tensor.shape} != {shape}" - if isinstance(tensor, torch.Tensor): - if isinstance(dtype, wp.float32): + cls = type(self).__name__ + prefix = f"{cls}: '{name}' " if name else f"{cls}: " + if isinstance(tensor, (int, float)): + return + elif isinstance(tensor, wp.array): + assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" + elif isinstance(tensor, torch.Tensor): + if dtype is wp.float32: offset = () - elif isinstance(dtype, wp.vec2f): + elif dtype is wp.vec2f: offset = (2,) - elif isinstance(dtype, wp.vec3f): + elif dtype is wp.vec3f: offset = (3,) - elif isinstance(dtype, wp.transformf): + elif dtype is wp.transformf: offset = (7,) - elif isinstance(dtype, wp.spatial_vectorf): + elif dtype is wp.spatial_vectorf: offset = (6,) else: raise ValueError(f"Unsupported dtype: {dtype}") - assert tensor.shape == (*shape, *offset), f"Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + assert tensor.shape == (*shape, *offset), f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" def assert_shape_and_dtype_mask( - self, tensor: float | torch.Tensor | wp.array, masks: tuple[wp.array, ...], dtype: type + self, + tensor: float | torch.Tensor | wp.array, + masks: tuple[wp.array, ...], + dtype: type, + name: str = "", + trailing_dims: tuple[int, ...] = (), ) -> None: """Assert the shape of a tensor or warp array against mask dimensions. @@ -3214,28 +3227,32 @@ def assert_shape_and_dtype_mask( tensor: The tensor or warp array to assert the shape of. Floats are skipped. masks: Tuple of mask arrays whose shape[0] dimensions form the expected shape. dtype: The warp dtype to assert. + name: Optional parameter name for error messages. 
+ trailing_dims: Extra trailing dimensions to append (e.g. (9,) for inertias with wp.float32). """ if __debug__: - if isinstance(tensor, float): + cls = type(self).__name__ + prefix = f"{cls}: '{name}' " if name else f"{cls}: " + if isinstance(tensor, (int, float)): return - shape = tuple(m.shape[0] for m in masks) + shape = (*tuple(m.shape[0] for m in masks), *trailing_dims) if isinstance(tensor, wp.array): - assert tensor.dtype == dtype, f"Dtype mismatch: {tensor.dtype} != {dtype}" - assert tensor.shape == shape, f"Shape mismatch: {tensor.shape} != {shape}" - if isinstance(tensor, torch.Tensor): - if isinstance(dtype, wp.float32): + assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" + elif isinstance(tensor, torch.Tensor): + if dtype is wp.float32: offset = () - elif isinstance(dtype, wp.vec2f): + elif dtype is wp.vec2f: offset = (2,) - elif isinstance(dtype, wp.vec3f): + elif dtype is wp.vec3f: offset = (3,) - elif isinstance(dtype, wp.transformf): + elif dtype is wp.transformf: offset = (7,) - elif isinstance(dtype, wp.spatial_vectorf): + elif dtype is wp.spatial_vectorf: offset = (6,) else: raise ValueError(f"Unsupported dtype: {dtype}") - assert tensor.shape == (*shape, *offset), f"Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + assert tensor.shape == (*shape, *offset), f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" """ Internal helper. 
diff --git a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py index 7210dd9104e..d8294b8cc00 100644 --- a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py +++ b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py @@ -332,7 +332,7 @@ def write_root_link_pose_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf) + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.set_root_link_pose_to_sim_index, @@ -378,7 +378,7 @@ def write_root_link_pose_to_sim_mask( """ if env_mask is None: env_mask = self._ALL_ENV_MASK - self.assert_shape_and_dtype_mask(root_pose, (env_mask,), wp.transformf) + self.assert_shape_and_dtype_mask(root_pose, (env_mask,), wp.transformf, "root_pose") wp.launch( shared_kernels.set_root_link_pose_to_sim_mask, @@ -425,7 +425,7 @@ def write_root_com_pose_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf) + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. # Note: we are doing a single launch for faster performance. Prior versions would call # write_root_link_pose_to_sim after this. 
@@ -481,7 +481,7 @@ def write_root_com_pose_to_sim_mask( """ if env_mask is None: env_mask = self._ALL_ENV_MASK - self.assert_shape_and_dtype_mask(root_pose, (env_mask,), wp.transformf) + self.assert_shape_and_dtype_mask(root_pose, (env_mask,), wp.transformf, "root_pose") wp.launch( shared_kernels.set_root_com_pose_to_sim_mask, dim=root_pose.shape[0], @@ -537,7 +537,7 @@ def write_root_com_velocity_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf) + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.set_root_com_velocity_to_sim_index, @@ -589,7 +589,7 @@ def write_root_com_velocity_to_sim_mask( """ if env_mask is None: env_mask = self._ALL_ENV_MASK - self.assert_shape_and_dtype_mask(root_velocity, (env_mask,), wp.spatial_vectorf) + self.assert_shape_and_dtype_mask(root_velocity, (env_mask,), wp.spatial_vectorf, "root_velocity") wp.launch( shared_kernels.set_root_com_velocity_to_sim_mask, dim=root_velocity.shape[0], @@ -641,7 +641,7 @@ def write_root_link_velocity_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf) + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. # Note: we are doing a single launch for faster performance. Prior versions would do multiple launches. 
wp.launch( @@ -698,7 +698,7 @@ def write_root_link_velocity_to_sim_mask( """ if env_mask is None: env_mask = self._ALL_ENV_MASK - self.assert_shape_and_dtype_mask(root_velocity, (env_mask,), wp.spatial_vectorf) + self.assert_shape_and_dtype_mask(root_velocity, (env_mask,), wp.spatial_vectorf, "root_velocity") wp.launch( shared_kernels.set_root_link_velocity_to_sim_mask, dim=root_velocity.shape[0], @@ -754,7 +754,7 @@ def set_masses_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32, "masses") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -798,7 +798,7 @@ def set_masses_mask( env_mask = self._ALL_ENV_MASK if body_mask is None: body_mask = self._ALL_BODY_MASK - self.assert_shape_and_dtype_mask(masses, (env_mask, body_mask), wp.float32) + self.assert_shape_and_dtype_mask(masses, (env_mask, body_mask), wp.float32, "masses") wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], body_mask.shape[0]), @@ -844,7 +844,7 @@ def set_coms_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.vec3f) + self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.vec3f, "coms") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_body_com_position_to_buffer_index, @@ -893,7 +893,7 @@ def set_coms_mask( env_mask = self._ALL_ENV_MASK if body_mask is None: body_mask = self._ALL_BODY_MASK - self.assert_shape_and_dtype_mask(coms, (env_mask, body_mask), wp.vec3f) + self.assert_shape_and_dtype_mask(coms, (env_mask, body_mask), wp.vec3f, "coms") wp.launch( shared_kernels.write_body_com_position_to_buffer_mask, dim=(env_mask.shape[0], body_mask.shape[0]), @@ -934,7 +934,7 @@ def set_inertias_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32) + self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32, "inertias") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_body_inertia_to_buffer_index, @@ -978,6 +978,9 @@ def set_inertias_mask( env_mask = self._ALL_ENV_MASK if body_mask is None: body_mask = self._ALL_BODY_MASK + self.assert_shape_and_dtype_mask( + inertias, (env_mask, body_mask), wp.float32, "inertias", trailing_dims=(9,) + ) wp.launch( shared_kernels.write_body_inertia_to_buffer_mask, dim=(env_mask.shape[0], body_mask.shape[0]), @@ -999,7 +1002,7 @@ def set_inertias_mask( """ def assert_shape_and_dtype( - self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type + self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type, name: str = "" ) -> None: """Assert the shape and dtype of a tensor or warp array. @@ -1007,28 +1010,36 @@ def assert_shape_and_dtype( tensor: The tensor or warp array to assert the shape of. Floats are skipped. shape: The shape to assert. dtype: The warp dtype to assert. + name: Optional parameter name for error messages. 
""" if __debug__: - if isinstance(tensor, float): + cls = type(self).__name__ + prefix = f"{cls}: '{name}' " if name else f"{cls}: " + if isinstance(tensor, (int, float)): return - if isinstance(tensor, wp.array): - assert tensor.dtype == dtype, f"Dtype mismatch: {tensor.dtype} != {dtype}" - assert tensor.shape == shape, f"Shape mismatch: {tensor.shape} != {shape}" - if isinstance(tensor, torch.Tensor): - if isinstance(dtype, wp.float32): + elif isinstance(tensor, wp.array): + assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" + elif isinstance(tensor, torch.Tensor): + if dtype is wp.float32: offset = () - elif isinstance(dtype, wp.vec3f): + elif dtype is wp.vec3f: offset = (3,) - elif isinstance(dtype, wp.transformf): + elif dtype is wp.transformf: offset = (7,) - elif isinstance(dtype, wp.spatial_vectorf): + elif dtype is wp.spatial_vectorf: offset = (6,) else: raise ValueError(f"Unsupported dtype: {dtype}") - assert tensor.shape == (*shape, *offset), f"Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + assert tensor.shape == (*shape, *offset), f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" def assert_shape_and_dtype_mask( - self, tensor: float | torch.Tensor | wp.array, masks: tuple[wp.array, ...], dtype: type + self, + tensor: float | torch.Tensor | wp.array, + masks: tuple[wp.array, ...], + dtype: type, + name: str = "", + trailing_dims: tuple[int, ...] = (), ) -> None: """Assert the shape of a tensor or warp array against mask dimensions. @@ -1036,26 +1047,30 @@ def assert_shape_and_dtype_mask( tensor: The tensor or warp array to assert the shape of. Floats are skipped. masks: Tuple of mask arrays whose shape[0] dimensions form the expected shape. dtype: The warp dtype to assert. + name: Optional parameter name for error messages. + trailing_dims: Extra trailing dimensions to append (e.g. 
(9,) for inertias with wp.float32). """ if __debug__: - if isinstance(tensor, float): + cls = type(self).__name__ + prefix = f"{cls}: '{name}' " if name else f"{cls}: " + if isinstance(tensor, (int, float)): return - shape = tuple(m.shape[0] for m in masks) + shape = (*tuple(m.shape[0] for m in masks), *trailing_dims) if isinstance(tensor, wp.array): - assert tensor.dtype == dtype, f"Dtype mismatch: {tensor.dtype} != {dtype}" - assert tensor.shape == shape, f"Shape mismatch: {tensor.shape} != {shape}" - if isinstance(tensor, torch.Tensor): - if isinstance(dtype, wp.float32): + assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" + elif isinstance(tensor, torch.Tensor): + if dtype is wp.float32: offset = () - elif isinstance(dtype, wp.vec3f): + elif dtype is wp.vec3f: offset = (3,) - elif isinstance(dtype, wp.transformf): + elif dtype is wp.transformf: offset = (7,) - elif isinstance(dtype, wp.spatial_vectorf): + elif dtype is wp.spatial_vectorf: offset = (6,) else: raise ValueError(f"Unsupported dtype: {dtype}") - assert tensor.shape == (*shape, *offset), f"Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + assert tensor.shape == (*shape, *offset), f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" """ Internal helper. 
diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py index 3c897223d3a..b71ab358f19 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py @@ -445,7 +445,7 @@ def write_root_link_pose_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_pose, (env_ids.shape[0]), wp.transformf) + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.set_root_link_pose_to_sim, @@ -532,7 +532,7 @@ def write_root_com_pose_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_pose, (env_ids.shape[0]), wp.transformf) + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. # Note: we are doing a single launch for faster performance. Prior versions would call # write_root_link_pose_to_sim after this. @@ -683,7 +683,7 @@ def write_root_com_velocity_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0]), wp.spatial_vectorf) + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.set_root_com_velocity_to_sim, @@ -773,7 +773,7 @@ def write_root_link_velocity_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0]), wp.spatial_vectorf) + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. # Note: we are doing a single launch for faster performance. Prior versions would do multiple launches. wp.launch( @@ -893,7 +893,7 @@ def write_joint_position_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(position, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(position, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "position") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -983,7 +983,7 @@ def write_joint_velocity_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(velocity, (env_ids.shape[0], joint_ids.shape[0]), wp.spatial_vectorf) + self.assert_shape_and_dtype(velocity, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "velocity") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( articulation_kernels.write_joint_vel_data, @@ -1070,7 +1070,7 @@ def write_joint_stiffness_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "stiffness") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. if isinstance(stiffness, float): wp.launch( @@ -1167,7 +1167,7 @@ def write_joint_damping_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(damping, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(damping, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "damping") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. if isinstance(damping, float): wp.launch( @@ -1266,7 +1266,7 @@ def write_joint_position_limit_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.vec2f) + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.vec2f, "limits") clamped_defaults = wp.zeros(1, dtype=wp.int32, device=self.device) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
@@ -1377,7 +1377,7 @@ def write_joint_velocity_limit_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "limits") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. if isinstance(limits, float): wp.launch( @@ -1481,7 +1481,7 @@ def write_joint_effort_limit_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "limits") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. if isinstance(limits, float): wp.launch( @@ -1581,7 +1581,7 @@ def write_joint_armature_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(armature, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(armature, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "armature") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
if isinstance(armature, float): wp.launch( @@ -1696,11 +1696,11 @@ def write_joint_friction_coefficient_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(joint_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(joint_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_friction_coeff") if joint_dynamic_friction_coeff is not None: - self.assert_shape_and_dtype(joint_dynamic_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(joint_dynamic_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_dynamic_friction_coeff") if joint_viscous_friction_coeff is not None: - self.assert_shape_and_dtype(joint_viscous_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(joint_viscous_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_viscous_friction_coeff") # Get the friction properties from the simulation. friction_props = wp.clone(self.root_view.get_dof_friction_properties(), device=self.device) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. @@ -1813,7 +1813,7 @@ def write_joint_dynamic_friction_coefficient_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(joint_dynamic_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(joint_dynamic_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_dynamic_friction_coeff") # Get the friction properties from the simulation. friction_props = wp.clone(self.root_view.get_dof_friction_properties(), device=self.device) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
@@ -1906,7 +1906,7 @@ def write_joint_viscous_friction_coefficient_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(joint_viscous_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(joint_viscous_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_viscous_friction_coeff") # Get the friction properties from the simulation. friction_props = wp.clone(self.root_view.get_dof_friction_properties(), device=self.device) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. @@ -2000,7 +2000,7 @@ def set_masses_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32, "masses") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2082,7 +2082,7 @@ def set_coms_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0], 7), wp.transformf) + self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "coms") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_body_com_pose_to_buffer, @@ -2169,7 +2169,7 @@ def set_inertias_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32) + self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32, "inertias") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_body_inertia_to_buffer, @@ -2252,7 +2252,7 @@ def set_joint_position_target_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "target") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2333,7 +2333,7 @@ def set_joint_velocity_target_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "target") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2414,7 +2414,7 @@ def set_joint_effort_target_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "target") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2500,7 +2500,7 @@ def set_fixed_tendon_stiffness_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) - self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "stiffness") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2588,7 +2588,7 @@ def set_fixed_tendon_damping_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) - self.assert_shape_and_dtype(damping, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(damping, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "damping") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2676,7 +2676,7 @@ def set_fixed_tendon_limit_stiffness_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) - self.assert_shape_and_dtype(limit_stiffness, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(limit_stiffness, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "limit_stiffness") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2764,7 +2764,7 @@ def set_fixed_tendon_position_limit_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) - self.assert_shape_and_dtype(limit, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(limit, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "limit") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2852,7 +2852,7 @@ def set_fixed_tendon_rest_length_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) - self.assert_shape_and_dtype(rest_length, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(rest_length, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "rest_length") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2940,7 +2940,7 @@ def set_fixed_tendon_offset_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) - self.assert_shape_and_dtype(offset, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(offset, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "offset") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -3015,7 +3015,6 @@ def write_fixed_tendon_properties_to_sim_index( """ # resolve indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(env_ids, (env_ids.shape[0]), wp.int32) # Write fixed tendon properties to the simulation. self.root_view.set_fixed_tendon_properties( self.data.fixed_tendon_stiffness, @@ -3079,7 +3078,7 @@ def set_spatial_tendon_stiffness_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) - self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "stiffness") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -3167,7 +3166,7 @@ def set_spatial_tendon_damping_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) - self.assert_shape_and_dtype(damping, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(damping, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "damping") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -3256,7 +3255,7 @@ def set_spatial_tendon_limit_stiffness_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) - self.assert_shape_and_dtype(limit_stiffness, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(limit_stiffness, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "limit_stiffness") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -3344,7 +3343,7 @@ def set_spatial_tendon_offset_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) - self.assert_shape_and_dtype(offset, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(offset, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "offset") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -3420,7 +3419,6 @@ def write_spatial_tendon_properties_to_sim_index( env_ids = self._ALL_INDICES elif isinstance(env_ids, list): env_ids = wp.array(env_ids, dtype=wp.int32, device=self.device) - self.assert_shape_and_dtype(env_ids, (env_ids.shape[0]), wp.int32) # Write spatial tendon properties to the simulation. self.root_view.set_spatial_tendon_properties( self.data.spatial_tendon_stiffness, @@ -4209,7 +4207,7 @@ def _resolve_spatial_tendon_ids( return spatial_tendon_ids def assert_shape_and_dtype( - self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type + self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type, name: str = "" ) -> None: """Assert the shape and dtype of a tensor or warp array. @@ -4217,25 +4215,30 @@ def assert_shape_and_dtype( tensor: The tensor or warp array to assert the shape of. Floats are skipped. shape: The shape to assert. dtype: The warp dtype to assert. + name: Optional parameter name for error messages. 
""" if __debug__: - if isinstance(tensor, wp.array): - assert tensor.dtype == dtype, f"Dtype mismatch: {tensor.dtype} != {dtype}" - assert tensor.shape == shape, f"Shape mismatch: {tensor.shape} != {shape}" - if isinstance(tensor, torch.Tensor): - if isinstance(dtype, wp.float32): + cls = type(self).__name__ + prefix = f"{cls}: '{name}' " if name else f"{cls}: " + if isinstance(tensor, (int, float)): + return + elif isinstance(tensor, wp.array): + assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" + elif isinstance(tensor, torch.Tensor): + if dtype is wp.float32: offset = () - elif isinstance(dtype, wp.vec2f): + elif dtype is wp.vec2f: offset = (2,) - elif isinstance(dtype, wp.vec3f): + elif dtype is wp.vec3f: offset = (3,) - elif isinstance(dtype, wp.transformf): + elif dtype is wp.transformf: offset = (7,) - elif isinstance(dtype, wp.spatial_vectorf): + elif dtype is wp.spatial_vectorf: offset = (6,) else: raise ValueError(f"Unsupported dtype: {dtype}") - assert tensor.shape == (*shape, *offset), f"Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + assert tensor.shape == (*shape, *offset), f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" """ Deprecated methods. 
diff --git a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py index fe65c821250..0c01e6374ac 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py @@ -237,7 +237,7 @@ def write_nodal_pos_to_sim_index( """ # resolve env_ids env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(nodal_pos, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec3f) + self.assert_shape_and_dtype(nodal_pos, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec3f, "nodal_pos") # convert torch to warp if needed if isinstance(nodal_pos, torch.Tensor): nodal_pos = wp.from_torch(nodal_pos.contiguous(), dtype=wp.vec3f) @@ -298,7 +298,7 @@ def write_nodal_velocity_to_sim_index( """ # resolve env_ids env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(nodal_vel, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec3f) + self.assert_shape_and_dtype(nodal_vel, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec3f, "nodal_vel") # convert torch to warp if needed if isinstance(nodal_vel, torch.Tensor): nodal_vel = wp.from_torch(nodal_vel.contiguous(), dtype=wp.vec3f) @@ -363,7 +363,7 @@ def write_nodal_kinematic_target_to_sim_index( """ # resolve env_ids env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(targets, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec4f) + self.assert_shape_and_dtype(targets, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec4f, "targets") # convert torch to warp if needed, ensuring 2D (num_envs, V, 4) -> (num_envs, V) vec4f if isinstance(targets, torch.Tensor): if targets.dim() == 2: @@ -406,7 +406,7 @@ def write_nodal_kinematic_target_to_sim_mask( self.write_nodal_kinematic_target_to_sim_index(targets, env_ids=env_ids, full_data=True) 
def assert_shape_and_dtype( - self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type + self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type, name: str = "" ) -> None: """Assert the shape and dtype of a tensor or warp array. @@ -414,25 +414,28 @@ def assert_shape_and_dtype( tensor: The tensor or warp array to assert the shape of. Floats are skipped. shape: The shape to assert. dtype: The warp dtype to assert. + name: Optional parameter name for error messages. """ if __debug__: - if isinstance(tensor, float): + cls = type(self).__name__ + prefix = f"{cls}: '{name}' " if name else f"{cls}: " + if isinstance(tensor, (int, float)): return - if isinstance(tensor, wp.array): - assert tensor.dtype == dtype, f"Dtype mismatch: {tensor.dtype} != {dtype}" - assert tensor.shape == shape, f"Shape mismatch: {tensor.shape} != {shape}" - if isinstance(tensor, torch.Tensor): - if isinstance(dtype, wp.float32): + elif isinstance(tensor, wp.array): + assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" + elif isinstance(tensor, torch.Tensor): + if dtype is wp.float32: offset = () - elif isinstance(dtype, wp.vec3f): + elif dtype is wp.vec3f: offset = (3,) - elif isinstance(dtype, wp.vec4f): + elif dtype is wp.vec4f: offset = (4,) - elif isinstance(dtype, vec6f): + elif dtype is vec6f: offset = (6,) else: raise ValueError(f"Unsupported dtype: {dtype}") - assert tensor.shape == (*shape, *offset), f"Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + assert tensor.shape == (*shape, *offset), f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" """ Operations - Deprecated wrappers. 
diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py index d14af08ac6c..9a8dff16fdb 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py @@ -332,7 +332,7 @@ def write_root_link_pose_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf) + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") wp.launch( shared_kernels.set_root_link_pose_to_sim, dim=env_ids.shape[0], @@ -413,7 +413,7 @@ def write_root_com_pose_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf) + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") wp.launch( shared_kernels.set_root_com_pose_to_sim, dim=env_ids.shape[0], @@ -501,7 +501,7 @@ def write_root_com_velocity_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf) + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") wp.launch( shared_kernels.set_root_com_velocity_to_sim, dim=env_ids.shape[0], @@ -591,7 +591,7 @@ def write_root_link_velocity_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf) + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") # Access body_com_pose_b and root_link_pose_w properties to ensure they are current. 
wp.launch( shared_kernels.set_root_link_velocity_to_sim, @@ -687,7 +687,7 @@ def set_masses_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32, "masses") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -771,7 +771,7 @@ def set_coms_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0], 7), wp.transformf) + self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "coms") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_body_com_pose_to_buffer, @@ -854,7 +854,7 @@ def set_inertias_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32) + self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32, "inertias") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_single_body_inertia_to_buffer, @@ -1057,7 +1057,7 @@ def _invalidate_initialize_callback(self, event): self._root_view = None def assert_shape_and_dtype( - self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type + self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type, name: str = "" ) -> None: """Assert the shape and dtype of a tensor or warp array. 
@@ -1065,25 +1065,28 @@ def assert_shape_and_dtype( tensor: The tensor or warp array to assert the shape of. Floats are skipped. shape: The shape to assert. dtype: The warp dtype to assert. + name: Optional parameter name for error messages. """ if __debug__: - if isinstance(tensor, float): + cls = type(self).__name__ + prefix = f"{cls}: '{name}' " if name else f"{cls}: " + if isinstance(tensor, (int, float)): return - if isinstance(tensor, wp.array): - assert tensor.dtype == dtype, f"Dtype mismatch: {tensor.dtype} != {dtype}" - assert tensor.shape == shape, f"Shape mismatch: {tensor.shape} != {shape}" - if isinstance(tensor, torch.Tensor): - if isinstance(dtype, wp.float32): + elif isinstance(tensor, wp.array): + assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" + elif isinstance(tensor, torch.Tensor): + if dtype is wp.float32: offset = () - elif isinstance(dtype, wp.vec3f): + elif dtype is wp.vec3f: offset = (3,) - elif isinstance(dtype, wp.transformf): + elif dtype is wp.transformf: offset = (7,) - elif isinstance(dtype, wp.spatial_vectorf): + elif dtype is wp.spatial_vectorf: offset = (6,) else: raise ValueError(f"Unsupported dtype: {dtype}") - assert tensor.shape == (*shape, *offset), f"Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + assert tensor.shape == (*shape, *offset), f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" @property def root_physx_view(self) -> physx.RigidBodyView: diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py index 76ff213330d..69c62293c95 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py 
@@ -419,7 +419,7 @@ def write_body_link_pose_to_sim_index( """ env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(body_poses, (env_ids.shape[0], body_ids.shape[0]), wp.transformf) + self.assert_shape_and_dtype(body_poses, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "body_poses") wp.launch( shared_kernels.set_body_link_pose_to_sim, dim=(env_ids.shape[0], body_ids.shape[0]), @@ -512,7 +512,7 @@ def write_body_com_pose_to_sim_index( """ env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(body_poses, (env_ids.shape[0], body_ids.shape[0]), wp.transformf) + self.assert_shape_and_dtype(body_poses, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "body_poses") wp.launch( shared_kernels.set_body_com_pose_to_sim, dim=(env_ids.shape[0], body_ids.shape[0]), @@ -609,7 +609,7 @@ def write_body_com_velocity_to_sim_index( """ env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(body_velocities, (env_ids.shape[0], body_ids.shape[0]), wp.spatial_vectorf) + self.assert_shape_and_dtype(body_velocities, (env_ids.shape[0], body_ids.shape[0]), wp.spatial_vectorf, "body_velocities") wp.launch( shared_kernels.set_body_com_velocity_to_sim, dim=(env_ids.shape[0], body_ids.shape[0]), @@ -710,7 +710,7 @@ def write_body_link_velocity_to_sim_index( """ env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(body_velocities, (env_ids.shape[0], body_ids.shape[0]), wp.spatial_vectorf) + self.assert_shape_and_dtype(body_velocities, (env_ids.shape[0], body_ids.shape[0]), wp.spatial_vectorf, "body_velocities") # Access body_com_pose_b and body_link_pose_w to ensure they are current. 
wp.launch( shared_kernels.set_body_link_velocity_to_sim, @@ -814,7 +814,7 @@ def set_masses_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32) + self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32, "masses") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -896,7 +896,7 @@ def set_coms_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0], 7), wp.transformf) + self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "coms") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_body_com_pose_to_buffer, @@ -981,7 +981,7 @@ def set_inertias_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32) + self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32, "inertias") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_body_inertia_to_buffer, @@ -1358,7 +1358,7 @@ def _on_prim_deletion(self, prim_path: str) -> None: return def assert_shape_and_dtype( - self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type + self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type, name: str = "" ) -> None: """Assert the shape and dtype of a tensor or warp array. 
@@ -1366,25 +1366,28 @@ def assert_shape_and_dtype( tensor: The tensor or warp array to assert the shape of. Floats are skipped. shape: The shape to assert. dtype: The warp dtype to assert. + name: Optional parameter name for error messages. """ if __debug__: - if isinstance(tensor, float): + cls = type(self).__name__ + prefix = f"{cls}: '{name}' " if name else f"{cls}: " + if isinstance(tensor, (int, float)): return - if isinstance(tensor, wp.array): - assert tensor.dtype == dtype, f"Dtype mismatch: {tensor.dtype} != {dtype}" - assert tensor.shape == shape, f"Shape mismatch: {tensor.shape} != {shape}" - if isinstance(tensor, torch.Tensor): - if isinstance(dtype, wp.float32): + elif isinstance(tensor, wp.array): + assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" + elif isinstance(tensor, torch.Tensor): + if dtype is wp.float32: offset = () - elif isinstance(dtype, wp.vec3f): + elif dtype is wp.vec3f: offset = (3,) - elif isinstance(dtype, wp.transformf): + elif dtype is wp.transformf: offset = (7,) - elif isinstance(dtype, wp.spatial_vectorf): + elif dtype is wp.spatial_vectorf: offset = (6,) else: raise ValueError(f"Unsupported dtype: {dtype}") - assert tensor.shape == (*shape, *offset), f"Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + assert tensor.shape == (*shape, *offset), f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" """ Deprecated properties and methods. 
diff --git a/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py index 29ff19149fa..d45517ca803 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py @@ -165,7 +165,7 @@ def set_grippers_command_index( """ if env_ids is None: env_ids = self._ALL_INDICES - self.assert_shape_and_dtype(states, (env_ids.shape[0],), wp.float32) + self.assert_shape_and_dtype(states, (env_ids.shape[0],), wp.float32, "states") # Convert torch input to warp if isinstance(states, torch.Tensor): @@ -227,14 +227,14 @@ def update_gripper_properties_index( if env_ids is None: env_ids = self._ALL_INDICES - for prop_data, prop_buf in [ - (max_grip_distance, self._max_grip_distance), - (coaxial_force_limit, self._coaxial_force_limit), - (shear_force_limit, self._shear_force_limit), - (retry_interval, self._retry_interval), + for prop_name, prop_data, prop_buf in [ + ("max_grip_distance", max_grip_distance, self._max_grip_distance), + ("coaxial_force_limit", coaxial_force_limit, self._coaxial_force_limit), + ("shear_force_limit", shear_force_limit, self._shear_force_limit), + ("retry_interval", retry_interval, self._retry_interval), ]: if prop_data is not None: - self.assert_shape_and_dtype(prop_data, (env_ids.shape[0],), wp.float32) + self.assert_shape_and_dtype(prop_data, (env_ids.shape[0],), wp.float32, prop_name) wp.launch( write_scalar_at_indices, dim=env_ids.shape[0], @@ -422,7 +422,7 @@ def reset(self, indices: torch.Tensor | None = None) -> None: self.reset_index(env_ids) def assert_shape_and_dtype( - self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type + self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type, name: str = "" ) -> None: """Assert the shape and dtype of a tensor or warp array. 
@@ -430,21 +430,24 @@ def assert_shape_and_dtype( tensor: The tensor or warp array to assert the shape of. Floats are skipped. shape: The shape to assert. dtype: The warp dtype to assert. + name: Optional parameter name for error messages. """ if __debug__: - if isinstance(tensor, float): + cls = type(self).__name__ + prefix = f"{cls}: '{name}' " if name else f"{cls}: " + if isinstance(tensor, (int, float)): return - if isinstance(tensor, wp.array): - assert tensor.dtype == dtype, f"Dtype mismatch: {tensor.dtype} != {dtype}" - assert tensor.shape == shape, f"Shape mismatch: {tensor.shape} != {shape}" - if isinstance(tensor, torch.Tensor): - if isinstance(dtype, wp.float32): + elif isinstance(tensor, wp.array): + assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" + elif isinstance(tensor, torch.Tensor): + if dtype is wp.float32: offset = () - elif isinstance(dtype, wp.int32): + elif dtype is wp.int32: offset = () else: raise ValueError(f"Unsupported dtype: {dtype}") - assert tensor.shape == (*shape, *offset), f"Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + assert tensor.shape == (*shape, *offset), f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" """ Initialization. 
From 135b7946ae4ba9988de8220b3c63395f442e1655 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 25 Feb 2026 13:12:22 +0100 Subject: [PATCH 03/13] support float values for tendons --- .../assets/articulation/base_articulation.py | 40 +- .../assets/articulation/articulation.py | 40 +- .../assets/articulation/articulation.py | 470 ++++++++++++------ .../test/assets/test_articulation.py | 8 +- 4 files changed, 354 insertions(+), 204 deletions(-) diff --git a/source/isaaclab/isaaclab/assets/articulation/base_articulation.py b/source/isaaclab/isaaclab/assets/articulation/base_articulation.py index 73c4b0fed21..f5da1cebb0d 100644 --- a/source/isaaclab/isaaclab/assets/articulation/base_articulation.py +++ b/source/isaaclab/isaaclab/assets/articulation/base_articulation.py @@ -1435,7 +1435,7 @@ def set_joint_effort_target_mask( def set_fixed_tendon_stiffness_index( self, *, - stiffness: torch.Tensor | wp.array, + stiffness: float | torch.Tensor | wp.array, fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, ) -> None: @@ -1463,7 +1463,7 @@ def set_fixed_tendon_stiffness_index( def set_fixed_tendon_stiffness_mask( self, *, - stiffness: torch.Tensor | wp.array, + stiffness: float | torch.Tensor | wp.array, fixed_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -1492,7 +1492,7 @@ def set_fixed_tendon_stiffness_mask( def set_fixed_tendon_damping_index( self, *, - damping: torch.Tensor | wp.array, + damping: float | torch.Tensor | wp.array, fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, ) -> None: @@ -1520,7 +1520,7 @@ def set_fixed_tendon_damping_index( def set_fixed_tendon_damping_mask( self, *, - damping: torch.Tensor | wp.array, + damping: float | torch.Tensor | wp.array, fixed_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: 
@@ -1549,7 +1549,7 @@ def set_fixed_tendon_damping_mask( def set_fixed_tendon_limit_stiffness_index( self, *, - limit_stiffness: torch.Tensor | wp.array, + limit_stiffness: float | torch.Tensor | wp.array, fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, ) -> None: @@ -1577,7 +1577,7 @@ def set_fixed_tendon_limit_stiffness_index( def set_fixed_tendon_limit_stiffness_mask( self, *, - limit_stiffness: torch.Tensor | wp.array, + limit_stiffness: float | torch.Tensor | wp.array, fixed_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -1606,7 +1606,7 @@ def set_fixed_tendon_limit_stiffness_mask( def set_fixed_tendon_position_limit_index( self, *, - limit: torch.Tensor | wp.array, + limit: float | torch.Tensor | wp.array, fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, ) -> None: @@ -1634,7 +1634,7 @@ def set_fixed_tendon_position_limit_index( def set_fixed_tendon_position_limit_mask( self, *, - limit: torch.Tensor | wp.array, + limit: float | torch.Tensor | wp.array, fixed_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -1663,7 +1663,7 @@ def set_fixed_tendon_position_limit_mask( def set_fixed_tendon_rest_length_index( self, *, - rest_length: torch.Tensor | wp.array, + rest_length: float | torch.Tensor | wp.array, fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, ) -> None: @@ -1691,7 +1691,7 @@ def set_fixed_tendon_rest_length_index( def set_fixed_tendon_rest_length_mask( self, *, - rest_length: torch.Tensor | wp.array, + rest_length: float | torch.Tensor | wp.array, fixed_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -1720,7 +1720,7 @@ def set_fixed_tendon_rest_length_mask( def 
set_fixed_tendon_offset_index( self, *, - offset: torch.Tensor | wp.array, + offset: float | torch.Tensor | wp.array, fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, ) -> None: @@ -1748,7 +1748,7 @@ def set_fixed_tendon_offset_index( def set_fixed_tendon_offset_mask( self, *, - offset: torch.Tensor | wp.array, + offset: float | torch.Tensor | wp.array, fixed_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -1822,7 +1822,7 @@ def write_fixed_tendon_properties_to_sim_mask( def set_spatial_tendon_stiffness_index( self, *, - stiffness: torch.Tensor | wp.array, + stiffness: float | torch.Tensor | wp.array, spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, ) -> None: @@ -1850,7 +1850,7 @@ def set_spatial_tendon_stiffness_index( def set_spatial_tendon_stiffness_mask( self, *, - stiffness: torch.Tensor | wp.array, + stiffness: float | torch.Tensor | wp.array, spatial_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -1879,7 +1879,7 @@ def set_spatial_tendon_stiffness_mask( def set_spatial_tendon_damping_index( self, *, - damping: torch.Tensor | wp.array, + damping: float | torch.Tensor | wp.array, spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, ) -> None: @@ -1907,7 +1907,7 @@ def set_spatial_tendon_damping_index( def set_spatial_tendon_damping_mask( self, *, - damping: torch.Tensor | wp.array, + damping: float | torch.Tensor | wp.array, spatial_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -1936,7 +1936,7 @@ def set_spatial_tendon_damping_mask( def set_spatial_tendon_limit_stiffness_index( self, *, - limit_stiffness: torch.Tensor | wp.array, + limit_stiffness: float | torch.Tensor | wp.array, 
spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, ) -> None: @@ -1965,7 +1965,7 @@ def set_spatial_tendon_limit_stiffness_index( def set_spatial_tendon_limit_stiffness_mask( self, *, - limit_stiffness: torch.Tensor | wp.array, + limit_stiffness: float | torch.Tensor | wp.array, spatial_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -1994,7 +1994,7 @@ def set_spatial_tendon_limit_stiffness_mask( def set_spatial_tendon_offset_index( self, *, - offset: torch.Tensor | wp.array, + offset: float | torch.Tensor | wp.array, spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, ) -> None: @@ -2022,7 +2022,7 @@ def set_spatial_tendon_offset_index( def set_spatial_tendon_offset_mask( self, *, - offset: torch.Tensor | wp.array, + offset: float | torch.Tensor | wp.array, spatial_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: diff --git a/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py b/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py index 26b7afd62b9..b8d80cd93cf 100644 --- a/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py +++ b/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py @@ -2563,7 +2563,7 @@ def set_joint_effort_target_mask( def set_fixed_tendon_stiffness_index( self, *, - stiffness: torch.Tensor | wp.array, + stiffness: float | torch.Tensor | wp.array, fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, ) -> None: @@ -2590,7 +2590,7 @@ def set_fixed_tendon_stiffness_index( def set_fixed_tendon_stiffness_mask( self, *, - stiffness: torch.Tensor | wp.array, + stiffness: float | torch.Tensor | wp.array, fixed_tendon_mask: wp.array | None = 
None, env_mask: wp.array | None = None, ) -> None: @@ -2618,7 +2618,7 @@ def set_fixed_tendon_stiffness_mask( def set_fixed_tendon_damping_index( self, *, - damping: torch.Tensor | wp.array, + damping: float | torch.Tensor | wp.array, fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, ) -> None: @@ -2645,7 +2645,7 @@ def set_fixed_tendon_damping_index( def set_fixed_tendon_damping_mask( self, *, - damping: torch.Tensor | wp.array, + damping: float | torch.Tensor | wp.array, fixed_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -2673,7 +2673,7 @@ def set_fixed_tendon_damping_mask( def set_fixed_tendon_limit_stiffness_index( self, *, - limit_stiffness: torch.Tensor | wp.array, + limit_stiffness: float | torch.Tensor | wp.array, fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, ) -> None: @@ -2700,7 +2700,7 @@ def set_fixed_tendon_limit_stiffness_index( def set_fixed_tendon_limit_stiffness_mask( self, *, - limit_stiffness: torch.Tensor | wp.array, + limit_stiffness: float | torch.Tensor | wp.array, fixed_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -2728,7 +2728,7 @@ def set_fixed_tendon_limit_stiffness_mask( def set_fixed_tendon_position_limit_index( self, *, - limit: torch.Tensor | wp.array, + limit: float | torch.Tensor | wp.array, fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, ) -> None: @@ -2755,7 +2755,7 @@ def set_fixed_tendon_position_limit_index( def set_fixed_tendon_position_limit_mask( self, *, - limit: torch.Tensor | wp.array, + limit: float | torch.Tensor | wp.array, fixed_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -2783,7 +2783,7 @@ def set_fixed_tendon_position_limit_mask( def 
set_fixed_tendon_rest_length_index( self, *, - rest_length: torch.Tensor | wp.array, + rest_length: float | torch.Tensor | wp.array, fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, ) -> None: @@ -2810,7 +2810,7 @@ def set_fixed_tendon_rest_length_index( def set_fixed_tendon_rest_length_mask( self, *, - rest_length: torch.Tensor | wp.array, + rest_length: float | torch.Tensor | wp.array, fixed_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -2838,7 +2838,7 @@ def set_fixed_tendon_rest_length_mask( def set_fixed_tendon_offset_index( self, *, - offset: torch.Tensor | wp.array, + offset: float | torch.Tensor | wp.array, fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, ) -> None: @@ -2865,7 +2865,7 @@ def set_fixed_tendon_offset_index( def set_fixed_tendon_offset_mask( self, *, - offset: torch.Tensor | wp.array, + offset: float | torch.Tensor | wp.array, fixed_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -2927,7 +2927,7 @@ def write_fixed_tendon_properties_to_sim_mask( def set_spatial_tendon_stiffness_index( self, *, - stiffness: torch.Tensor | wp.array, + stiffness: float | torch.Tensor | wp.array, spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, ) -> None: @@ -2954,7 +2954,7 @@ def set_spatial_tendon_stiffness_index( def set_spatial_tendon_stiffness_mask( self, *, - stiffness: torch.Tensor | wp.array, + stiffness: float | torch.Tensor | wp.array, spatial_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -2982,7 +2982,7 @@ def set_spatial_tendon_stiffness_mask( def set_spatial_tendon_damping_index( self, *, - damping: torch.Tensor | wp.array, + damping: float | torch.Tensor | wp.array, 
spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, ) -> None: @@ -3009,7 +3009,7 @@ def set_spatial_tendon_damping_index( def set_spatial_tendon_damping_mask( self, *, - damping: torch.Tensor | wp.array, + damping: float | torch.Tensor | wp.array, spatial_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -3037,7 +3037,7 @@ def set_spatial_tendon_damping_mask( def set_spatial_tendon_limit_stiffness_index( self, *, - limit_stiffness: torch.Tensor | wp.array, + limit_stiffness: float | torch.Tensor | wp.array, spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, ) -> None: @@ -3065,7 +3065,7 @@ def set_spatial_tendon_limit_stiffness_index( def set_spatial_tendon_limit_stiffness_mask( self, *, - limit_stiffness: torch.Tensor | wp.array, + limit_stiffness: float | torch.Tensor | wp.array, spatial_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -3093,7 +3093,7 @@ def set_spatial_tendon_limit_stiffness_mask( def set_spatial_tendon_offset_index( self, *, - offset: torch.Tensor | wp.array, + offset: float | torch.Tensor | wp.array, spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, ) -> None: @@ -3120,7 +3120,7 @@ def set_spatial_tendon_offset_index( def set_spatial_tendon_offset_mask( self, *, - offset: torch.Tensor | wp.array, + offset: float | torch.Tensor | wp.array, spatial_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py index b71ab358f19..15c07795d54 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py +++ 
b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py @@ -2472,7 +2472,7 @@ def set_joint_effort_target_mask( def set_fixed_tendon_stiffness_index( self, *, - stiffness: torch.Tensor | wp.array, + stiffness: float | torch.Tensor | wp.array, fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, full_data: bool = False, @@ -2502,26 +2502,41 @@ def set_fixed_tendon_stiffness_index( fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "stiffness") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. - wp.launch( - shared_kernels.write_2d_data_to_buffer_with_indices, - dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), - inputs=[ - stiffness, - env_ids, - fixed_tendon_ids, - full_data, - ], - outputs=[ - self.data._fixed_tendon_stiffness, - ], - device=self.device, - ) + if isinstance(stiffness, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + stiffness, + env_ids, + fixed_tendon_ids, + ], + outputs=[ + self.data._fixed_tendon_stiffness, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + stiffness, + env_ids, + fixed_tendon_ids, + full_data, + ], + outputs=[ + self.data._fixed_tendon_stiffness, + ], + device=self.device, + ) # Only updates internal buffers, does not apply the stiffness to the simulation. 
def set_fixed_tendon_stiffness_mask( self, *, - stiffness: torch.Tensor | wp.array, + stiffness: float | torch.Tensor | wp.array, fixed_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -2560,7 +2575,7 @@ def set_fixed_tendon_stiffness_mask( def set_fixed_tendon_damping_index( self, *, - damping: torch.Tensor | wp.array, + damping: float | torch.Tensor | wp.array, fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, full_data: bool = False, @@ -2590,26 +2605,41 @@ def set_fixed_tendon_damping_index( fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) self.assert_shape_and_dtype(damping, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "damping") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. - wp.launch( - shared_kernels.write_2d_data_to_buffer_with_indices, - dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), - inputs=[ - damping, - env_ids, - fixed_tendon_ids, - full_data, - ], - outputs=[ - self.data._fixed_tendon_damping, - ], - device=self.device, - ) + if isinstance(damping, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + damping, + env_ids, + fixed_tendon_ids, + ], + outputs=[ + self.data._fixed_tendon_damping, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + damping, + env_ids, + fixed_tendon_ids, + full_data, + ], + outputs=[ + self.data._fixed_tendon_damping, + ], + device=self.device, + ) # Only updates internal buffers, does not apply the damping to the simulation. 
def set_fixed_tendon_damping_mask( self, *, - damping: torch.Tensor | wp.array, + damping: float | torch.Tensor | wp.array, fixed_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -2648,7 +2678,7 @@ def set_fixed_tendon_damping_mask( def set_fixed_tendon_limit_stiffness_index( self, *, - limit_stiffness: torch.Tensor | wp.array, + limit_stiffness: float | torch.Tensor | wp.array, fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, full_data: bool = False, @@ -2678,26 +2708,41 @@ def set_fixed_tendon_limit_stiffness_index( fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) self.assert_shape_and_dtype(limit_stiffness, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "limit_stiffness") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. - wp.launch( - shared_kernels.write_2d_data_to_buffer_with_indices, - dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), - inputs=[ - limit_stiffness, - env_ids, - fixed_tendon_ids, - full_data, - ], - outputs=[ - self.data._fixed_tendon_limit_stiffness, - ], - device=self.device, - ) + if isinstance(limit_stiffness, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + limit_stiffness, + env_ids, + fixed_tendon_ids, + ], + outputs=[ + self.data._fixed_tendon_limit_stiffness, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + limit_stiffness, + env_ids, + fixed_tendon_ids, + full_data, + ], + outputs=[ + self.data._fixed_tendon_limit_stiffness, + ], + device=self.device, + ) # Only updates internal buffers, does not apply the limit stiffness to the simulation. 
def set_fixed_tendon_limit_stiffness_mask( self, *, - limit_stiffness: torch.Tensor | wp.array, + limit_stiffness: float | torch.Tensor | wp.array, fixed_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -2736,7 +2781,7 @@ def set_fixed_tendon_limit_stiffness_mask( def set_fixed_tendon_position_limit_index( self, *, - limit: torch.Tensor | wp.array, + limit: float | torch.Tensor | wp.array, fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, full_data: bool = False, @@ -2766,26 +2811,41 @@ def set_fixed_tendon_position_limit_index( fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) self.assert_shape_and_dtype(limit, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "limit") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. - wp.launch( - shared_kernels.write_2d_data_to_buffer_with_indices, - dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), - inputs=[ - limit, - env_ids, - fixed_tendon_ids, - full_data, - ], - outputs=[ - self.data._fixed_tendon_pos_limits, - ], - device=self.device, - ) + if isinstance(limit, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + limit, + env_ids, + fixed_tendon_ids, + ], + outputs=[ + self.data._fixed_tendon_pos_limits, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + limit, + env_ids, + fixed_tendon_ids, + full_data, + ], + outputs=[ + self.data._fixed_tendon_pos_limits, + ], + device=self.device, + ) # Only updates internal buffers, does not apply the position limit to the simulation. 
def set_fixed_tendon_position_limit_mask( self, *, - limit: torch.Tensor | wp.array, + limit: float | torch.Tensor | wp.array, fixed_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -2824,7 +2884,7 @@ def set_fixed_tendon_position_limit_mask( def set_fixed_tendon_rest_length_index( self, *, - rest_length: torch.Tensor | wp.array, + rest_length: float | torch.Tensor | wp.array, fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, full_data: bool = False, @@ -2854,26 +2914,41 @@ def set_fixed_tendon_rest_length_index( fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) self.assert_shape_and_dtype(rest_length, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "rest_length") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. - wp.launch( - shared_kernels.write_2d_data_to_buffer_with_indices, - dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), - inputs=[ - rest_length, - env_ids, - fixed_tendon_ids, - full_data, - ], - outputs=[ - self.data._fixed_tendon_rest_length, - ], - device=self.device, - ) + if isinstance(rest_length, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + rest_length, + env_ids, + fixed_tendon_ids, + ], + outputs=[ + self.data._fixed_tendon_rest_length, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + rest_length, + env_ids, + fixed_tendon_ids, + full_data, + ], + outputs=[ + self.data._fixed_tendon_rest_length, + ], + device=self.device, + ) # Only updates internal buffers, does not apply the rest length to the simulation. 
def set_fixed_tendon_rest_length_mask( self, *, - rest_length: torch.Tensor | wp.array, + rest_length: float | torch.Tensor | wp.array, fixed_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -2912,7 +2987,7 @@ def set_fixed_tendon_rest_length_mask( def set_fixed_tendon_offset_index( self, *, - offset: torch.Tensor | wp.array, + offset: float | torch.Tensor | wp.array, fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, full_data: bool = False, @@ -2942,26 +3017,41 @@ def set_fixed_tendon_offset_index( fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) self.assert_shape_and_dtype(offset, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "offset") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. - wp.launch( - shared_kernels.write_2d_data_to_buffer_with_indices, - dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), - inputs=[ - offset, - env_ids, - fixed_tendon_ids, - full_data, - ], - outputs=[ - self.data._fixed_tendon_offset, - ], - device=self.device, - ) + if isinstance(offset, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + offset, + env_ids, + fixed_tendon_ids, + ], + outputs=[ + self.data._fixed_tendon_offset, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], fixed_tendon_ids.shape[0]), + inputs=[ + offset, + env_ids, + fixed_tendon_ids, + full_data, + ], + outputs=[ + self.data._fixed_tendon_offset, + ], + device=self.device, + ) # Only updates internal buffers, does not apply the offset to the simulation. 
def set_fixed_tendon_offset_mask( self, *, - offset: torch.Tensor | wp.array, + offset: float | torch.Tensor | wp.array, fixed_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -3050,7 +3140,7 @@ def write_fixed_tendon_properties_to_sim_mask( def set_spatial_tendon_stiffness_index( self, *, - stiffness: torch.Tensor | wp.array, + stiffness: float | torch.Tensor | wp.array, spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, full_data: bool = False, @@ -3080,26 +3170,41 @@ def set_spatial_tendon_stiffness_index( spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "stiffness") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. - wp.launch( - shared_kernels.write_2d_data_to_buffer_with_indices, - dim=(env_ids.shape[0], spatial_tendon_ids.shape[0]), - inputs=[ - stiffness, - env_ids, - spatial_tendon_ids, - full_data, - ], - outputs=[ - self.data._spatial_tendon_stiffness, - ], - device=self.device, - ) + if isinstance(stiffness, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], spatial_tendon_ids.shape[0]), + inputs=[ + stiffness, + env_ids, + spatial_tendon_ids, + ], + outputs=[ + self.data._spatial_tendon_stiffness, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], spatial_tendon_ids.shape[0]), + inputs=[ + stiffness, + env_ids, + spatial_tendon_ids, + full_data, + ], + outputs=[ + self.data._spatial_tendon_stiffness, + ], + device=self.device, + ) # Only updates internal buffers, does not apply the stiffness to the simulation. 
def set_spatial_tendon_stiffness_mask( self, *, - stiffness: torch.Tensor | wp.array, + stiffness: float | torch.Tensor | wp.array, spatial_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -3138,7 +3243,7 @@ def set_spatial_tendon_stiffness_mask( def set_spatial_tendon_damping_index( self, *, - damping: torch.Tensor | wp.array, + damping: float | torch.Tensor | wp.array, spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, full_data: bool = False, @@ -3168,26 +3273,41 @@ def set_spatial_tendon_damping_index( spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) self.assert_shape_and_dtype(damping, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "damping") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. - wp.launch( - shared_kernels.write_2d_data_to_buffer_with_indices, - dim=(env_ids.shape[0], spatial_tendon_ids.shape[0]), - inputs=[ - damping, - env_ids, - spatial_tendon_ids, - full_data, - ], - outputs=[ - self.data._spatial_tendon_damping, - ], - device=self.device, - ) + if isinstance(damping, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], spatial_tendon_ids.shape[0]), + inputs=[ + damping, + env_ids, + spatial_tendon_ids, + ], + outputs=[ + self.data._spatial_tendon_damping, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], spatial_tendon_ids.shape[0]), + inputs=[ + damping, + env_ids, + spatial_tendon_ids, + full_data, + ], + outputs=[ + self.data._spatial_tendon_damping, + ], + device=self.device, + ) # Only updates internal buffers, does not apply the damping to the simulation. 
def set_spatial_tendon_damping_mask( self, *, - damping: torch.Tensor | wp.array, + damping: float | torch.Tensor | wp.array, spatial_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -3226,7 +3346,7 @@ def set_spatial_tendon_damping_mask( def set_spatial_tendon_limit_stiffness_index( self, *, - limit_stiffness: torch.Tensor | wp.array, + limit_stiffness: float | torch.Tensor | wp.array, spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, full_data: bool = False, @@ -3257,26 +3377,41 @@ def set_spatial_tendon_limit_stiffness_index( spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) self.assert_shape_and_dtype(limit_stiffness, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "limit_stiffness") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. - wp.launch( - shared_kernels.write_2d_data_to_buffer_with_indices, - dim=(env_ids.shape[0], spatial_tendon_ids.shape[0]), - inputs=[ - limit_stiffness, - env_ids, - spatial_tendon_ids, - full_data, - ], - outputs=[ - self.data._spatial_tendon_limit_stiffness, - ], - device=self.device, - ) + if isinstance(limit_stiffness, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], spatial_tendon_ids.shape[0]), + inputs=[ + limit_stiffness, + env_ids, + spatial_tendon_ids, + ], + outputs=[ + self.data._spatial_tendon_limit_stiffness, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], spatial_tendon_ids.shape[0]), + inputs=[ + limit_stiffness, + env_ids, + spatial_tendon_ids, + full_data, + ], + outputs=[ + self.data._spatial_tendon_limit_stiffness, + ], + device=self.device, + ) # Only updates internal buffers, does not apply the limit stiffness to the simulation. 
def set_spatial_tendon_limit_stiffness_mask( self, *, - limit_stiffness: torch.Tensor | wp.array, + limit_stiffness: float | torch.Tensor | wp.array, spatial_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: @@ -3315,7 +3450,7 @@ def set_spatial_tendon_limit_stiffness_mask( def set_spatial_tendon_offset_index( self, *, - offset: torch.Tensor | wp.array, + offset: float | torch.Tensor | wp.array, spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None, env_ids: Sequence[int] | torch.Tensor | wp.array | None = None, full_data: bool = False, @@ -3345,26 +3480,41 @@ def set_spatial_tendon_offset_index( spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) self.assert_shape_and_dtype(offset, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "offset") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. - wp.launch( - shared_kernels.write_2d_data_to_buffer_with_indices, - dim=(env_ids.shape[0], spatial_tendon_ids.shape[0]), - inputs=[ - offset, - env_ids, - spatial_tendon_ids, - full_data, - ], - outputs=[ - self.data._spatial_tendon_offset, - ], - device=self.device, - ) + if isinstance(offset, float): + wp.launch( + articulation_kernels.float_data_to_buffer_with_indices, + dim=(env_ids.shape[0], spatial_tendon_ids.shape[0]), + inputs=[ + offset, + env_ids, + spatial_tendon_ids, + ], + outputs=[ + self.data._spatial_tendon_offset, + ], + device=self.device, + ) + else: + wp.launch( + shared_kernels.write_2d_data_to_buffer_with_indices, + dim=(env_ids.shape[0], spatial_tendon_ids.shape[0]), + inputs=[ + offset, + env_ids, + spatial_tendon_ids, + full_data, + ], + outputs=[ + self.data._spatial_tendon_offset, + ], + device=self.device, + ) # Only updates internal buffers, does not apply the offset to the simulation. 
def set_spatial_tendon_offset_mask( self, *, - offset: torch.Tensor | wp.array, + offset: float | torch.Tensor | wp.array, spatial_tendon_mask: wp.array | None = None, env_mask: wp.array | None = None, ) -> None: diff --git a/source/isaaclab_physx/test/assets/test_articulation.py b/source/isaaclab_physx/test/assets/test_articulation.py index a771e555b94..5653d5ec6e6 100644 --- a/source/isaaclab_physx/test/assets/test_articulation.py +++ b/source/isaaclab_physx/test/assets/test_articulation.py @@ -2124,10 +2124,10 @@ def test_spatial_tendons(sim, num_articulations, device): assert wp.to_torch(articulation.data.body_inertia).shape == (num_articulations, articulation.num_bodies, 9) assert articulation.num_spatial_tendons == 1 - articulation.set_spatial_tendon_stiffness_index(stiffness=torch.tensor([10.0], device=device)) - articulation.set_spatial_tendon_limit_stiffness_index(limit_stiffness=torch.tensor([10.0], device=device)) - articulation.set_spatial_tendon_damping_index(damping=torch.tensor([10.0], device=device)) - articulation.set_spatial_tendon_offset_index(offset=torch.tensor([10.0], device=device)) + articulation.set_spatial_tendon_stiffness_index(stiffness=10.0) + articulation.set_spatial_tendon_limit_stiffness_index(limit_stiffness=10.0) + articulation.set_spatial_tendon_damping_index(damping=10.0) + articulation.set_spatial_tendon_offset_index(offset=10.0) # Simulate physics for _ in range(10): From 824dce9d084ed3639f9a1564ca03c5cf1414f11e Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 25 Feb 2026 13:37:58 +0100 Subject: [PATCH 04/13] improvements --- source/isaaclab/isaaclab/assets/asset_base.py | 71 +++++++ .../assets/articulation/articulation.py | 75 ------- .../assets/rigid_object/rigid_object.py | 75 ------- .../assets/articulation/articulation.py | 199 ++++++++++++------ .../deformable_object/deformable_object.py | 48 ++--- .../assets/rigid_object/rigid_object.py | 66 +++--- .../rigid_object_collection.py | 66 +++--- 
.../assets/surface_gripper/surface_gripper.py | 37 +--- .../test/assets/test_deformable_object.py | 2 +- 9 files changed, 282 insertions(+), 357 deletions(-) diff --git a/source/isaaclab/isaaclab/assets/asset_base.py b/source/isaaclab/isaaclab/assets/asset_base.py index e6bd12220b3..a276b019c22 100644 --- a/source/isaaclab/isaaclab/assets/asset_base.py +++ b/source/isaaclab/isaaclab/assets/asset_base.py @@ -13,6 +13,7 @@ from typing import TYPE_CHECKING, Any import torch +import warp as wp import isaaclab.sim as sim_utils from isaaclab.physics import PhysicsEvent, PhysicsManager @@ -235,6 +236,76 @@ def update(self, dt: float): """ raise NotImplementedError + """ + Validation. + """ + + # Mapping from warp dtype to the trailing dimensions that a torch.Tensor + # would have for the same data. Subclasses may extend this (e.g. custom + # ``vec6f`` in deformable objects) by updating the dict in their ``__init__``. + _DTYPE_TO_TORCH_TRAILING_DIMS: dict[type, tuple[int, ...]] = { + wp.float32: (), + wp.int32: (), + wp.vec2f: (2,), + wp.vec3f: (3,), + wp.vec4f: (4,), + wp.transformf: (7,), + wp.spatial_vectorf: (6,), + } + + def assert_shape_and_dtype( + self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type, name: str = "" + ) -> None: + """Assert the shape and dtype of a tensor or warp array. + + Args: + tensor: The tensor or warp array to assert the shape of. Floats are skipped. + shape: The expected leading dimensions (e.g. ``(num_envs, num_joints)``). + dtype: The expected warp dtype. + name: Optional parameter name for error messages. 
+ """ + if __debug__: + cls = type(self).__name__ + prefix = f"{cls}: '{name}' " if name else f"{cls}: " + if isinstance(tensor, (int, float)): + return + elif isinstance(tensor, wp.array): + assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" + assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" + elif isinstance(tensor, torch.Tensor): + offset = self._DTYPE_TO_TORCH_TRAILING_DIMS.get(dtype) + if offset is None: + raise ValueError(f"Unsupported dtype: {dtype}") + assert tensor.shape == (*shape, *offset), ( + f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" + ) + + def assert_shape_and_dtype_mask( + self, + tensor: float | torch.Tensor | wp.array, + masks: tuple[wp.array, ...], + dtype: type, + name: str = "", + trailing_dims: tuple[int, ...] = (), + ) -> None: + """Assert the shape of a tensor or warp array against mask dimensions. + + Mask-based write methods expect **full-sized** data — one element per entry in each mask + dimension, regardless of how many entries are ``True``. The expected leading shape is therefore + ``(mask_0.shape[0], mask_1.shape[0], ...)`` (i.e. the *total* size of each dimension, not the + number of selected entries). + + Args: + tensor: The tensor or warp array to assert the shape of. Floats are skipped. + masks: Tuple of mask arrays whose ``shape[0]`` dimensions form the expected leading shape. + dtype: The expected warp dtype. + name: Optional parameter name for error messages. + trailing_dims: Extra trailing dimensions to append (e.g. ``(9,)`` for inertias with ``wp.float32``). + """ + if __debug__: + shape = (*tuple(m.shape[0] for m in masks), *trailing_dims) + self.assert_shape_and_dtype(tensor, shape, dtype, name) + """ Implementation specific. 
""" diff --git a/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py b/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py index b8d80cd93cf..a179626c015 100644 --- a/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py +++ b/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py @@ -3179,81 +3179,6 @@ def write_spatial_tendon_properties_to_sim_mask( """ raise NotImplementedError() - def assert_shape_and_dtype( - self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type, name: str = "" - ) -> None: - """Assert the shape and dtype of a tensor or warp array. - - Args: - tensor: The tensor or warp array to assert the shape of. Floats are skipped. - shape: The shape to assert. - dtype: The warp dtype to assert. - name: Optional parameter name for error messages. - """ - if __debug__: - cls = type(self).__name__ - prefix = f"{cls}: '{name}' " if name else f"{cls}: " - if isinstance(tensor, (int, float)): - return - elif isinstance(tensor, wp.array): - assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" - assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" - elif isinstance(tensor, torch.Tensor): - if dtype is wp.float32: - offset = () - elif dtype is wp.vec2f: - offset = (2,) - elif dtype is wp.vec3f: - offset = (3,) - elif dtype is wp.transformf: - offset = (7,) - elif dtype is wp.spatial_vectorf: - offset = (6,) - else: - raise ValueError(f"Unsupported dtype: {dtype}") - assert tensor.shape == (*shape, *offset), f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" - - def assert_shape_and_dtype_mask( - self, - tensor: float | torch.Tensor | wp.array, - masks: tuple[wp.array, ...], - dtype: type, - name: str = "", - trailing_dims: tuple[int, ...] = (), - ) -> None: - """Assert the shape of a tensor or warp array against mask dimensions. 
- - Args: - tensor: The tensor or warp array to assert the shape of. Floats are skipped. - masks: Tuple of mask arrays whose shape[0] dimensions form the expected shape. - dtype: The warp dtype to assert. - name: Optional parameter name for error messages. - trailing_dims: Extra trailing dimensions to append (e.g. (9,) for inertias with wp.float32). - """ - if __debug__: - cls = type(self).__name__ - prefix = f"{cls}: '{name}' " if name else f"{cls}: " - if isinstance(tensor, (int, float)): - return - shape = (*tuple(m.shape[0] for m in masks), *trailing_dims) - if isinstance(tensor, wp.array): - assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" - assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" - elif isinstance(tensor, torch.Tensor): - if dtype is wp.float32: - offset = () - elif dtype is wp.vec2f: - offset = (2,) - elif dtype is wp.vec3f: - offset = (3,) - elif dtype is wp.transformf: - offset = (7,) - elif dtype is wp.spatial_vectorf: - offset = (6,) - else: - raise ValueError(f"Unsupported dtype: {dtype}") - assert tensor.shape == (*shape, *offset), f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" - """ Internal helper. """ diff --git a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py index d8294b8cc00..79106cd992e 100644 --- a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py +++ b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py @@ -997,81 +997,6 @@ def set_inertias_mask( # tell the physics engine that some of the body properties have been updated SimulationManager.add_model_change(SolverNotifyFlags.BODY_INERTIAL_PROPERTIES) - """ - Validation. 
- """ - - def assert_shape_and_dtype( - self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type, name: str = "" - ) -> None: - """Assert the shape and dtype of a tensor or warp array. - - Args: - tensor: The tensor or warp array to assert the shape of. Floats are skipped. - shape: The shape to assert. - dtype: The warp dtype to assert. - name: Optional parameter name for error messages. - """ - if __debug__: - cls = type(self).__name__ - prefix = f"{cls}: '{name}' " if name else f"{cls}: " - if isinstance(tensor, (int, float)): - return - elif isinstance(tensor, wp.array): - assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" - assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" - elif isinstance(tensor, torch.Tensor): - if dtype is wp.float32: - offset = () - elif dtype is wp.vec3f: - offset = (3,) - elif dtype is wp.transformf: - offset = (7,) - elif dtype is wp.spatial_vectorf: - offset = (6,) - else: - raise ValueError(f"Unsupported dtype: {dtype}") - assert tensor.shape == (*shape, *offset), f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" - - def assert_shape_and_dtype_mask( - self, - tensor: float | torch.Tensor | wp.array, - masks: tuple[wp.array, ...], - dtype: type, - name: str = "", - trailing_dims: tuple[int, ...] = (), - ) -> None: - """Assert the shape of a tensor or warp array against mask dimensions. - - Args: - tensor: The tensor or warp array to assert the shape of. Floats are skipped. - masks: Tuple of mask arrays whose shape[0] dimensions form the expected shape. - dtype: The warp dtype to assert. - name: Optional parameter name for error messages. - trailing_dims: Extra trailing dimensions to append (e.g. (9,) for inertias with wp.float32). 
- """ - if __debug__: - cls = type(self).__name__ - prefix = f"{cls}: '{name}' " if name else f"{cls}: " - if isinstance(tensor, (int, float)): - return - shape = (*tuple(m.shape[0] for m in masks), *trailing_dims) - if isinstance(tensor, wp.array): - assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" - assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" - elif isinstance(tensor, torch.Tensor): - if dtype is wp.float32: - offset = () - elif dtype is wp.vec3f: - offset = (3,) - elif dtype is wp.transformf: - offset = (7,) - elif dtype is wp.spatial_vectorf: - offset = (6,) - else: - raise ValueError(f"Unsupported dtype: {dtype}") - assert tensor.shape == (*shape, *offset), f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" - """ Internal helper. """ diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py index 15c07795d54..e457f0c5352 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py @@ -445,7 +445,10 @@ def write_root_link_pose_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") + if full_data: + self.assert_shape_and_dtype(root_pose, (self.num_instances,), wp.transformf, "root_pose") + else: + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.set_root_link_pose_to_sim, @@ -532,7 +535,10 @@ def write_root_com_pose_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") + if full_data: + self.assert_shape_and_dtype(root_pose, (self.num_instances,), wp.transformf, "root_pose") + else: + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. # Note: we are doing a single launch for faster performance. Prior versions would call # write_root_link_pose_to_sim after this. @@ -683,7 +689,10 @@ def write_root_com_velocity_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") + if full_data: + self.assert_shape_and_dtype(root_velocity, (self.num_instances,), wp.spatial_vectorf, "root_velocity") + else: + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.set_root_com_velocity_to_sim, @@ -773,7 +782,10 @@ def write_root_link_velocity_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") + if full_data: + self.assert_shape_and_dtype(root_velocity, (self.num_instances,), wp.spatial_vectorf, "root_velocity") + else: + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. # Note: we are doing a single launch for faster performance. Prior versions would do multiple launches. 
wp.launch( @@ -893,7 +905,10 @@ def write_joint_position_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(position, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "position") + if full_data: + self.assert_shape_and_dtype(position, (self.num_instances, self.num_joints), wp.float32, "position") + else: + self.assert_shape_and_dtype(position, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "position") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -983,7 +998,10 @@ def write_joint_velocity_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(velocity, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "velocity") + if full_data: + self.assert_shape_and_dtype(velocity, (self.num_instances, self.num_joints), wp.float32, "velocity") + else: + self.assert_shape_and_dtype(velocity, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "velocity") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( articulation_kernels.write_joint_vel_data, @@ -1070,7 +1088,10 @@ def write_joint_stiffness_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "stiffness") + if full_data: + self.assert_shape_and_dtype(stiffness, (self.num_instances, self.num_joints), wp.float32, "stiffness") + else: + self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "stiffness") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
if isinstance(stiffness, float): wp.launch( @@ -1167,7 +1188,10 @@ def write_joint_damping_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(damping, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "damping") + if full_data: + self.assert_shape_and_dtype(damping, (self.num_instances, self.num_joints), wp.float32, "damping") + else: + self.assert_shape_and_dtype(damping, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "damping") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. if isinstance(damping, float): wp.launch( @@ -1266,7 +1290,10 @@ def write_joint_position_limit_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.vec2f, "limits") + if full_data: + self.assert_shape_and_dtype(limits, (self.num_instances, self.num_joints), wp.vec2f, "limits") + else: + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.vec2f, "limits") clamped_defaults = wp.zeros(1, dtype=wp.int32, device=self.device) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. @@ -1377,7 +1404,10 @@ def write_joint_velocity_limit_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "limits") + if full_data: + self.assert_shape_and_dtype(limits, (self.num_instances, self.num_joints), wp.float32, "limits") + else: + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "limits") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
if isinstance(limits, float): wp.launch( @@ -1481,7 +1511,10 @@ def write_joint_effort_limit_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "limits") + if full_data: + self.assert_shape_and_dtype(limits, (self.num_instances, self.num_joints), wp.float32, "limits") + else: + self.assert_shape_and_dtype(limits, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "limits") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. if isinstance(limits, float): wp.launch( @@ -1581,7 +1614,10 @@ def write_joint_armature_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(armature, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "armature") + if full_data: + self.assert_shape_and_dtype(armature, (self.num_instances, self.num_joints), wp.float32, "armature") + else: + self.assert_shape_and_dtype(armature, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "armature") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
if isinstance(armature, float): wp.launch( @@ -1696,11 +1732,20 @@ def write_joint_friction_coefficient_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(joint_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_friction_coeff") + if full_data: + self.assert_shape_and_dtype(joint_friction_coeff, (self.num_instances, self.num_joints), wp.float32, "joint_friction_coeff") + else: + self.assert_shape_and_dtype(joint_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_friction_coeff") if joint_dynamic_friction_coeff is not None: - self.assert_shape_and_dtype(joint_dynamic_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_dynamic_friction_coeff") + if full_data: + self.assert_shape_and_dtype(joint_dynamic_friction_coeff, (self.num_instances, self.num_joints), wp.float32, "joint_dynamic_friction_coeff") + else: + self.assert_shape_and_dtype(joint_dynamic_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_dynamic_friction_coeff") if joint_viscous_friction_coeff is not None: - self.assert_shape_and_dtype(joint_viscous_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_viscous_friction_coeff") + if full_data: + self.assert_shape_and_dtype(joint_viscous_friction_coeff, (self.num_instances, self.num_joints), wp.float32, "joint_viscous_friction_coeff") + else: + self.assert_shape_and_dtype(joint_viscous_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_viscous_friction_coeff") # Get the friction properties from the simulation. friction_props = wp.clone(self.root_view.get_dof_friction_properties(), device=self.device) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
@@ -1813,7 +1858,10 @@ def write_joint_dynamic_friction_coefficient_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(joint_dynamic_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_dynamic_friction_coeff") + if full_data: + self.assert_shape_and_dtype(joint_dynamic_friction_coeff, (self.num_instances, self.num_joints), wp.float32, "joint_dynamic_friction_coeff") + else: + self.assert_shape_and_dtype(joint_dynamic_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_dynamic_friction_coeff") # Get the friction properties from the simulation. friction_props = wp.clone(self.root_view.get_dof_friction_properties(), device=self.device) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. @@ -1906,7 +1954,10 @@ def write_joint_viscous_friction_coefficient_to_sim_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(joint_viscous_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_viscous_friction_coeff") + if full_data: + self.assert_shape_and_dtype(joint_viscous_friction_coeff, (self.num_instances, self.num_joints), wp.float32, "joint_viscous_friction_coeff") + else: + self.assert_shape_and_dtype(joint_viscous_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_viscous_friction_coeff") # Get the friction properties from the simulation. friction_props = wp.clone(self.root_view.get_dof_friction_properties(), device=self.device) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
@@ -2000,7 +2051,10 @@ def set_masses_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32, "masses") + if full_data: + self.assert_shape_and_dtype(masses, (self.num_instances, self.num_bodies), wp.float32, "masses") + else: + self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32, "masses") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2082,7 +2136,10 @@ def set_coms_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "coms") + if full_data: + self.assert_shape_and_dtype(coms, (self.num_instances, self.num_bodies), wp.transformf, "coms") + else: + self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "coms") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_body_com_pose_to_buffer, @@ -2169,7 +2226,10 @@ def set_inertias_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32, "inertias") + if full_data: + self.assert_shape_and_dtype(inertias, (self.num_instances, self.num_bodies, 9), wp.float32, "inertias") + else: + self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32, "inertias") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_body_inertia_to_buffer, @@ -2252,7 +2312,10 @@ def set_joint_position_target_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "target") + if full_data: + self.assert_shape_and_dtype(target, (self.num_instances, self.num_joints), wp.float32, "target") + else: + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "target") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2333,7 +2396,10 @@ def set_joint_velocity_target_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "target") + if full_data: + self.assert_shape_and_dtype(target, (self.num_instances, self.num_joints), wp.float32, "target") + else: + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "target") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2414,7 +2480,10 @@ def set_joint_effort_target_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) - self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "target") + if full_data: + self.assert_shape_and_dtype(target, (self.num_instances, self.num_joints), wp.float32, "target") + else: + self.assert_shape_and_dtype(target, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "target") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -2500,7 +2569,10 @@ def set_fixed_tendon_stiffness_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) - self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "stiffness") + if full_data: + self.assert_shape_and_dtype(stiffness, (self.num_instances, self.num_fixed_tendons), wp.float32, "stiffness") + else: + self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "stiffness") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. if isinstance(stiffness, float): wp.launch( @@ -2603,7 +2675,10 @@ def set_fixed_tendon_damping_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) - self.assert_shape_and_dtype(damping, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "damping") + if full_data: + self.assert_shape_and_dtype(damping, (self.num_instances, self.num_fixed_tendons), wp.float32, "damping") + else: + self.assert_shape_and_dtype(damping, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "damping") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
if isinstance(damping, float): wp.launch( @@ -2706,7 +2781,10 @@ def set_fixed_tendon_limit_stiffness_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) - self.assert_shape_and_dtype(limit_stiffness, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "limit_stiffness") + if full_data: + self.assert_shape_and_dtype(limit_stiffness, (self.num_instances, self.num_fixed_tendons), wp.float32, "limit_stiffness") + else: + self.assert_shape_and_dtype(limit_stiffness, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "limit_stiffness") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. if isinstance(limit_stiffness, float): wp.launch( @@ -2809,7 +2887,10 @@ def set_fixed_tendon_position_limit_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) - self.assert_shape_and_dtype(limit, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "limit") + if full_data: + self.assert_shape_and_dtype(limit, (self.num_instances, self.num_fixed_tendons), wp.float32, "limit") + else: + self.assert_shape_and_dtype(limit, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "limit") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
if isinstance(limit, float): wp.launch( @@ -2912,7 +2993,10 @@ def set_fixed_tendon_rest_length_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) - self.assert_shape_and_dtype(rest_length, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "rest_length") + if full_data: + self.assert_shape_and_dtype(rest_length, (self.num_instances, self.num_fixed_tendons), wp.float32, "rest_length") + else: + self.assert_shape_and_dtype(rest_length, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "rest_length") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. if isinstance(rest_length, float): wp.launch( @@ -3015,7 +3099,10 @@ def set_fixed_tendon_offset_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) - self.assert_shape_and_dtype(offset, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "offset") + if full_data: + self.assert_shape_and_dtype(offset, (self.num_instances, self.num_fixed_tendons), wp.float32, "offset") + else: + self.assert_shape_and_dtype(offset, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "offset") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
if isinstance(offset, float): wp.launch( @@ -3168,7 +3255,10 @@ def set_spatial_tendon_stiffness_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) - self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "stiffness") + if full_data: + self.assert_shape_and_dtype(stiffness, (self.num_instances, self.num_spatial_tendons), wp.float32, "stiffness") + else: + self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "stiffness") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. if isinstance(stiffness, float): wp.launch( @@ -3271,7 +3361,10 @@ def set_spatial_tendon_damping_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) - self.assert_shape_and_dtype(damping, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "damping") + if full_data: + self.assert_shape_and_dtype(damping, (self.num_instances, self.num_spatial_tendons), wp.float32, "damping") + else: + self.assert_shape_and_dtype(damping, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "damping") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
if isinstance(damping, float): wp.launch( @@ -3375,7 +3468,10 @@ def set_spatial_tendon_limit_stiffness_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) - self.assert_shape_and_dtype(limit_stiffness, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "limit_stiffness") + if full_data: + self.assert_shape_and_dtype(limit_stiffness, (self.num_instances, self.num_spatial_tendons), wp.float32, "limit_stiffness") + else: + self.assert_shape_and_dtype(limit_stiffness, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "limit_stiffness") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. if isinstance(limit_stiffness, float): wp.launch( @@ -3478,7 +3574,10 @@ def set_spatial_tendon_offset_index( # resolve indices env_ids = self._resolve_env_ids(env_ids) spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) - self.assert_shape_and_dtype(offset, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "offset") + if full_data: + self.assert_shape_and_dtype(offset, (self.num_instances, self.num_spatial_tendons), wp.float32, "offset") + else: + self.assert_shape_and_dtype(offset, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "offset") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. if isinstance(offset, float): wp.launch( @@ -4356,40 +4455,6 @@ def _resolve_spatial_tendon_ids( return self._ALL_SPATIAL_TENDON_INDICES return spatial_tendon_ids - def assert_shape_and_dtype( - self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type, name: str = "" - ) -> None: - """Assert the shape and dtype of a tensor or warp array. - - Args: - tensor: The tensor or warp array to assert the shape of. Floats are skipped. - shape: The shape to assert. - dtype: The warp dtype to assert. 
- name: Optional parameter name for error messages. - """ - if __debug__: - cls = type(self).__name__ - prefix = f"{cls}: '{name}' " if name else f"{cls}: " - if isinstance(tensor, (int, float)): - return - elif isinstance(tensor, wp.array): - assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" - assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" - elif isinstance(tensor, torch.Tensor): - if dtype is wp.float32: - offset = () - elif dtype is wp.vec2f: - offset = (2,) - elif dtype is wp.vec3f: - offset = (3,) - elif dtype is wp.transformf: - offset = (7,) - elif dtype is wp.spatial_vectorf: - offset = (6,) - else: - raise ValueError(f"Unsupported dtype: {dtype}") - assert tensor.shape == (*shape, *offset), f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" - """ Deprecated methods. """ diff --git a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py index 0c01e6374ac..8d083462d5f 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py @@ -78,6 +78,8 @@ def __init__(self, cfg: DeformableObjectCfg): cfg: A configuration instance. """ super().__init__(cfg) + # Register custom vec6f type for nodal state validation. 
+ self._DTYPE_TO_TORCH_TRAILING_DIMS = {**self._DTYPE_TO_TORCH_TRAILING_DIMS, vec6f: (6,)} """ Properties @@ -237,7 +239,10 @@ def write_nodal_pos_to_sim_index( """ # resolve env_ids env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(nodal_pos, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec3f, "nodal_pos") + if full_data: + self.assert_shape_and_dtype(nodal_pos, (self.num_instances, self.max_sim_vertices_per_body), wp.vec3f, "nodal_pos") + else: + self.assert_shape_and_dtype(nodal_pos, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec3f, "nodal_pos") # convert torch to warp if needed if isinstance(nodal_pos, torch.Tensor): nodal_pos = wp.from_torch(nodal_pos.contiguous(), dtype=wp.vec3f) @@ -298,7 +303,10 @@ def write_nodal_velocity_to_sim_index( """ # resolve env_ids env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(nodal_vel, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec3f, "nodal_vel") + if full_data: + self.assert_shape_and_dtype(nodal_vel, (self.num_instances, self.max_sim_vertices_per_body), wp.vec3f, "nodal_vel") + else: + self.assert_shape_and_dtype(nodal_vel, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec3f, "nodal_vel") # convert torch to warp if needed if isinstance(nodal_vel, torch.Tensor): nodal_vel = wp.from_torch(nodal_vel.contiguous(), dtype=wp.vec3f) @@ -363,7 +371,10 @@ def write_nodal_kinematic_target_to_sim_index( """ # resolve env_ids env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(targets, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec4f, "targets") + if full_data: + self.assert_shape_and_dtype(targets, (self.num_instances, self.max_sim_vertices_per_body), wp.vec4f, "targets") + else: + self.assert_shape_and_dtype(targets, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec4f, "targets") # convert torch to warp if needed, ensuring 2D (num_envs, V, 4) -> (num_envs, V) vec4f if isinstance(targets, torch.Tensor): if 
targets.dim() == 2: @@ -405,37 +416,6 @@ def write_nodal_kinematic_target_to_sim_mask( env_ids = self._ALL_INDICES self.write_nodal_kinematic_target_to_sim_index(targets, env_ids=env_ids, full_data=True) - def assert_shape_and_dtype( - self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type, name: str = "" - ) -> None: - """Assert the shape and dtype of a tensor or warp array. - - Args: - tensor: The tensor or warp array to assert the shape of. Floats are skipped. - shape: The shape to assert. - dtype: The warp dtype to assert. - name: Optional parameter name for error messages. - """ - if __debug__: - cls = type(self).__name__ - prefix = f"{cls}: '{name}' " if name else f"{cls}: " - if isinstance(tensor, (int, float)): - return - elif isinstance(tensor, wp.array): - assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" - assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" - elif isinstance(tensor, torch.Tensor): - if dtype is wp.float32: - offset = () - elif dtype is wp.vec3f: - offset = (3,) - elif dtype is wp.vec4f: - offset = (4,) - elif dtype is vec6f: - offset = (6,) - else: - raise ValueError(f"Unsupported dtype: {dtype}") - assert tensor.shape == (*shape, *offset), f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" """ Operations - Deprecated wrappers. 
diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py index 9a8dff16fdb..13ca7fd43d2 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py @@ -332,7 +332,10 @@ def write_root_link_pose_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") + if full_data: + self.assert_shape_and_dtype(root_pose, (self.num_instances,), wp.transformf, "root_pose") + else: + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") wp.launch( shared_kernels.set_root_link_pose_to_sim, dim=env_ids.shape[0], @@ -413,7 +416,10 @@ def write_root_com_pose_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") + if full_data: + self.assert_shape_and_dtype(root_pose, (self.num_instances,), wp.transformf, "root_pose") + else: + self.assert_shape_and_dtype(root_pose, (env_ids.shape[0],), wp.transformf, "root_pose") wp.launch( shared_kernels.set_root_com_pose_to_sim, dim=env_ids.shape[0], @@ -501,7 +507,10 @@ def write_root_com_velocity_to_sim_index( """ # resolve all indices env_ids = self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") + if full_data: + self.assert_shape_and_dtype(root_velocity, (self.num_instances,), wp.spatial_vectorf, "root_velocity") + else: + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") wp.launch( shared_kernels.set_root_com_velocity_to_sim, dim=env_ids.shape[0], @@ -591,7 +600,10 @@ def write_root_link_velocity_to_sim_index( """ # resolve all indices env_ids = 
self._resolve_env_ids(env_ids) - self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") + if full_data: + self.assert_shape_and_dtype(root_velocity, (self.num_instances,), wp.spatial_vectorf, "root_velocity") + else: + self.assert_shape_and_dtype(root_velocity, (env_ids.shape[0],), wp.spatial_vectorf, "root_velocity") # Access body_com_pose_b and root_link_pose_w properties to ensure they are current. wp.launch( shared_kernels.set_root_link_velocity_to_sim, @@ -687,7 +699,10 @@ def set_masses_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32, "masses") + if full_data: + self.assert_shape_and_dtype(masses, (self.num_instances, self.num_bodies), wp.float32, "masses") + else: + self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32, "masses") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -771,7 +786,10 @@ def set_coms_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "coms") + if full_data: + self.assert_shape_and_dtype(coms, (self.num_instances, self.num_bodies), wp.transformf, "coms") + else: + self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "coms") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_body_com_pose_to_buffer, @@ -854,7 +872,10 @@ def set_inertias_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32, "inertias") + if full_data: + self.assert_shape_and_dtype(inertias, (self.num_instances, self.num_bodies, 9), wp.float32, "inertias") + else: + self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32, "inertias") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_single_body_inertia_to_buffer, @@ -1056,37 +1077,6 @@ def _invalidate_initialize_callback(self, event): # set all existing views to None to invalidate them self._root_view = None - def assert_shape_and_dtype( - self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type, name: str = "" - ) -> None: - """Assert the shape and dtype of a tensor or warp array. - - Args: - tensor: The tensor or warp array to assert the shape of. Floats are skipped. - shape: The shape to assert. - dtype: The warp dtype to assert. - name: Optional parameter name for error messages. 
- """ - if __debug__: - cls = type(self).__name__ - prefix = f"{cls}: '{name}' " if name else f"{cls}: " - if isinstance(tensor, (int, float)): - return - elif isinstance(tensor, wp.array): - assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" - assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" - elif isinstance(tensor, torch.Tensor): - if dtype is wp.float32: - offset = () - elif dtype is wp.vec3f: - offset = (3,) - elif dtype is wp.transformf: - offset = (7,) - elif dtype is wp.spatial_vectorf: - offset = (6,) - else: - raise ValueError(f"Unsupported dtype: {dtype}") - assert tensor.shape == (*shape, *offset), f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" @property def root_physx_view(self) -> physx.RigidBodyView: diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py index 69c62293c95..0d9415a3738 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py @@ -419,7 +419,10 @@ def write_body_link_pose_to_sim_index( """ env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(body_poses, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "body_poses") + if full_data: + self.assert_shape_and_dtype(body_poses, (self.num_instances, self.num_bodies), wp.transformf, "body_poses") + else: + self.assert_shape_and_dtype(body_poses, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "body_poses") wp.launch( shared_kernels.set_body_link_pose_to_sim, dim=(env_ids.shape[0], body_ids.shape[0]), @@ -512,7 +515,10 @@ def write_body_com_pose_to_sim_index( """ env_ids = self._resolve_env_ids(env_ids) body_ids = 
self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(body_poses, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "body_poses") + if full_data: + self.assert_shape_and_dtype(body_poses, (self.num_instances, self.num_bodies), wp.transformf, "body_poses") + else: + self.assert_shape_and_dtype(body_poses, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "body_poses") wp.launch( shared_kernels.set_body_com_pose_to_sim, dim=(env_ids.shape[0], body_ids.shape[0]), @@ -609,7 +615,10 @@ def write_body_com_velocity_to_sim_index( """ env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(body_velocities, (env_ids.shape[0], body_ids.shape[0]), wp.spatial_vectorf, "body_velocities") + if full_data: + self.assert_shape_and_dtype(body_velocities, (self.num_instances, self.num_bodies), wp.spatial_vectorf, "body_velocities") + else: + self.assert_shape_and_dtype(body_velocities, (env_ids.shape[0], body_ids.shape[0]), wp.spatial_vectorf, "body_velocities") wp.launch( shared_kernels.set_body_com_velocity_to_sim, dim=(env_ids.shape[0], body_ids.shape[0]), @@ -710,7 +719,10 @@ def write_body_link_velocity_to_sim_index( """ env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(body_velocities, (env_ids.shape[0], body_ids.shape[0]), wp.spatial_vectorf, "body_velocities") + if full_data: + self.assert_shape_and_dtype(body_velocities, (self.num_instances, self.num_bodies), wp.spatial_vectorf, "body_velocities") + else: + self.assert_shape_and_dtype(body_velocities, (env_ids.shape[0], body_ids.shape[0]), wp.spatial_vectorf, "body_velocities") # Access body_com_pose_b and body_link_pose_w to ensure they are current. 
wp.launch( shared_kernels.set_body_link_velocity_to_sim, @@ -814,7 +826,10 @@ def set_masses_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32, "masses") + if full_data: + self.assert_shape_and_dtype(masses, (self.num_instances, self.num_bodies), wp.float32, "masses") + else: + self.assert_shape_and_dtype(masses, (env_ids.shape[0], body_ids.shape[0]), wp.float32, "masses") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, @@ -896,7 +911,10 @@ def set_coms_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "coms") + if full_data: + self.assert_shape_and_dtype(coms, (self.num_instances, self.num_bodies), wp.transformf, "coms") + else: + self.assert_shape_and_dtype(coms, (env_ids.shape[0], body_ids.shape[0]), wp.transformf, "coms") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( shared_kernels.write_body_com_pose_to_buffer, @@ -981,7 +999,10 @@ def set_inertias_index( # resolve all indices env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) - self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32, "inertias") + if full_data: + self.assert_shape_and_dtype(inertias, (self.num_instances, self.num_bodies, 9), wp.float32, "inertias") + else: + self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32, "inertias") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
wp.launch( shared_kernels.write_body_inertia_to_buffer, @@ -1357,37 +1378,6 @@ def _on_prim_deletion(self, prim_path: str) -> None: self._clear_callbacks() return - def assert_shape_and_dtype( - self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type, name: str = "" - ) -> None: - """Assert the shape and dtype of a tensor or warp array. - - Args: - tensor: The tensor or warp array to assert the shape of. Floats are skipped. - shape: The shape to assert. - dtype: The warp dtype to assert. - name: Optional parameter name for error messages. - """ - if __debug__: - cls = type(self).__name__ - prefix = f"{cls}: '{name}' " if name else f"{cls}: " - if isinstance(tensor, (int, float)): - return - elif isinstance(tensor, wp.array): - assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" - assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" - elif isinstance(tensor, torch.Tensor): - if dtype is wp.float32: - offset = () - elif dtype is wp.vec3f: - offset = (3,) - elif dtype is wp.transformf: - offset = (7,) - elif dtype is wp.spatial_vectorf: - offset = (6,) - else: - raise ValueError(f"Unsupported dtype: {dtype}") - assert tensor.shape == (*shape, *offset), f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" """ Deprecated properties and methods. 
diff --git a/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py index d45517ca803..7abe60aa1bc 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py @@ -165,7 +165,10 @@ def set_grippers_command_index( """ if env_ids is None: env_ids = self._ALL_INDICES - self.assert_shape_and_dtype(states, (env_ids.shape[0],), wp.float32, "states") + if full_data: + self.assert_shape_and_dtype(states, (self.num_instances,), wp.float32, "states") + else: + self.assert_shape_and_dtype(states, (env_ids.shape[0],), wp.float32, "states") # Convert torch input to warp if isinstance(states, torch.Tensor): @@ -234,7 +237,10 @@ def update_gripper_properties_index( ("retry_interval", retry_interval, self._retry_interval), ]: if prop_data is not None: - self.assert_shape_and_dtype(prop_data, (env_ids.shape[0],), wp.float32, prop_name) + if full_data: + self.assert_shape_and_dtype(prop_data, (self.num_instances,), wp.float32, prop_name) + else: + self.assert_shape_and_dtype(prop_data, (env_ids.shape[0],), wp.float32, prop_name) wp.launch( write_scalar_at_indices, dim=env_ids.shape[0], @@ -421,33 +427,6 @@ def reset(self, indices: torch.Tensor | None = None) -> None: env_ids = self._resolve_env_ids(indices) self.reset_index(env_ids) - def assert_shape_and_dtype( - self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type, name: str = "" - ) -> None: - """Assert the shape and dtype of a tensor or warp array. - - Args: - tensor: The tensor or warp array to assert the shape of. Floats are skipped. - shape: The shape to assert. - dtype: The warp dtype to assert. - name: Optional parameter name for error messages. 
- """ - if __debug__: - cls = type(self).__name__ - prefix = f"{cls}: '{name}' " if name else f"{cls}: " - if isinstance(tensor, (int, float)): - return - elif isinstance(tensor, wp.array): - assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}" - assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}" - elif isinstance(tensor, torch.Tensor): - if dtype is wp.float32: - offset = () - elif dtype is wp.int32: - offset = () - else: - raise ValueError(f"Unsupported dtype: {dtype}") - assert tensor.shape == (*shape, *offset), f"{prefix}Shape mismatch: {tensor.shape} != {(*shape, *offset)}" """ Initialization. diff --git a/source/isaaclab_physx/test/assets/test_deformable_object.py b/source/isaaclab_physx/test/assets/test_deformable_object.py index 31992cad5de..3021a95e359 100644 --- a/source/isaaclab_physx/test/assets/test_deformable_object.py +++ b/source/isaaclab_physx/test/assets/test_deformable_object.py @@ -328,7 +328,7 @@ def test_set_kinematic_targets(sim, num_cubes): nodal_kinematic_targets[0, :, 3] = 0.0 nodal_kinematic_targets[0, :, :3] = wp.to_torch(cube_object.data.default_nodal_state_w)[0, :, :3] cube_object.write_nodal_kinematic_target_to_sim_index( - nodal_kinematic_targets[0], env_ids=torch.tensor([0], device=sim.device) + nodal_kinematic_targets[0].unsqueeze(0), env_ids=torch.tensor([0], device=sim.device) ) for _ in range(20): From e3cface3daeb399e1b655ef844ea8910e7b003ec Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 25 Feb 2026 13:38:29 +0100 Subject: [PATCH 05/13] pre-commits --- .../assets/articulation/articulation.py | 12 +- .../assets/rigid_object/rigid_object.py | 4 +- .../assets/articulation/articulation.py | 104 ++++++++++++++---- .../deformable_object/deformable_object.py | 25 +++-- .../assets/rigid_object/rigid_object.py | 1 - .../rigid_object_collection.py | 17 ++- .../assets/surface_gripper/surface_gripper.py | 1 - 7 files changed, 122 insertions(+), 42 deletions(-) diff 
--git a/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py b/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py index a179626c015..108576891ef 100644 --- a/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py +++ b/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py @@ -1951,7 +1951,9 @@ def write_joint_friction_coefficient_to_sim_index( device=self.device, ) else: - self.assert_shape_and_dtype(joint_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_friction_coeff") + self.assert_shape_and_dtype( + joint_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_friction_coeff" + ) wp.launch( shared_kernels.write_2d_data_to_buffer_with_indices, dim=(env_ids.shape[0], joint_ids.shape[0]), @@ -2009,7 +2011,9 @@ def write_joint_friction_coefficient_to_sim_mask( device=self.device, ) else: - self.assert_shape_and_dtype_mask(joint_friction_coeff, (env_mask, joint_mask), wp.float32, "joint_friction_coeff") + self.assert_shape_and_dtype_mask( + joint_friction_coeff, (env_mask, joint_mask), wp.float32, "joint_friction_coeff" + ) wp.launch( shared_kernels.write_2d_data_to_buffer_with_mask, dim=(env_mask.shape[0], joint_mask.shape[0]), @@ -2281,9 +2285,7 @@ def set_inertias_mask( env_mask = self._ALL_ENV_MASK if body_mask is None: body_mask = self._ALL_BODY_MASK - self.assert_shape_and_dtype_mask( - inertias, (env_mask, body_mask), wp.float32, "inertias", trailing_dims=(9,) - ) + self.assert_shape_and_dtype_mask(inertias, (env_mask, body_mask), wp.float32, "inertias", trailing_dims=(9,)) wp.launch( shared_kernels.write_body_inertia_to_buffer_mask, dim=(env_mask.shape[0], body_mask.shape[0]), diff --git a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py index 79106cd992e..2092a8a1b51 100644 --- 
a/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py +++ b/source/isaaclab_newton/isaaclab_newton/assets/rigid_object/rigid_object.py @@ -978,9 +978,7 @@ def set_inertias_mask( env_mask = self._ALL_ENV_MASK if body_mask is None: body_mask = self._ALL_BODY_MASK - self.assert_shape_and_dtype_mask( - inertias, (env_mask, body_mask), wp.float32, "inertias", trailing_dims=(9,) - ) + self.assert_shape_and_dtype_mask(inertias, (env_mask, body_mask), wp.float32, "inertias", trailing_dims=(9,)) wp.launch( shared_kernels.write_body_inertia_to_buffer_mask, dim=(env_mask.shape[0], body_mask.shape[0]), diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py index e457f0c5352..1f451b55a23 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py @@ -1733,19 +1733,43 @@ def write_joint_friction_coefficient_to_sim_index( env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) if full_data: - self.assert_shape_and_dtype(joint_friction_coeff, (self.num_instances, self.num_joints), wp.float32, "joint_friction_coeff") + self.assert_shape_and_dtype( + joint_friction_coeff, (self.num_instances, self.num_joints), wp.float32, "joint_friction_coeff" + ) else: - self.assert_shape_and_dtype(joint_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_friction_coeff") + self.assert_shape_and_dtype( + joint_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_friction_coeff" + ) if joint_dynamic_friction_coeff is not None: if full_data: - self.assert_shape_and_dtype(joint_dynamic_friction_coeff, (self.num_instances, self.num_joints), wp.float32, "joint_dynamic_friction_coeff") + self.assert_shape_and_dtype( + joint_dynamic_friction_coeff, + (self.num_instances, self.num_joints), + 
wp.float32, + "joint_dynamic_friction_coeff", + ) else: - self.assert_shape_and_dtype(joint_dynamic_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_dynamic_friction_coeff") + self.assert_shape_and_dtype( + joint_dynamic_friction_coeff, + (env_ids.shape[0], joint_ids.shape[0]), + wp.float32, + "joint_dynamic_friction_coeff", + ) if joint_viscous_friction_coeff is not None: if full_data: - self.assert_shape_and_dtype(joint_viscous_friction_coeff, (self.num_instances, self.num_joints), wp.float32, "joint_viscous_friction_coeff") + self.assert_shape_and_dtype( + joint_viscous_friction_coeff, + (self.num_instances, self.num_joints), + wp.float32, + "joint_viscous_friction_coeff", + ) else: - self.assert_shape_and_dtype(joint_viscous_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_viscous_friction_coeff") + self.assert_shape_and_dtype( + joint_viscous_friction_coeff, + (env_ids.shape[0], joint_ids.shape[0]), + wp.float32, + "joint_viscous_friction_coeff", + ) # Get the friction properties from the simulation. friction_props = wp.clone(self.root_view.get_dof_friction_properties(), device=self.device) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
@@ -1859,9 +1883,19 @@ def write_joint_dynamic_friction_coefficient_to_sim_index( env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) if full_data: - self.assert_shape_and_dtype(joint_dynamic_friction_coeff, (self.num_instances, self.num_joints), wp.float32, "joint_dynamic_friction_coeff") + self.assert_shape_and_dtype( + joint_dynamic_friction_coeff, + (self.num_instances, self.num_joints), + wp.float32, + "joint_dynamic_friction_coeff", + ) else: - self.assert_shape_and_dtype(joint_dynamic_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_dynamic_friction_coeff") + self.assert_shape_and_dtype( + joint_dynamic_friction_coeff, + (env_ids.shape[0], joint_ids.shape[0]), + wp.float32, + "joint_dynamic_friction_coeff", + ) # Get the friction properties from the simulation. friction_props = wp.clone(self.root_view.get_dof_friction_properties(), device=self.device) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. @@ -1955,9 +1989,19 @@ def write_joint_viscous_friction_coefficient_to_sim_index( env_ids = self._resolve_env_ids(env_ids) joint_ids = self._resolve_joint_ids(joint_ids) if full_data: - self.assert_shape_and_dtype(joint_viscous_friction_coeff, (self.num_instances, self.num_joints), wp.float32, "joint_viscous_friction_coeff") + self.assert_shape_and_dtype( + joint_viscous_friction_coeff, + (self.num_instances, self.num_joints), + wp.float32, + "joint_viscous_friction_coeff", + ) else: - self.assert_shape_and_dtype(joint_viscous_friction_coeff, (env_ids.shape[0], joint_ids.shape[0]), wp.float32, "joint_viscous_friction_coeff") + self.assert_shape_and_dtype( + joint_viscous_friction_coeff, + (env_ids.shape[0], joint_ids.shape[0]), + wp.float32, + "joint_viscous_friction_coeff", + ) # Get the friction properties from the simulation. 
friction_props = wp.clone(self.root_view.get_dof_friction_properties(), device=self.device) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. @@ -2570,9 +2614,13 @@ def set_fixed_tendon_stiffness_index( env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) if full_data: - self.assert_shape_and_dtype(stiffness, (self.num_instances, self.num_fixed_tendons), wp.float32, "stiffness") + self.assert_shape_and_dtype( + stiffness, (self.num_instances, self.num_fixed_tendons), wp.float32, "stiffness" + ) else: - self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "stiffness") + self.assert_shape_and_dtype( + stiffness, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "stiffness" + ) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. if isinstance(stiffness, float): wp.launch( @@ -2782,9 +2830,13 @@ def set_fixed_tendon_limit_stiffness_index( env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) if full_data: - self.assert_shape_and_dtype(limit_stiffness, (self.num_instances, self.num_fixed_tendons), wp.float32, "limit_stiffness") + self.assert_shape_and_dtype( + limit_stiffness, (self.num_instances, self.num_fixed_tendons), wp.float32, "limit_stiffness" + ) else: - self.assert_shape_and_dtype(limit_stiffness, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "limit_stiffness") + self.assert_shape_and_dtype( + limit_stiffness, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "limit_stiffness" + ) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
if isinstance(limit_stiffness, float): wp.launch( @@ -2994,9 +3046,13 @@ def set_fixed_tendon_rest_length_index( env_ids = self._resolve_env_ids(env_ids) fixed_tendon_ids = self._resolve_fixed_tendon_ids(fixed_tendon_ids) if full_data: - self.assert_shape_and_dtype(rest_length, (self.num_instances, self.num_fixed_tendons), wp.float32, "rest_length") + self.assert_shape_and_dtype( + rest_length, (self.num_instances, self.num_fixed_tendons), wp.float32, "rest_length" + ) else: - self.assert_shape_and_dtype(rest_length, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "rest_length") + self.assert_shape_and_dtype( + rest_length, (env_ids.shape[0], fixed_tendon_ids.shape[0]), wp.float32, "rest_length" + ) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. if isinstance(rest_length, float): wp.launch( @@ -3256,9 +3312,13 @@ def set_spatial_tendon_stiffness_index( env_ids = self._resolve_env_ids(env_ids) spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) if full_data: - self.assert_shape_and_dtype(stiffness, (self.num_instances, self.num_spatial_tendons), wp.float32, "stiffness") + self.assert_shape_and_dtype( + stiffness, (self.num_instances, self.num_spatial_tendons), wp.float32, "stiffness" + ) else: - self.assert_shape_and_dtype(stiffness, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "stiffness") + self.assert_shape_and_dtype( + stiffness, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "stiffness" + ) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. 
if isinstance(stiffness, float): wp.launch( @@ -3469,9 +3529,13 @@ def set_spatial_tendon_limit_stiffness_index( env_ids = self._resolve_env_ids(env_ids) spatial_tendon_ids = self._resolve_spatial_tendon_ids(spatial_tendon_ids) if full_data: - self.assert_shape_and_dtype(limit_stiffness, (self.num_instances, self.num_spatial_tendons), wp.float32, "limit_stiffness") + self.assert_shape_and_dtype( + limit_stiffness, (self.num_instances, self.num_spatial_tendons), wp.float32, "limit_stiffness" + ) else: - self.assert_shape_and_dtype(limit_stiffness, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "limit_stiffness") + self.assert_shape_and_dtype( + limit_stiffness, (env_ids.shape[0], spatial_tendon_ids.shape[0]), wp.float32, "limit_stiffness" + ) # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. if isinstance(limit_stiffness, float): wp.launch( diff --git a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py index 8d083462d5f..34dafff38a0 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py @@ -240,9 +240,13 @@ def write_nodal_pos_to_sim_index( # resolve env_ids env_ids = self._resolve_env_ids(env_ids) if full_data: - self.assert_shape_and_dtype(nodal_pos, (self.num_instances, self.max_sim_vertices_per_body), wp.vec3f, "nodal_pos") + self.assert_shape_and_dtype( + nodal_pos, (self.num_instances, self.max_sim_vertices_per_body), wp.vec3f, "nodal_pos" + ) else: - self.assert_shape_and_dtype(nodal_pos, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec3f, "nodal_pos") + self.assert_shape_and_dtype( + nodal_pos, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec3f, "nodal_pos" + ) # convert torch to warp if needed if isinstance(nodal_pos, torch.Tensor): 
nodal_pos = wp.from_torch(nodal_pos.contiguous(), dtype=wp.vec3f) @@ -304,9 +308,13 @@ def write_nodal_velocity_to_sim_index( # resolve env_ids env_ids = self._resolve_env_ids(env_ids) if full_data: - self.assert_shape_and_dtype(nodal_vel, (self.num_instances, self.max_sim_vertices_per_body), wp.vec3f, "nodal_vel") + self.assert_shape_and_dtype( + nodal_vel, (self.num_instances, self.max_sim_vertices_per_body), wp.vec3f, "nodal_vel" + ) else: - self.assert_shape_and_dtype(nodal_vel, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec3f, "nodal_vel") + self.assert_shape_and_dtype( + nodal_vel, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec3f, "nodal_vel" + ) # convert torch to warp if needed if isinstance(nodal_vel, torch.Tensor): nodal_vel = wp.from_torch(nodal_vel.contiguous(), dtype=wp.vec3f) @@ -372,9 +380,13 @@ def write_nodal_kinematic_target_to_sim_index( # resolve env_ids env_ids = self._resolve_env_ids(env_ids) if full_data: - self.assert_shape_and_dtype(targets, (self.num_instances, self.max_sim_vertices_per_body), wp.vec4f, "targets") + self.assert_shape_and_dtype( + targets, (self.num_instances, self.max_sim_vertices_per_body), wp.vec4f, "targets" + ) else: - self.assert_shape_and_dtype(targets, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec4f, "targets") + self.assert_shape_and_dtype( + targets, (env_ids.shape[0], self.max_sim_vertices_per_body), wp.vec4f, "targets" + ) # convert torch to warp if needed, ensuring 2D (num_envs, V, 4) -> (num_envs, V) vec4f if isinstance(targets, torch.Tensor): if targets.dim() == 2: @@ -416,7 +428,6 @@ def write_nodal_kinematic_target_to_sim_mask( env_ids = self._ALL_INDICES self.write_nodal_kinematic_target_to_sim_index(targets, env_ids=env_ids, full_data=True) - """ Operations - Deprecated wrappers. 
""" diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py index 13ca7fd43d2..27eaaa7986c 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py @@ -1077,7 +1077,6 @@ def _invalidate_initialize_callback(self, event): # set all existing views to None to invalidate them self._root_view = None - @property def root_physx_view(self) -> physx.RigidBodyView: """Deprecated property. Please use :attr:`root_view` instead.""" diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py index 0d9415a3738..0efa1c9dff9 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py @@ -616,9 +616,13 @@ def write_body_com_velocity_to_sim_index( env_ids = self._resolve_env_ids(env_ids) body_ids = self._resolve_body_ids(body_ids) if full_data: - self.assert_shape_and_dtype(body_velocities, (self.num_instances, self.num_bodies), wp.spatial_vectorf, "body_velocities") + self.assert_shape_and_dtype( + body_velocities, (self.num_instances, self.num_bodies), wp.spatial_vectorf, "body_velocities" + ) else: - self.assert_shape_and_dtype(body_velocities, (env_ids.shape[0], body_ids.shape[0]), wp.spatial_vectorf, "body_velocities") + self.assert_shape_and_dtype( + body_velocities, (env_ids.shape[0], body_ids.shape[0]), wp.spatial_vectorf, "body_velocities" + ) wp.launch( shared_kernels.set_body_com_velocity_to_sim, dim=(env_ids.shape[0], body_ids.shape[0]), @@ -720,9 +724,13 @@ def write_body_link_velocity_to_sim_index( env_ids = self._resolve_env_ids(env_ids) body_ids = 
self._resolve_body_ids(body_ids) if full_data: - self.assert_shape_and_dtype(body_velocities, (self.num_instances, self.num_bodies), wp.spatial_vectorf, "body_velocities") + self.assert_shape_and_dtype( + body_velocities, (self.num_instances, self.num_bodies), wp.spatial_vectorf, "body_velocities" + ) else: - self.assert_shape_and_dtype(body_velocities, (env_ids.shape[0], body_ids.shape[0]), wp.spatial_vectorf, "body_velocities") + self.assert_shape_and_dtype( + body_velocities, (env_ids.shape[0], body_ids.shape[0]), wp.spatial_vectorf, "body_velocities" + ) # Access body_com_pose_b and body_link_pose_w to ensure they are current. wp.launch( shared_kernels.set_body_link_velocity_to_sim, @@ -1378,7 +1386,6 @@ def _on_prim_deletion(self, prim_path: str) -> None: self._clear_callbacks() return - """ Deprecated properties and methods. """ diff --git a/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py index 7abe60aa1bc..cd1e4e0aaea 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py @@ -427,7 +427,6 @@ def reset(self, indices: torch.Tensor | None = None) -> None: env_ids = self._resolve_env_ids(indices) self.reset_index(env_ids) - """ Initialization. """ From 9194a90da4f35819f4e3cc76f6c1a1d1588b638c Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 25 Feb 2026 14:02:45 +0100 Subject: [PATCH 06/13] Changelogs. 
--- source/isaaclab/docs/CHANGELOG.rst | 20 ++++++++++++++++++++ source/isaaclab_newton/docs/CHANGELOG.rst | 13 +++++++++++++ source/isaaclab_physx/docs/CHANGELOG.rst | 16 ++++++++++++++++ 3 files changed, 49 insertions(+) diff --git a/source/isaaclab/docs/CHANGELOG.rst b/source/isaaclab/docs/CHANGELOG.rst index 0bb821cbc74..ea4471ed43c 100644 --- a/source/isaaclab/docs/CHANGELOG.rst +++ b/source/isaaclab/docs/CHANGELOG.rst @@ -1,6 +1,26 @@ Changelog --------- +4.2.2 (2026-02-26) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :meth:`~isaaclab.assets.AssetBase.assert_shape_and_dtype` and + :meth:`~isaaclab.assets.AssetBase.assert_shape_and_dtype_mask` validation methods to + :class:`~isaaclab.assets.AssetBase` for runtime shape and dtype checking of write method + inputs. Checks are only active in debug mode (``__debug__``), adding zero overhead in + optimized builds. + +Changed +^^^^^^^ + +* Fixed tendon setter signatures in :class:`~isaaclab.assets.BaseArticulation` + (``set_fixed_tendon_*`` and ``set_spatial_tendon_*``) now accept ``float`` values in + addition to tensors and warp arrays. + + 4.2.1 (2026-02-25) ~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab_newton/docs/CHANGELOG.rst b/source/isaaclab_newton/docs/CHANGELOG.rst index d843896c74f..57e125c9478 100644 --- a/source/isaaclab_newton/docs/CHANGELOG.rst +++ b/source/isaaclab_newton/docs/CHANGELOG.rst @@ -1,6 +1,19 @@ Changelog --------- +0.2.2 (2026-02-26) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added runtime shape and dtype validation to all write methods in + :class:`~isaaclab_newton.assets.Articulation` and + :class:`~isaaclab_newton.assets.RigidObject` using + :meth:`~isaaclab.assets.AssetBase.assert_shape_and_dtype` and + :meth:`~isaaclab.assets.AssetBase.assert_shape_and_dtype_mask`. 
+ + 0.2.1 (2026-02-25) Removed diff --git a/source/isaaclab_physx/docs/CHANGELOG.rst b/source/isaaclab_physx/docs/CHANGELOG.rst index 7274766bd3a..acfc16f31c2 100644 --- a/source/isaaclab_physx/docs/CHANGELOG.rst +++ b/source/isaaclab_physx/docs/CHANGELOG.rst @@ -1,6 +1,22 @@ Changelog --------- +0.5.2 (2026-02-25) +~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added runtime shape and dtype validation to all write methods in + :class:`~isaaclab_physx.assets.Articulation`, + :class:`~isaaclab_physx.assets.RigidObject`, + :class:`~isaaclab_physx.assets.RigidObjectCollection`, + :class:`~isaaclab_physx.assets.DeformableObject`, and + :class:`~isaaclab_physx.assets.SurfaceGripper` using + :meth:`~isaaclab.assets.AssetBase.assert_shape_and_dtype`. Validates input dimensions + and types before kernel launch to catch mismatches early. + + 0.5.1 (2026-02-25) ~~~~~~~~~~~~~~~~~~ From a3e36cb18a3e73e906234d4e9be0a5f1e263e0b4 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 25 Feb 2026 15:13:36 +0100 Subject: [PATCH 07/13] fixing inconsistent shape in the rigid object outputs. --- .../rigid_object/base_rigid_object_data.py | 4 ++-- source/isaaclab/isaaclab/envs/mdp/events.py | 16 +++++----------- .../assets/rigid_object/rigid_object.py | 5 +++-- .../assets/rigid_object/rigid_object_data.py | 6 +++--- .../rigid_object_collection_data.py | 2 +- 5 files changed, 14 insertions(+), 19 deletions(-) diff --git a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py index 1f92ee9b5d5..e04a1ffa8bc 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py @@ -259,8 +259,8 @@ def body_com_pose_b(self) -> wp.array: def body_mass(self) -> wp.array: """Mass of all bodies in the simulation world frame. - Shape is (num_instances, 1, 1), dtype = wp.float32. 
- In torch this resolves to (num_instances, 1, 1). + Shape is (num_instances, 1), dtype = wp.float32. + In torch this resolves to (num_instances, 1). """ raise NotImplementedError() diff --git a/source/isaaclab/isaaclab/envs/mdp/events.py b/source/isaaclab/isaaclab/envs/mdp/events.py index 1b995612881..6e91d4fa5dc 100644 --- a/source/isaaclab/isaaclab/envs/mdp/events.py +++ b/source/isaaclab/isaaclab/envs/mdp/events.py @@ -355,7 +355,6 @@ def __call__( self.default_mass = wp.to_torch(self.asset.data.body_mass).clone() if self.default_inertia is None: self.default_inertia = wp.to_torch(self.asset.data.body_inertia).clone() - # resolve environment ids if env_ids is None: env_ids = torch.arange(env.scene.num_envs, device=self.asset.device, dtype=torch.int32) @@ -394,16 +393,11 @@ def __call__( # scale the inertia tensors by the the ratios # since mass randomization is done on default values, we can use the default inertia tensors inertias = wp.to_torch(self.asset.data.body_inertia).clone() - print("inertias device: ", inertias.device) - print("inertias shape: ", inertias.shape) - if isinstance(self.asset, BaseArticulation): - # inertia has shape: (num_envs, num_bodies, 9) for articulation - inertias[env_ids[:, None], body_ids] = ( - self.default_inertia[env_ids[:, None], body_ids] * ratios[..., None] - ) - else: - # inertia has shape: (num_envs, 9) for rigid object - inertias[env_ids] = self.default_inertia[env_ids] * ratios + # inertia has shape: (num_envs, num_bodies, 9) for all assets + inertias[env_ids[:, None], body_ids] = ( + self.default_inertia[env_ids[:, None], body_ids] * ratios[..., None] + ) + print("inertias: ", inertias.shape) # set the inertia tensors into the physics simulation self.asset.set_inertias_index(inertias=inertias, env_ids=env_ids) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py index 27eaaa7986c..fbea1483718 100644 --- 
a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py @@ -878,11 +878,12 @@ def set_inertias_index( self.assert_shape_and_dtype(inertias, (env_ids.shape[0], body_ids.shape[0], 9), wp.float32, "inertias") # Warp kernels can ingest torch tensors directly, so we don't need to convert to warp arrays here. wp.launch( - shared_kernels.write_single_body_inertia_to_buffer, + shared_kernels.write_body_inertia_to_buffer, dim=(env_ids.shape[0], body_ids.shape[0]), inputs=[ inertias, env_ids, + self._ALL_BODY_INDICES, full_data, ], outputs=[ @@ -895,7 +896,7 @@ def set_inertias_index( cpu_env_ids = wp.clone(env_ids, device="cpu") else: cpu_env_ids = wp.clone(wp.from_torch(env_ids, dtype=wp.int32), device="cpu") - self.root_view.set_inertias(wp.clone(self.data._body_inertia, device="cpu"), indices=cpu_env_ids) + self.root_view.set_inertias(wp.clone(self.data._body_inertia, device="cpu").flatten(), indices=cpu_env_ids) def set_inertias_mask( self, diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py index 6cf82afd592..edad08ca4a9 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py @@ -265,8 +265,8 @@ def root_com_vel_w(self) -> wp.array: def body_mass(self) -> wp.array: """Mass of all bodies in the simulation world frame. - Shape is (num_instances, 1, 1), dtype = wp.float32. - In torch this resolves to (num_instances, 1, 1). + Shape is (num_instances, 1), dtype = wp.float32. + In torch this resolves to (num_instances, 1). 
""" return self._body_mass @@ -685,7 +685,7 @@ def _create_buffers(self) -> None: # -- Body properties self._body_mass = wp.clone(self._root_view.get_masses(), device=self.device) - self._body_inertia = wp.clone(self._root_view.get_inertias(), device=self.device) + self._body_inertia = wp.clone(self._root_view.get_inertias(), device=self.device).reshape((self._num_instances, 1, 9)) """ Internal helpers. diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py index 5d6a6835931..3b06484f6de 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py @@ -610,7 +610,7 @@ def _create_buffers(self) -> None: # -- Body properties (stored in instance order: num_instances, num_bodies[, data_dim]) # Masses: view returns (B*I, 1) in view order. _reshape_view_to_data gives (I, B) in instance order. self._body_mass = self._reshape_view_to_data_2d(self._root_view.get_masses()).reshape( - (self.num_instances, self.num_bodies, 1) + (self.num_instances, self.num_bodies) ) # Inertias: view returns (B*I, 9) in view order. Need (I, B, 9) in instance order. # _reshape_view_to_data only handles single-element dtypes, so we use _reshape_view_to_data_3d. From bed0430614e61841ff606c689ca37e461cd73218 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 25 Feb 2026 15:31:10 +0100 Subject: [PATCH 08/13] pre-commits and done. Tests are looking good. 
--- source/isaaclab/isaaclab/envs/mdp/events.py | 4 +--- .../isaaclab_physx/assets/rigid_object/rigid_object_data.py | 4 +++- source/isaaclab_physx/test/assets/test_rigid_object.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/source/isaaclab/isaaclab/envs/mdp/events.py b/source/isaaclab/isaaclab/envs/mdp/events.py index 6e91d4fa5dc..617cfe3e515 100644 --- a/source/isaaclab/isaaclab/envs/mdp/events.py +++ b/source/isaaclab/isaaclab/envs/mdp/events.py @@ -394,9 +394,7 @@ def __call__( # since mass randomization is done on default values, we can use the default inertia tensors inertias = wp.to_torch(self.asset.data.body_inertia).clone() # inertia has shape: (num_envs, num_bodies, 9) for all assets - inertias[env_ids[:, None], body_ids] = ( - self.default_inertia[env_ids[:, None], body_ids] * ratios[..., None] - ) + inertias[env_ids[:, None], body_ids] = self.default_inertia[env_ids[:, None], body_ids] * ratios[..., None] print("inertias: ", inertias.shape) # set the inertia tensors into the physics simulation self.asset.set_inertias_index(inertias=inertias, env_ids=env_ids) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py index edad08ca4a9..0a6157585c5 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py @@ -685,7 +685,9 @@ def _create_buffers(self) -> None: # -- Body properties self._body_mass = wp.clone(self._root_view.get_masses(), device=self.device) - self._body_inertia = wp.clone(self._root_view.get_inertias(), device=self.device).reshape((self._num_instances, 1, 9)) + self._body_inertia = wp.clone(self._root_view.get_inertias(), device=self.device).reshape( + (self._num_instances, 1, 9) + ) """ Internal helpers. 
diff --git a/source/isaaclab_physx/test/assets/test_rigid_object.py b/source/isaaclab_physx/test/assets/test_rigid_object.py index b5f36f43a58..9e3f175296c 100644 --- a/source/isaaclab_physx/test/assets/test_rigid_object.py +++ b/source/isaaclab_physx/test/assets/test_rigid_object.py @@ -121,7 +121,7 @@ def test_initialization(num_cubes, device): assert wp.to_torch(cube_object.data.root_pos_w).shape == (num_cubes, 3) assert wp.to_torch(cube_object.data.root_quat_w).shape == (num_cubes, 4) assert wp.to_torch(cube_object.data.body_mass).shape == (num_cubes, 1) - assert wp.to_torch(cube_object.data.body_inertia).shape == (num_cubes, 9) + assert wp.to_torch(cube_object.data.body_inertia).shape == (num_cubes, 1, 9) # Simulate physics for _ in range(2): From c5b2107803558f1df34308f2c5f0678716b52ca7 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 25 Feb 2026 15:34:34 +0100 Subject: [PATCH 09/13] Noooow, we should be good. --- .../test/assets/test_rigid_object_collection.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/isaaclab_physx/test/assets/test_rigid_object_collection.py b/source/isaaclab_physx/test/assets/test_rigid_object_collection.py index 977161c963e..0f25d21360a 100644 --- a/source/isaaclab_physx/test/assets/test_rigid_object_collection.py +++ b/source/isaaclab_physx/test/assets/test_rigid_object_collection.py @@ -128,7 +128,7 @@ def test_initialization(sim, num_envs, num_cubes, device): # Check buffers that exist and have correct shapes assert wp.to_torch(object_collection.data.body_link_pos_w).shape == (num_envs, num_cubes, 3) assert wp.to_torch(object_collection.data.body_link_quat_w).shape == (num_envs, num_cubes, 4) - assert wp.to_torch(object_collection.data.body_mass).shape == (num_envs, num_cubes, 1) + assert wp.to_torch(object_collection.data.body_mass).shape == (num_envs, num_cubes) assert wp.to_torch(object_collection.data.body_inertia).shape == (num_envs, num_cubes, 9) # Simulate physics @@ -288,7 +288,7 @@ 
def test_external_force_on_single_body(sim, num_envs, num_cubes, device): # Sample a force equal to the weight of the object external_wrench_b = torch.zeros(object_collection.num_instances, len(object_ids), 6, device=sim.device) # Every 2nd cube should have a force applied to it - external_wrench_b[:, 0::2, 2] = 9.81 * wp.to_torch(object_collection.data.body_mass)[:, 0::2, 0] + external_wrench_b[:, 0::2, 2] = 9.81 * wp.to_torch(object_collection.data.body_mass)[:, 0::2] for i in range(5): # reset object state From eae0f3bd6c52b213756092113c89ac447933779c Mon Sep 17 00:00:00 2001 From: Kelly Guo Date: Wed, 25 Feb 2026 18:00:32 -0800 Subject: [PATCH 10/13] Bump version to 0.2.2 in extension.toml Signed-off-by: Kelly Guo --- source/isaaclab_newton/config/extension.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/isaaclab_newton/config/extension.toml b/source/isaaclab_newton/config/extension.toml index a11c7f06e8e..7bca448a441 100644 --- a/source/isaaclab_newton/config/extension.toml +++ b/source/isaaclab_newton/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "0.2.1" +version = "0.2.2" # Description title = "Newton simulation interfaces for IsaacLab core package" From 50ac8ef9f49d4fc8b39415bb4a83bfa8275417fb Mon Sep 17 00:00:00 2001 From: Kelly Guo Date: Wed, 25 Feb 2026 18:01:12 -0800 Subject: [PATCH 11/13] Update source/isaaclab/isaaclab/envs/mdp/events.py Signed-off-by: Kelly Guo --- source/isaaclab/isaaclab/envs/mdp/events.py | 1 - 1 file changed, 1 deletion(-) diff --git a/source/isaaclab/isaaclab/envs/mdp/events.py b/source/isaaclab/isaaclab/envs/mdp/events.py index 617cfe3e515..a15ddf59c57 100644 --- a/source/isaaclab/isaaclab/envs/mdp/events.py +++ b/source/isaaclab/isaaclab/envs/mdp/events.py @@ -395,7 +395,6 @@ def __call__( inertias = wp.to_torch(self.asset.data.body_inertia).clone() # inertia has shape: (num_envs, num_bodies, 9) for all assets 
inertias[env_ids[:, None], body_ids] = self.default_inertia[env_ids[:, None], body_ids] * ratios[..., None] - print("inertias: ", inertias.shape) # set the inertia tensors into the physics simulation self.asset.set_inertias_index(inertias=inertias, env_ids=env_ids) From 35ee02c27de00495bd3add295ca1daf5c890fceb Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Thu, 26 Feb 2026 09:33:01 +0100 Subject: [PATCH 12/13] fixed contact sensor tests --- source/isaaclab_physx/test/sensors/test_contact_sensor.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/isaaclab_physx/test/sensors/test_contact_sensor.py b/source/isaaclab_physx/test/sensors/test_contact_sensor.py index f5344c10d46..44823d93cee 100644 --- a/source/isaaclab_physx/test/sensors/test_contact_sensor.py +++ b/source/isaaclab_physx/test/sensors/test_contact_sensor.py @@ -481,7 +481,7 @@ def test_friction_reporting(setup_simulation, grav_dir): scene["contact_sensor"].reset() scene["shape"].write_root_pose_to_sim( - root_pose=torch.tensor([0, 0.0, CUBE_CFG.spawn.size[2] / 2.0, 1, 0, 0, 0], device=device) + root_pose=torch.tensor([0, 0.0, CUBE_CFG.spawn.size[2] / 2.0, 1, 0, 0, 0], device=device).unsqueeze(0) ) # step sim once to compute friction forces @@ -703,7 +703,7 @@ def _test_sensor_contact( duration = durations[idx] while current_test_time < duration: # set object states to contact the ground plane - shape.write_root_pose_to_sim(root_pose=torch.tensor(test_pose, device=shape.device)) + shape.write_root_pose_to_sim(root_pose=torch.tensor(test_pose, device=shape.device).unsqueeze(0)) # perform simulation step _perform_sim_step(sim, scene, sim_dt) # increment contact time @@ -735,7 +735,7 @@ def _test_sensor_contact( _test_friction_forces(shape, sensor, mode) # switch the contact mode for 1 dt step before the next contact test begins. 
- shape.write_root_pose_to_sim(root_pose=torch.tensor(reset_pose, device=shape.device)) + shape.write_root_pose_to_sim(root_pose=torch.tensor(reset_pose, device=shape.device).unsqueeze(0)) # perform simulation step _perform_sim_step(sim, scene, sim_dt) # set the last air time to 2 sim_dt steps, because last_air_time and last_contact_time From 4b00ea8091e1c7c6373f64e06f31c47ee62251f7 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Thu, 26 Feb 2026 10:13:46 +0100 Subject: [PATCH 13/13] Changelogs --- source/isaaclab/config/extension.toml | 2 +- source/isaaclab_physx/config/extension.toml | 2 +- source/isaaclab_tasks/docs/CHANGELOG.rst | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/source/isaaclab/config/extension.toml b/source/isaaclab/config/extension.toml index 2ae48aa0b7a..73085deba3f 100644 --- a/source/isaaclab/config/extension.toml +++ b/source/isaaclab/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "4.2.1" +version = "4.2.2" # Description title = "Isaac Lab framework for Robot Learning" diff --git a/source/isaaclab_physx/config/extension.toml b/source/isaaclab_physx/config/extension.toml index 13c77b52f79..2eee37b55ab 100644 --- a/source/isaaclab_physx/config/extension.toml +++ b/source/isaaclab_physx/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "0.5.1" +version = "0.5.2" # Description title = "PhysX simulation interfaces for IsaacLab core package" diff --git a/source/isaaclab_tasks/docs/CHANGELOG.rst b/source/isaaclab_tasks/docs/CHANGELOG.rst index a376bd8a76c..498d0c3013f 100644 --- a/source/isaaclab_tasks/docs/CHANGELOG.rst +++ b/source/isaaclab_tasks/docs/CHANGELOG.rst @@ -1,7 +1,7 @@ Changelog --------- -1.3.0 (2026-02-25) +1.3.0 (2026-02-26) ~~~~~~~~~~~~~~~~~~ Changed