Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion source/isaaclab/config/extension.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
[package]

# Note: Semantic Versioning is used: https://semver.org/
version = "4.2.1"
version = "4.2.2"

# Description
title = "Isaac Lab framework for Robot Learning"
Expand Down
20 changes: 20 additions & 0 deletions source/isaaclab/docs/CHANGELOG.rst
Original file line number Diff line number Diff line change
@@ -1,6 +1,26 @@
Changelog
---------

4.2.2 (2026-02-26)
~~~~~~~~~~~~~~~~~~

Added
^^^^^

* Added :meth:`~isaaclab.assets.AssetBase.assert_shape_and_dtype` and
:meth:`~isaaclab.assets.AssetBase.assert_shape_and_dtype_mask` validation methods to
:class:`~isaaclab.assets.AssetBase` for runtime shape and dtype checking of write method
inputs. Checks are only active in debug mode (``__debug__``), adding zero overhead in
optimized builds.

Changed
^^^^^^^

* Changed the tendon setter signatures in :class:`~isaaclab.assets.BaseArticulation`
  (``set_fixed_tendon_*`` and ``set_spatial_tendon_*``) to accept ``float`` values in
  addition to tensors and warp arrays.


4.2.1 (2026-02-25)
~~~~~~~~~~~~~~~~~~

Expand Down
40 changes: 20 additions & 20 deletions source/isaaclab/isaaclab/assets/articulation/base_articulation.py
Original file line number Diff line number Diff line change
Expand Up @@ -1435,7 +1435,7 @@ def set_joint_effort_target_mask(
def set_fixed_tendon_stiffness_index(
self,
*,
stiffness: torch.Tensor | wp.array,
stiffness: float | torch.Tensor | wp.array,
fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
env_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
) -> None:
Expand Down Expand Up @@ -1463,7 +1463,7 @@ def set_fixed_tendon_stiffness_index(
def set_fixed_tendon_stiffness_mask(
self,
*,
stiffness: torch.Tensor | wp.array,
stiffness: float | torch.Tensor | wp.array,
fixed_tendon_mask: wp.array | None = None,
env_mask: wp.array | None = None,
) -> None:
Expand Down Expand Up @@ -1492,7 +1492,7 @@ def set_fixed_tendon_stiffness_mask(
def set_fixed_tendon_damping_index(
self,
*,
damping: torch.Tensor | wp.array,
damping: float | torch.Tensor | wp.array,
fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
env_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
) -> None:
Expand Down Expand Up @@ -1520,7 +1520,7 @@ def set_fixed_tendon_damping_index(
def set_fixed_tendon_damping_mask(
self,
*,
damping: torch.Tensor | wp.array,
damping: float | torch.Tensor | wp.array,
fixed_tendon_mask: wp.array | None = None,
env_mask: wp.array | None = None,
) -> None:
Expand Down Expand Up @@ -1549,7 +1549,7 @@ def set_fixed_tendon_damping_mask(
def set_fixed_tendon_limit_stiffness_index(
self,
*,
limit_stiffness: torch.Tensor | wp.array,
limit_stiffness: float | torch.Tensor | wp.array,
fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
env_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
) -> None:
Expand Down Expand Up @@ -1577,7 +1577,7 @@ def set_fixed_tendon_limit_stiffness_index(
def set_fixed_tendon_limit_stiffness_mask(
self,
*,
limit_stiffness: torch.Tensor | wp.array,
limit_stiffness: float | torch.Tensor | wp.array,
fixed_tendon_mask: wp.array | None = None,
env_mask: wp.array | None = None,
) -> None:
Expand Down Expand Up @@ -1606,7 +1606,7 @@ def set_fixed_tendon_limit_stiffness_mask(
def set_fixed_tendon_position_limit_index(
self,
*,
limit: torch.Tensor | wp.array,
limit: float | torch.Tensor | wp.array,
fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
env_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
) -> None:
Expand Down Expand Up @@ -1634,7 +1634,7 @@ def set_fixed_tendon_position_limit_index(
def set_fixed_tendon_position_limit_mask(
self,
*,
limit: torch.Tensor | wp.array,
limit: float | torch.Tensor | wp.array,
fixed_tendon_mask: wp.array | None = None,
env_mask: wp.array | None = None,
) -> None:
Expand Down Expand Up @@ -1663,7 +1663,7 @@ def set_fixed_tendon_position_limit_mask(
def set_fixed_tendon_rest_length_index(
self,
*,
rest_length: torch.Tensor | wp.array,
rest_length: float | torch.Tensor | wp.array,
fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
env_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
) -> None:
Expand Down Expand Up @@ -1691,7 +1691,7 @@ def set_fixed_tendon_rest_length_index(
def set_fixed_tendon_rest_length_mask(
self,
*,
rest_length: torch.Tensor | wp.array,
rest_length: float | torch.Tensor | wp.array,
fixed_tendon_mask: wp.array | None = None,
env_mask: wp.array | None = None,
) -> None:
Expand Down Expand Up @@ -1720,7 +1720,7 @@ def set_fixed_tendon_rest_length_mask(
def set_fixed_tendon_offset_index(
self,
*,
offset: torch.Tensor | wp.array,
offset: float | torch.Tensor | wp.array,
fixed_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
env_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
) -> None:
Expand Down Expand Up @@ -1748,7 +1748,7 @@ def set_fixed_tendon_offset_index(
def set_fixed_tendon_offset_mask(
self,
*,
offset: torch.Tensor | wp.array,
offset: float | torch.Tensor | wp.array,
fixed_tendon_mask: wp.array | None = None,
env_mask: wp.array | None = None,
) -> None:
Expand Down Expand Up @@ -1822,7 +1822,7 @@ def write_fixed_tendon_properties_to_sim_mask(
def set_spatial_tendon_stiffness_index(
self,
*,
stiffness: torch.Tensor | wp.array,
stiffness: float | torch.Tensor | wp.array,
spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
env_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
) -> None:
Expand Down Expand Up @@ -1850,7 +1850,7 @@ def set_spatial_tendon_stiffness_index(
def set_spatial_tendon_stiffness_mask(
self,
*,
stiffness: torch.Tensor | wp.array,
stiffness: float | torch.Tensor | wp.array,
spatial_tendon_mask: wp.array | None = None,
env_mask: wp.array | None = None,
) -> None:
Expand Down Expand Up @@ -1879,7 +1879,7 @@ def set_spatial_tendon_stiffness_mask(
def set_spatial_tendon_damping_index(
self,
*,
damping: torch.Tensor | wp.array,
damping: float | torch.Tensor | wp.array,
spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
env_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
) -> None:
Expand Down Expand Up @@ -1907,7 +1907,7 @@ def set_spatial_tendon_damping_index(
def set_spatial_tendon_damping_mask(
self,
*,
damping: torch.Tensor | wp.array,
damping: float | torch.Tensor | wp.array,
spatial_tendon_mask: wp.array | None = None,
env_mask: wp.array | None = None,
) -> None:
Expand Down Expand Up @@ -1936,7 +1936,7 @@ def set_spatial_tendon_damping_mask(
def set_spatial_tendon_limit_stiffness_index(
self,
*,
limit_stiffness: torch.Tensor | wp.array,
limit_stiffness: float | torch.Tensor | wp.array,
spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
env_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
) -> None:
Expand Down Expand Up @@ -1965,7 +1965,7 @@ def set_spatial_tendon_limit_stiffness_index(
def set_spatial_tendon_limit_stiffness_mask(
self,
*,
limit_stiffness: torch.Tensor | wp.array,
limit_stiffness: float | torch.Tensor | wp.array,
spatial_tendon_mask: wp.array | None = None,
env_mask: wp.array | None = None,
) -> None:
Expand Down Expand Up @@ -1994,7 +1994,7 @@ def set_spatial_tendon_limit_stiffness_mask(
def set_spatial_tendon_offset_index(
self,
*,
offset: torch.Tensor | wp.array,
offset: float | torch.Tensor | wp.array,
spatial_tendon_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
env_ids: Sequence[int] | torch.Tensor | wp.array | None = None,
) -> None:
Expand Down Expand Up @@ -2022,7 +2022,7 @@ def set_spatial_tendon_offset_index(
def set_spatial_tendon_offset_mask(
self,
*,
offset: torch.Tensor | wp.array,
offset: float | torch.Tensor | wp.array,
spatial_tendon_mask: wp.array | None = None,
env_mask: wp.array | None = None,
) -> None:
Expand Down
71 changes: 71 additions & 0 deletions source/isaaclab/isaaclab/assets/asset_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
from typing import TYPE_CHECKING, Any

import torch
import warp as wp

import isaaclab.sim as sim_utils
from isaaclab.physics import PhysicsEvent, PhysicsManager
Expand Down Expand Up @@ -235,6 +236,76 @@ def update(self, dt: float):
"""
raise NotImplementedError

"""
Validation.
"""

# Mapping from warp dtype to the trailing dimensions that a torch.Tensor
# would have for the same data (e.g. a ``wp.array`` of ``vec3f`` with shape
# ``(N,)`` corresponds to a torch tensor of shape ``(N, 3)``). Scalar dtypes
# map to an empty tuple. Subclasses may extend this (e.g. custom ``vec6f``
# in deformable objects) by updating the dict in their ``__init__``.
_DTYPE_TO_TORCH_TRAILING_DIMS: dict[type, tuple[int, ...]] = {
    wp.float32: (),
    wp.int32: (),
    wp.vec2f: (2,),
    wp.vec3f: (3,),
    wp.vec4f: (4,),
    wp.transformf: (7,),
    wp.spatial_vectorf: (6,),
}

def assert_shape_and_dtype(
    self, tensor: float | torch.Tensor | wp.array, shape: tuple[int, ...], dtype: type, name: str = ""
) -> None:
    """Validate the shape and dtype of a tensor or warp array.

    Scalar inputs (``int``/``float``) are accepted unconditionally. Warp arrays are
    checked against both ``dtype`` and ``shape``. Torch tensors are checked against
    ``shape`` extended by the trailing dimensions implied by ``dtype`` (see
    ``_DTYPE_TO_TORCH_TRAILING_DIMS``); their torch dtype itself is not verified here.
    All checks are compiled out when Python runs with ``-O`` (``__debug__`` is False).

    Args:
        tensor: The tensor or warp array to assert the shape of. Floats are skipped.
        shape: The expected leading dimensions (e.g. ``(num_envs, num_joints)``).
        dtype: The expected warp dtype.
        name: Optional parameter name for error messages.

    Raises:
        ValueError: If ``tensor`` is a torch tensor and ``dtype`` has no entry in
            ``_DTYPE_TO_TORCH_TRAILING_DIMS`` (only raised in debug mode).
    """
    if not __debug__:
        return
    # Scalars carry no shape/dtype information -- nothing to validate.
    if isinstance(tensor, (int, float)):
        return
    cls = type(self).__name__
    prefix = f"{cls}: '{name}' " if name else f"{cls}: "
    if isinstance(tensor, wp.array):
        assert tensor.dtype == dtype, f"{prefix}Dtype mismatch: {tensor.dtype} != {dtype}"
        assert tensor.shape == shape, f"{prefix}Shape mismatch: {tensor.shape} != {shape}"
    elif isinstance(tensor, torch.Tensor):
        trailing = self._DTYPE_TO_TORCH_TRAILING_DIMS.get(dtype)
        if trailing is None:
            raise ValueError(f"Unsupported dtype: {dtype}")
        expected = (*shape, *trailing)
        assert tensor.shape == expected, f"{prefix}Shape mismatch: {tensor.shape} != {expected}"

def assert_shape_and_dtype_mask(
    self,
    tensor: float | torch.Tensor | wp.array,
    masks: tuple[wp.array, ...],
    dtype: type,
    name: str = "",
    trailing_dims: tuple[int, ...] = (),
) -> None:
    """Validate a tensor or warp array against the full sizes implied by mask arrays.

    Mask-based write methods expect **full-sized** data: one element per entry in each
    mask dimension, regardless of how many mask entries are ``True``. The expected
    leading shape is therefore built from the *total* length of each mask,
    ``(masks[0].shape[0], masks[1].shape[0], ...)``, not from the number of selected
    entries. Delegates the actual check to :meth:`assert_shape_and_dtype`; does nothing
    when Python runs with ``-O`` (``__debug__`` is False).

    Args:
        tensor: The tensor or warp array to assert the shape of. Floats are skipped.
        masks: Tuple of mask arrays whose ``shape[0]`` dimensions form the expected leading shape.
        dtype: The expected warp dtype.
        name: Optional parameter name for error messages.
        trailing_dims: Extra trailing dimensions to append (e.g. ``(9,)`` for inertias with ``wp.float32``).
    """
    if __debug__:
        leading_dims = [mask.shape[0] for mask in masks]
        expected_shape = (*leading_dims, *trailing_dims)
        self.assert_shape_and_dtype(tensor, expected_shape, dtype, name)

"""
Implementation specific.
"""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -259,8 +259,8 @@ def body_com_pose_b(self) -> wp.array:
def body_mass(self) -> wp.array:
"""Mass of all bodies in the simulation world frame.

Shape is (num_instances, 1, 1), dtype = wp.float32.
In torch this resolves to (num_instances, 1, 1).
Shape is (num_instances, 1), dtype = wp.float32.
In torch this resolves to (num_instances, 1).
"""
raise NotImplementedError()

Expand Down
13 changes: 2 additions & 11 deletions source/isaaclab/isaaclab/envs/mdp/events.py
Original file line number Diff line number Diff line change
Expand Up @@ -355,7 +355,6 @@ def __call__(
self.default_mass = wp.to_torch(self.asset.data.body_mass).clone()
if self.default_inertia is None:
self.default_inertia = wp.to_torch(self.asset.data.body_inertia).clone()

# resolve environment ids
if env_ids is None:
env_ids = torch.arange(env.scene.num_envs, device=self.asset.device, dtype=torch.int32)
Expand Down Expand Up @@ -394,16 +393,8 @@ def __call__(
# scale the inertia tensors by the the ratios
# since mass randomization is done on default values, we can use the default inertia tensors
inertias = wp.to_torch(self.asset.data.body_inertia).clone()
print("inertias device: ", inertias.device)
print("inertias shape: ", inertias.shape)
if isinstance(self.asset, BaseArticulation):
# inertia has shape: (num_envs, num_bodies, 9) for articulation
inertias[env_ids[:, None], body_ids] = (
self.default_inertia[env_ids[:, None], body_ids] * ratios[..., None]
)
else:
# inertia has shape: (num_envs, 9) for rigid object
inertias[env_ids] = self.default_inertia[env_ids] * ratios
# inertia has shape: (num_envs, num_bodies, 9) for all assets
inertias[env_ids[:, None], body_ids] = self.default_inertia[env_ids[:, None], body_ids] * ratios[..., None]
# set the inertia tensors into the physics simulation
self.asset.set_inertias_index(inertias=inertias, env_ids=env_ids)

Expand Down
2 changes: 1 addition & 1 deletion source/isaaclab_newton/config/extension.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
[package]

# Note: Semantic Versioning is used: https://semver.org/
version = "0.2.1"
version = "0.2.2"

# Description
title = "Newton simulation interfaces for IsaacLab core package"
Expand Down
13 changes: 13 additions & 0 deletions source/isaaclab_newton/docs/CHANGELOG.rst
Original file line number Diff line number Diff line change
@@ -1,6 +1,19 @@
Changelog
---------

0.2.2 (2026-02-26)
~~~~~~~~~~~~~~~~~~

Added
^^^^^

* Added runtime shape and dtype validation to all write methods in
:class:`~isaaclab_newton.assets.Articulation` and
:class:`~isaaclab_newton.assets.RigidObject` using
:meth:`~isaaclab.assets.AssetBase.assert_shape_and_dtype` and
:meth:`~isaaclab.assets.AssetBase.assert_shape_and_dtype_mask`.


0.2.1 (2026-02-25)

Removed
Expand Down
Loading
Loading