Skip to content
Merged
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion source/extensions/omni.isaac.lab/config/extension.toml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

# Note: Semantic Versioning is used: https://semver.org/

version = "0.27.7"
version = "0.27.8"

# Description
title = "Isaac Lab framework for Robot Learning"
Expand Down
9 changes: 9 additions & 0 deletions source/extensions/omni.isaac.lab/docs/CHANGELOG.rst
Original file line number Diff line number Diff line change
@@ -1,6 +1,15 @@
Changelog
---------

0.27.8 (2024-10-31)
~~~~~~~~~~~~~~~~~~~

Added
^^^^^

* Added support to define a tuple of floats to scale observation terms by expanding the
  :attr:`omni.isaac.lab.managers.manager_term_cfg.ObservationManagerCfg.scale` attribute.


0.27.7 (2024-10-28)
~~~~~~~~~~~~~~~~~~~
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -152,9 +152,13 @@ class ObservationTermCfg(ManagerTermBaseCfg):
"""The clipping range for the observation after adding noise. Defaults to None,
in which case no clipping is applied."""

scale: float | None = None
scale: tuple[float, ...] | float | None = None
"""The scale to apply to the observation after clipping. Defaults to None,
in which case no scaling is applied (same as setting scale to :obj:`1`)."""
in which case no scaling is applied (same as setting scale to :obj:`1`).

We leverage PyTorch broadcasting to scale the observation tensor with the provided value. If a tuple is provided,
please make sure the length of the tuple matches the dimensions of the tensor output by the term.
"""


@configclass
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -259,7 +259,7 @@ def compute_group(self, group_name: str) -> torch.Tensor | dict[str, torch.Tenso
obs = term_cfg.noise.func(obs, term_cfg.noise)
if term_cfg.clip:
obs = obs.clip_(min=term_cfg.clip[0], max=term_cfg.clip[1])
if term_cfg.scale:
if term_cfg.scale is not None:
obs = obs.mul_(term_cfg.scale)
# add value to list
group_obs[name] = obs
Expand Down Expand Up @@ -343,6 +343,23 @@ def _prepare_terms(self):
obs_dims = tuple(term_cfg.func(self._env, **term_cfg.params).shape)
self._group_obs_term_dim[group_name].append(obs_dims[1:])

# if scale is set, check if single float or tuple
if term_cfg.scale is not None:
if not isinstance(term_cfg.scale, (float, int, tuple)):
raise TypeError(
f"Scale for observation term '{term_name}' in group '{group_name}'"
f" is not of type float, int or tuple. Received: '{type(term_cfg.scale)}'."
)
if isinstance(term_cfg.scale, tuple) and len(term_cfg.scale) != obs_dims[1]:
raise ValueError(
f"Scale for observation term '{term_name}' in group '{group_name}'"
f" does not match the dimensions of the observation. Expected: {obs_dims[1]}"
f" but received: {len(term_cfg.scale)}."
)

# cast the scale into torch tensor
term_cfg.scale = torch.tensor(term_cfg.scale, dtype=torch.float, device=self._env.device)

# prepare modifiers for each observation
if term_cfg.modifiers is not None:
# initialize list of modifiers for term
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -244,6 +244,8 @@ class SampleImageGroupCfg(ObservationGroupCfg):
def test_compute(self):
"""Test the observation computation."""

pos_scale_tuple = (2.0, 3.0, 1.0)

@configclass
class MyObservationManagerCfg:
"""Test config class for observation manager."""
Expand All @@ -254,14 +256,14 @@ class PolicyCfg(ObservationGroupCfg):

term_1 = ObservationTermCfg(func=grilled_chicken, scale=10)
term_2 = ObservationTermCfg(func=grilled_chicken_with_curry, scale=0.0, params={"hot": False})
term_3 = ObservationTermCfg(func=pos_w_data, scale=2.0)
term_3 = ObservationTermCfg(func=pos_w_data, scale=pos_scale_tuple)
term_4 = ObservationTermCfg(func=lin_vel_w_data, scale=1.5)

@configclass
class CriticCfg(ObservationGroupCfg):
term_1 = ObservationTermCfg(func=pos_w_data, scale=2.0)
term_1 = ObservationTermCfg(func=pos_w_data, scale=pos_scale_tuple)
term_2 = ObservationTermCfg(func=lin_vel_w_data, scale=1.5)
term_3 = ObservationTermCfg(func=pos_w_data, scale=2.0)
term_3 = ObservationTermCfg(func=pos_w_data, scale=pos_scale_tuple)
term_4 = ObservationTermCfg(func=lin_vel_w_data, scale=1.5)

@configclass
Expand Down Expand Up @@ -289,6 +291,11 @@ class ImageCfg(ObservationGroupCfg):
self.assertEqual((self.env.num_envs, 11), obs_policy.shape)
self.assertEqual((self.env.num_envs, 12), obs_critic.shape)
self.assertEqual((self.env.num_envs, 128, 256, 4), obs_image.shape)
# check that the scales are applied correctly
torch.testing.assert_close(
self.env.data.pos_w * torch.tensor(pos_scale_tuple, device=self.env.device), obs_critic[:, :3]
)
torch.testing.assert_close(self.env.data.lin_vel_w * 1.5, obs_critic[:, 3:6])
# make sure that the data are the same for same terms
# -- within group
torch.testing.assert_close(obs_critic[:, 0:3], obs_critic[:, 6:9])
Expand Down
Loading