diff --git a/source/extensions/omni.isaac.lab/config/extension.toml b/source/extensions/omni.isaac.lab/config/extension.toml index 28a55640492..6f43e77bf2d 100644 --- a/source/extensions/omni.isaac.lab/config/extension.toml +++ b/source/extensions/omni.isaac.lab/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "0.27.10" +version = "0.27.11" # Description title = "Isaac Lab framework for Robot Learning" diff --git a/source/extensions/omni.isaac.lab/docs/CHANGELOG.rst b/source/extensions/omni.isaac.lab/docs/CHANGELOG.rst index 991b6268d7e..b67d43e9ed4 100644 --- a/source/extensions/omni.isaac.lab/docs/CHANGELOG.rst +++ b/source/extensions/omni.isaac.lab/docs/CHANGELOG.rst @@ -1,6 +1,16 @@ Changelog --------- +0.27.11 (2024-11-01) +~~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added support to define a tuple of floats to scale observation terms by expanding the + :attr:`omni.isaac.lab.managers.manager_term_cfg.ObservationTermCfg.scale` attribute. + + 0.27.10 (2024-11-01) ~~~~~~~~~~~~~~~~~~~~ diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/managers/manager_term_cfg.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/managers/manager_term_cfg.py index 9a2250e48bd..1a510159e20 100644 --- a/source/extensions/omni.isaac.lab/omni/isaac/lab/managers/manager_term_cfg.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/managers/manager_term_cfg.py @@ -152,9 +152,13 @@ class ObservationTermCfg(ManagerTermBaseCfg): """The clipping range for the observation after adding noise. Defaults to None, in which case no clipping is applied.""" - scale: float | None = None + scale: tuple[float, ...] | float | None = None """The scale to apply to the observation after clipping. Defaults to None, - in which case no scaling is applied (same as setting scale to :obj:`1`).""" + in which case no scaling is applied (same as setting scale to :obj:`1`). 
+ + We leverage PyTorch broadcasting to scale the observation tensor with the provided value. If a tuple is provided, + please make sure the length of the tuple matches the dimensions of the tensor outputted from the term. + """ @configclass diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/managers/observation_manager.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/managers/observation_manager.py index b270b2d456a..6bc9b0374b4 100644 --- a/source/extensions/omni.isaac.lab/omni/isaac/lab/managers/observation_manager.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/managers/observation_manager.py @@ -259,7 +259,7 @@ def compute_group(self, group_name: str) -> torch.Tensor | dict[str, torch.Tenso obs = term_cfg.noise.func(obs, term_cfg.noise) if term_cfg.clip: obs = obs.clip_(min=term_cfg.clip[0], max=term_cfg.clip[1]) - if term_cfg.scale: + if term_cfg.scale is not None: obs = obs.mul_(term_cfg.scale) # add value to list group_obs[name] = obs @@ -343,6 +343,23 @@ def _prepare_terms(self): obs_dims = tuple(term_cfg.func(self._env, **term_cfg.params).shape) self._group_obs_term_dim[group_name].append(obs_dims[1:]) + # if scale is set, check if single float or tuple + if term_cfg.scale is not None: + if not isinstance(term_cfg.scale, (float, int, tuple)): + raise TypeError( + f"Scale for observation term '{term_name}' in group '{group_name}'" + f" is not of type float, int or tuple. Received: '{type(term_cfg.scale)}'." + ) + if isinstance(term_cfg.scale, tuple) and len(term_cfg.scale) != obs_dims[1]: + raise ValueError( + f"Scale for observation term '{term_name}' in group '{group_name}'" + f" does not match the dimensions of the observation. Expected: {obs_dims[1]}" + f" but received: {len(term_cfg.scale)}." 
+ ) + + # cast the scale into torch tensor + term_cfg.scale = torch.tensor(term_cfg.scale, dtype=torch.float, device=self._env.device) + # prepare modifiers for each observation if term_cfg.modifiers is not None: # initialize list of modifiers for term diff --git a/source/extensions/omni.isaac.lab/test/managers/test_observation_manager.py b/source/extensions/omni.isaac.lab/test/managers/test_observation_manager.py index 9b73e2d44a3..c624fb2bd1a 100644 --- a/source/extensions/omni.isaac.lab/test/managers/test_observation_manager.py +++ b/source/extensions/omni.isaac.lab/test/managers/test_observation_manager.py @@ -244,6 +244,8 @@ class SampleImageGroupCfg(ObservationGroupCfg): def test_compute(self): """Test the observation computation.""" + pos_scale_tuple = (2.0, 3.0, 1.0) + @configclass class MyObservationManagerCfg: """Test config class for observation manager.""" @@ -254,14 +256,14 @@ class PolicyCfg(ObservationGroupCfg): term_1 = ObservationTermCfg(func=grilled_chicken, scale=10) term_2 = ObservationTermCfg(func=grilled_chicken_with_curry, scale=0.0, params={"hot": False}) - term_3 = ObservationTermCfg(func=pos_w_data, scale=2.0) + term_3 = ObservationTermCfg(func=pos_w_data, scale=pos_scale_tuple) term_4 = ObservationTermCfg(func=lin_vel_w_data, scale=1.5) @configclass class CriticCfg(ObservationGroupCfg): - term_1 = ObservationTermCfg(func=pos_w_data, scale=2.0) + term_1 = ObservationTermCfg(func=pos_w_data, scale=pos_scale_tuple) term_2 = ObservationTermCfg(func=lin_vel_w_data, scale=1.5) - term_3 = ObservationTermCfg(func=pos_w_data, scale=2.0) + term_3 = ObservationTermCfg(func=pos_w_data, scale=pos_scale_tuple) term_4 = ObservationTermCfg(func=lin_vel_w_data, scale=1.5) @configclass @@ -289,6 +291,11 @@ class ImageCfg(ObservationGroupCfg): self.assertEqual((self.env.num_envs, 11), obs_policy.shape) self.assertEqual((self.env.num_envs, 12), obs_critic.shape) self.assertEqual((self.env.num_envs, 128, 256, 4), obs_image.shape) + # check that the scales are 
applied correctly + torch.testing.assert_close( + self.env.data.pos_w * torch.tensor(pos_scale_tuple, device=self.env.device), obs_critic[:, :3] + ) + torch.testing.assert_close(self.env.data.lin_vel_w * 1.5, obs_critic[:, 3:6]) # make sure that the data are the same for same terms # -- within group torch.testing.assert_close(obs_critic[:, 0:3], obs_critic[:, 6:9])