diff --git a/source/extensions/omni.isaac.lab/config/extension.toml b/source/extensions/omni.isaac.lab/config/extension.toml
index 082ec35c24..9b53e611d5 100644
--- a/source/extensions/omni.isaac.lab/config/extension.toml
+++ b/source/extensions/omni.isaac.lab/config/extension.toml
@@ -1,7 +1,7 @@
 [package]
 
 # Note: Semantic Versioning is used: https://semver.org/
-version = "0.27.25"
+version = "0.27.26"
 
 # Description
 title = "Isaac Lab framework for Robot Learning"
diff --git a/source/extensions/omni.isaac.lab/docs/CHANGELOG.rst b/source/extensions/omni.isaac.lab/docs/CHANGELOG.rst
index 7ff5ef82d4..f828612c19 100644
--- a/source/extensions/omni.isaac.lab/docs/CHANGELOG.rst
+++ b/source/extensions/omni.isaac.lab/docs/CHANGELOG.rst
@@ -1,6 +1,15 @@
 Changelog
 ---------
 
+0.27.26 (2024-12-12)
+~~~~~~~~~~~~~~~~~~~~
+
+Fixed
+^^^^^
+
+* Added action clipping to all action terms in :mod:`omni.isaac.lab.envs.mdp.actions`.
+
+
 0.27.25 (2024-12-11)
 ~~~~~~~~~~~~~~~~~~~~
 
diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/binary_joint_actions.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/binary_joint_actions.py
index 5c2ba3fa15..c43a907373 100644
--- a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/binary_joint_actions.py
+++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/binary_joint_actions.py
@@ -40,9 +40,10 @@ class BinaryJointAction(ActionTerm):
 
     cfg: actions_cfg.BinaryJointActionCfg
     """The configuration of the action term."""
-
     _asset: Articulation
     """The articulation asset on which the action term is applied."""
+    _clip: torch.Tensor
+    """The clip applied to the input action."""
 
     def __init__(self, cfg: actions_cfg.BinaryJointActionCfg, env: ManagerBasedEnv) -> None:
         # initialize the action term
@@ -83,6 +84,15 @@ def __init__(self, cfg: actions_cfg.BinaryJointActionCfg, env: ManagerBasedEnv)
         )
         self._close_command[index_list] = torch.tensor(value_list, device=self.device)
 
+        # parse clip
+        if self.cfg.clip is not None:
+            if isinstance(cfg.clip, dict):
+                self._clip = torch.tensor([[-float('inf'), float('inf')]], device=self.device).expand(self.num_envs, self.action_dim, 2).clone()
+                index_list, _, value_list = string_utils.resolve_matching_names_values(self.cfg.clip, self._joint_names)
+                self._clip[:, index_list] = torch.tensor(value_list, device=self.device)
+            else:
+                raise ValueError(f"Unsupported clip type: {type(cfg.clip)}. Supported types are dict.")
+
     """
     Properties.
""" @@ -115,6 +125,8 @@ def process_actions(self, actions: torch.Tensor): binary_mask = actions < 0 # compute the command self._processed_actions = torch.where(binary_mask, self._close_command, self._open_command) + if self.cfg.clip is not None: + self._processed_actions = torch.clamp(self._processed_actions, min=self._clip[:, :, 0], max=self._clip[:, :, 1]) def reset(self, env_ids: Sequence[int] | None = None) -> None: self._raw_actions[env_ids] = 0.0 diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/joint_actions.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/joint_actions.py index ee5586b7f2..b51febcd17 100644 --- a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/joint_actions.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/joint_actions.py @@ -50,6 +50,8 @@ class JointAction(ActionTerm): """The scaling factor applied to the input action.""" _offset: torch.Tensor | float """The offset applied to the input action.""" + _clip: torch.Tensor + """The clip applied to the input action.""" def __init__(self, cfg: actions_cfg.JointActionCfg, env: ManagerBasedEnv) -> None: # initialize the action term @@ -94,6 +96,14 @@ def __init__(self, cfg: actions_cfg.JointActionCfg, env: ManagerBasedEnv) -> Non self._offset[:, index_list] = torch.tensor(value_list, device=self.device) else: raise ValueError(f"Unsupported offset type: {type(cfg.offset)}. Supported types are float and dict.") + # parse clip + if self.cfg.clip is not None: + if isinstance(cfg.clip, dict): + self._clip = torch.tensor([[-float('inf'), float('inf')]], device=self.device).expand(self.num_envs, self.action_dim, 2).clone() + index_list, _, value_list = string_utils.resolve_matching_names_values(self.cfg.clip, self._joint_names) + self._clip[:, index_list] = torch.tensor(value_list, device=self.device) + else: + raise ValueError(f"Unsupported clip type: {type(cfg.clip)}. Supported types are dict.") """ Properties. @@ -120,6 +130,9 @@ def process_actions(self, actions: torch.Tensor): self._raw_actions[:] = actions # apply the affine transformations self._processed_actions = self._raw_actions * self._scale + self._offset + # clip actions + if self.cfg.clip is not None: + self._processed_actions = torch.clamp(self._processed_actions, min=self._clip[:, :, 0], max=self._clip[:, :, 1]) def reset(self, env_ids: Sequence[int] | None = None) -> None: self._raw_actions[env_ids] = 0.0 diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/joint_actions_to_limits.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/joint_actions_to_limits.py index 3b31c9502a..1be6b666bb 100644 --- a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/joint_actions_to_limits.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/joint_actions_to_limits.py @@ -44,6 +44,8 @@ class JointPositionToLimitsAction(ActionTerm): """The articulation asset on which the action term is applied.""" _scale: torch.Tensor | float """The scaling factor applied to the input action.""" + _clip: torch.Tensor + """The clip applied to the input action.""" def __init__(self, cfg: actions_cfg.JointPositionToLimitsActionCfg, env: ManagerBasedEnv): # initialize the action term @@ -76,6 +78,14 @@ def __init__(self, cfg: actions_cfg.JointPositionToLimitsActionCfg, env: Manager self._scale[:, index_list] = torch.tensor(value_list, device=self.device) else: raise ValueError(f"Unsupported scale type: {type(cfg.scale)}. 
Supported types are float and dict.") + # parse clip + if self.cfg.clip is not None: + if isinstance(cfg.clip, dict): + self._clip = torch.tensor([[-float('inf'), float('inf')]], device=self.device).expand(self.num_envs, self.action_dim, 2).clone() + index_list, _, value_list = string_utils.resolve_matching_names_values(self.cfg.clip, self._joint_names) + self._clip[:, index_list] = torch.tensor(value_list, device=self.device) + else: + raise ValueError(f"Unsupported clip type: {type(cfg.clip)}. Supported types are dict.") """ Properties. @@ -102,6 +112,8 @@ def process_actions(self, actions: torch.Tensor): self._raw_actions[:] = actions # apply affine transformations self._processed_actions = self._raw_actions * self._scale + if self.cfg.clip is not None: + self._processed_actions = torch.clamp(self._processed_actions, min=self._clip[:, :, 0], max=self._clip[:, :, 1]) # rescale the position targets if configured # this is useful when the input actions are in the range [-1, 1] if self.cfg.rescale_to_limits: diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/non_holonomic_actions.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/non_holonomic_actions.py index fc9ed89d6e..30d1deddec 100644 --- a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/non_holonomic_actions.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/non_holonomic_actions.py @@ -11,6 +11,7 @@ import omni.log +import omni.isaac.lab.utils.string as string_utils from omni.isaac.lab.assets.articulation import Articulation from omni.isaac.lab.managers.action_manager import ActionTerm from omni.isaac.lab.utils.math import euler_xyz_from_quat @@ -59,6 +60,8 @@ class NonHolonomicAction(ActionTerm): """The scaling factor applied to the input action. Shape is (1, 2).""" _offset: torch.Tensor """The offset applied to the input action. Shape is (1, 2).""" + _clip: torch.Tensor + """The clip applied to the input action.""" def __init__(self, cfg: actions_cfg.NonHolonomicActionCfg, env: ManagerBasedEnv): # initialize the action term @@ -104,6 +107,14 @@ def __init__(self, cfg: actions_cfg.NonHolonomicActionCfg, env: ManagerBasedEnv) # save the scale and offset as tensors self._scale = torch.tensor(self.cfg.scale, device=self.device).unsqueeze(0) self._offset = torch.tensor(self.cfg.offset, device=self.device).unsqueeze(0) + # parse clip + if self.cfg.clip is not None: + if isinstance(cfg.clip, dict): + self._clip = torch.tensor([[-float('inf'), float('inf')]], device=self.device).expand(self.num_envs, self.action_dim, 2).clone() + index_list, _, value_list = string_utils.resolve_matching_names_values(self.cfg.clip, self._joint_names) + self._clip[:, index_list] = torch.tensor(value_list, device=self.device) + else: + raise ValueError(f"Unsupported clip type: {type(cfg.clip)}. Supported types are dict.") """ Properties. 
@@ -129,6 +140,9 @@ def process_actions(self, actions):
         # store the raw actions
         self._raw_actions[:] = actions
         self._processed_actions = self.raw_actions * self._scale + self._offset
+        # clip actions
+        if self.cfg.clip is not None:
+            self._processed_actions = torch.clamp(self._processed_actions, min=self._clip[:, :, 0], max=self._clip[:, :, 1])
 
     def apply_actions(self):
         # obtain current heading
diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/task_space_actions.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/task_space_actions.py
index a8a1108f50..96b653af50 100644
--- a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/task_space_actions.py
+++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/task_space_actions.py
@@ -12,6 +12,7 @@
 import omni.log
 
 import omni.isaac.lab.utils.math as math_utils
+import omni.isaac.lab.utils.string as string_utils
 from omni.isaac.lab.assets.articulation import Articulation
 from omni.isaac.lab.controllers.differential_ik import DifferentialIKController
 from omni.isaac.lab.managers.action_manager import ActionTerm
@@ -42,6 +43,8 @@ class DifferentialInverseKinematicsAction(ActionTerm):
     """The articulation asset on which the action term is applied."""
     _scale: torch.Tensor
     """The scaling factor applied to the input action. Shape is (1, action_dim)."""
+    _clip: torch.Tensor
+    """The clip applied to the input action."""
 
     def __init__(self, cfg: actions_cfg.DifferentialInverseKinematicsActionCfg, env: ManagerBasedEnv):
         # initialize the action term
@@ -101,6 +104,15 @@ def __init__(self, cfg: actions_cfg.DifferentialInverseKinematicsActionCfg, env:
         else:
             self._offset_pos, self._offset_rot = None, None
 
+        # parse clip
+        if self.cfg.clip is not None:
+            if isinstance(cfg.clip, dict):
+                self._clip = torch.tensor([[-float('inf'), float('inf')]], device=self.device).expand(self.num_envs, self.action_dim, 2).clone()
+                index_list, _, value_list = string_utils.resolve_matching_names_values(self.cfg.clip, self._joint_names)
+                self._clip[:, index_list] = torch.tensor(value_list, device=self.device)
+            else:
+                raise ValueError(f"Unsupported clip type: {type(cfg.clip)}. Supported types are dict.")
+
     """
     Properties.
     """
@@ -138,6 +150,8 @@ def process_actions(self, actions: torch.Tensor):
         # store the raw actions
         self._raw_actions[:] = actions
         self._processed_actions[:] = self.raw_actions * self._scale
+        if self.cfg.clip is not None:
+            self._processed_actions = torch.clamp(self._processed_actions, min=self._clip[:, :, 0], max=self._clip[:, :, 1])
         # obtain quantities from simulation
         ee_pos_curr, ee_quat_curr = self._compute_frame_pose()
         # set command into controller
diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/managers/manager_term_cfg.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/managers/manager_term_cfg.py
index 54c0b726d0..3d7e5a4e0b 100644
--- a/source/extensions/omni.isaac.lab/omni/isaac/lab/managers/manager_term_cfg.py
+++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/managers/manager_term_cfg.py
@@ -93,6 +93,9 @@ class for more details.
 
     debug_vis: bool = False
     """Whether to visualize debug information. Defaults to False."""
 
+    clip: dict[str, tuple] | None = None
+    """Clip range for the action (dict of regex expressions mapped to ``(min, max)`` tuples). Defaults to None."""
+
 ##
 # Command manager.
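
Usage sketch (not part of the patch): the new ``clip`` field on the action term configuration is a dictionary mapping joint-name regex expressions to ``(min, max)`` tuples; joints that match no key keep the default ``(-inf, inf)`` range set up by the parsing code above. The asset name, joint-name patterns, and ranges below are illustrative placeholders, not values taken from this diff.

import math

import omni.isaac.lab.envs.mdp as mdp

# Joint position action whose processed commands are clamped per joint.
# The dictionary keys are regex expressions resolved against the articulation's
# joint names (via string_utils.resolve_matching_names_values, as in the patch);
# the values are (min, max) tuples applied with torch.clamp in process_actions().
joint_pos_action = mdp.JointPositionActionCfg(
    asset_name="robot",          # illustrative asset name
    joint_names=[".*"],          # act on all joints
    scale=0.5,
    clip={
        ".*_hip_joint": (-1.0, 1.0),           # hypothetical joint-name patterns
        ".*_knee_joint": (-math.pi, math.pi),
    },
)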