diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/actions_cfg.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/actions_cfg.py
index 6d07ceb855..76d9fb7818 100644
--- a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/actions_cfg.py
+++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/actions_cfg.py
@@ -29,6 +29,8 @@ class JointActionCfg(ActionTermCfg):
     """Scale factor for the action (float or dict of regex expressions). Defaults to 1.0."""
     offset: float | dict[str, float] = 0.0
     """Offset factor for the action (float or dict of regex expressions). Defaults to 0.0."""
+    clip: dict[str, tuple] | None = None
+    """Clip range for the action (dict of regex expressions mapping joint names to (min, max) tuples). Defaults to None."""
     preserve_order: bool = False
     """Whether to preserve the order of the joint names in the action output. Defaults to False."""
 
diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/joint_actions.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/joint_actions.py
index ee5586b7f2..864c05dbf2 100644
--- a/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/joint_actions.py
+++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/actions/joint_actions.py
@@ -50,6 +50,8 @@ class JointAction(ActionTerm):
     """The scaling factor applied to the input action."""
     _offset: torch.Tensor | float
     """The offset applied to the input action."""
+    _clip: dict[str, tuple] | None = None
+    """The clip range applied to the input action."""
 
     def __init__(self, cfg: actions_cfg.JointActionCfg, env: ManagerBasedEnv) -> None:
         # initialize the action term
@@ -94,6 +96,12 @@ def __init__(self, cfg: actions_cfg.JointActionCfg, env: ManagerBasedEnv) -> None:
             self._offset[:, index_list] = torch.tensor(value_list, device=self.device)
         else:
             raise ValueError(f"Unsupported offset type: {type(cfg.offset)}. Supported types are float and dict.")
+        # parse clip
+        if cfg.clip is not None:
+            if isinstance(cfg.clip, dict):
+                self._clip = cfg.clip
+            else:
+                raise ValueError(f"Unsupported clip type: {type(cfg.clip)}. Supported type is dict.")
 
     """
     Properties.
@@ -120,6 +128,13 @@ def process_actions(self, actions: torch.Tensor):
         self._raw_actions[:] = actions
         # apply the affine transformations
         self._processed_actions = self._raw_actions * self._scale + self._offset
+        # clip actions
+        if self._clip is not None:
+            # resolve the dictionary config into joint indices and (min, max) ranges
+            index_list, _, value_list = string_utils.resolve_matching_names_values(self._clip, self._joint_names)
+            for index in range(len(index_list)):
+                min_value, max_value = value_list[index]
+                self._processed_actions[:, index_list[index]].clip_(min_value, max_value)
 
     def reset(self, env_ids: Sequence[int] | None = None) -> None:
         self._raw_actions[env_ids] = 0.0
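
A usage sketch, not part of the diff: setting the new `clip` field on an action term config that inherits it from `JointActionCfg`. The asset name, joint-name regex patterns, and clip ranges below are illustrative placeholders, and the import assumes the `mdp` module layout shown in the diff paths.

import omni.isaac.lab.envs.mdp as mdp

# clip maps joint-name regex patterns to (min, max) tuples; matching joints
# have their actions clamped after the scale/offset transform is applied
joint_pos_action = mdp.JointPositionActionCfg(
    asset_name="robot",
    joint_names=[".*"],
    scale=0.5,
    clip={".*_shoulder_.*": (-1.0, 1.0), ".*_elbow_.*": (-0.5, 0.5)},
)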
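
And a standalone sketch of the clipping step in `process_actions`, assuming the regex patterns have already been resolved to joint indices; the tensor values and indices are made up to show the semantics of the per-joint in-place `clip_` loop.

import torch

# two envs, two joints: processed actions after the scale/offset transform
processed = torch.tensor([[2.0, -3.0], [0.2, 0.4]])
# per-joint indices and (min, max) ranges, in the shape the loop expects
index_list, value_list = [0, 1], [(-1.0, 1.0), (-0.5, 0.5)]
for index in range(len(index_list)):
    min_value, max_value = value_list[index]
    # a column slice is a view, so the in-place clip_ mutates `processed`
    processed[:, index_list[index]].clip_(min_value, max_value)
print(processed)  # tensor([[ 1.0000, -0.5000], [ 0.2000,  0.4000]])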