scripts/environments/random_agent.py (3 changes: 1 addition & 2 deletions)
@@ -18,7 +18,6 @@
 )
 parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
 parser.add_argument("--task", type=str, default=None, help="Name of the task.")
-parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
 # append AppLauncher cli args
 AppLauncher.add_app_launcher_args(parser)
[Review comment, Contributor] need to add new visualizer flag to AppLauncher
 # parse the arguments
@@ -52,8 +51,8 @@ def main():
         device=args_cli.device,
         num_envs=args_cli.num_envs,
         use_fabric=not args_cli.disable_fabric,
-        newton_visualizer=args_cli.newton_visualizer,
     )
+
     # create environment
     env = gym.make(args_cli.task, cfg=env_cfg)
 
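The Contributor review comment above argues that the removed --newton_visualizer flag should instead be registered centrally in AppLauncher, so every script that calls AppLauncher.add_app_launcher_args(parser) inherits it. A minimal sketch of that idea, assuming the flag keeps its current name and help text (the placement inside AppLauncher.add_app_launcher_args is hypothetical; the real method in isaaclab.app defines many other launcher arguments):

    # Hypothetical sketch: register the flag once in AppLauncher so every
    # script that calls AppLauncher.add_app_launcher_args(parser) picks it up.
    import argparse

    def add_app_launcher_args(parser: argparse.ArgumentParser) -> None:
        # ... existing AppLauncher launcher arguments ...
        parser.add_argument(
            "--newton_visualizer",
            action="store_true",
            default=False,
            help="Enable Newton rendering.",
        )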
scripts/environments/zero_agent.py (3 changes: 1 addition & 2 deletions)
@@ -18,7 +18,6 @@
 )
 parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
 parser.add_argument("--task", type=str, default=None, help="Name of the task.")
-parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
 # append AppLauncher cli args
 AppLauncher.add_app_launcher_args(parser)
 # parse the arguments
@@ -52,8 +51,8 @@ def main():
         device=args_cli.device,
         num_envs=args_cli.num_envs,
         use_fabric=not args_cli.disable_fabric,
-        newton_visualizer=args_cli.newton_visualizer,
     )
+
     # create environment
     env = gym.make(args_cli.task, cfg=env_cfg)
 
scripts/reinforcement_learning/rl_games/play.py (8 changes: 2 additions & 6 deletions)
@@ -32,7 +32,6 @@
     help="When no checkpoint provided, use the last saved model. Otherwise use the best saved model.",
 )
 parser.add_argument("--real-time", action="store_true", default=False, help="Run in real-time, if possible.")
-parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
 # append AppLauncher cli args
 AppLauncher.add_app_launcher_args(parser)
 # parse the arguments
@@ -80,12 +79,9 @@ def main():
     task_name = args_cli.task.split(":")[-1]
     # parse env configuration
     env_cfg = parse_env_cfg(
-        args_cli.task,
-        device=args_cli.device,
-        num_envs=args_cli.num_envs,
-        use_fabric=not args_cli.disable_fabric,
-        newton_visualizer=args_cli.newton_visualizer,
+        args_cli.task, device=args_cli.device, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
     )
+
     agent_cfg = load_cfg_from_registry(args_cli.task, "rl_games_cfg_entry_point")
 
     # specify directory for logging experiments
scripts/reinforcement_learning/rl_games/train.py (2 changes: 0 additions & 2 deletions)
@@ -38,7 +38,6 @@
     const=True,
     help="if toggled, this experiment will be tracked with Weights and Biases",
 )
-parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
 # append AppLauncher cli args
 AppLauncher.add_app_launcher_args(parser)
 # parse the arguments
@@ -90,7 +89,6 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: dict):
     # override configurations with non-hydra CLI arguments
     env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs
     env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device
-    env_cfg.sim.enable_newton_rendering = args_cli.newton_visualizer
 
     # randomly sample a seed if seed = -1
     if args_cli.seed == -1:
scripts/reinforcement_learning/rsl_rl/play.py (15 changes: 0 additions & 15 deletions)
@@ -34,7 +34,6 @@
     help="Use the pre-trained checkpoint from Nucleus.",
 )
 parser.add_argument("--real-time", action="store_true", default=False, help="Run in real-time, if possible.")
-parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
 # append RSL-RL cli arguments
 cli_args.add_rsl_rl_args(parser)
 # append AppLauncher cli args
@@ -96,7 +95,6 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: RslRlBaseRun
     # note: certain randomizations occur in the environment initialization so we set the seed here
     env_cfg.seed = agent_cfg.seed
     env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device
-    env_cfg.sim.enable_newton_rendering = args_cli.newton_visualizer
 
     # specify directory for logging experiments
     log_root_path = os.path.join("logs", "rsl_rl", agent_cfg.experiment_name)
@@ -117,19 +115,6 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: RslRlBaseRun
     # set the log directory for the environment (works for all environment types)
     env_cfg.log_dir = log_dir
 
-    # Set play mode for Newton viewer if using Newton visualizer
-    if args_cli.newton_visualizer:
-        # Set visualizer to play mode in Newton config
-        if hasattr(env_cfg.sim, "newton_cfg"):
-            env_cfg.sim.newton_cfg.visualizer_train_mode = False
-        else:
-            # Create newton_cfg if it doesn't exist
-            from isaaclab.sim._impl.newton_manager_cfg import NewtonCfg
-
-            newton_cfg = NewtonCfg()
-            newton_cfg.visualizer_train_mode = False
-            env_cfg.sim.newton_cfg = newton_cfg
-
     # create isaac environment
     env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)
scripts/reinforcement_learning/rsl_rl/train.py (2 changes: 0 additions & 2 deletions)
@@ -32,7 +32,6 @@
     "--distributed", action="store_true", default=False, help="Run training with multiple GPUs or nodes."
 )
 parser.add_argument("--export_io_descriptors", action="store_true", default=False, help="Export IO descriptors.")
-parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
 # append RSL-RL cli arguments
 cli_args.add_rsl_rl_args(parser)
 # append AppLauncher cli args
@@ -119,7 +118,6 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: RslRlBaseRun
     # note: certain randomizations occur in the environment initialization so we set the seed here
     env_cfg.seed = agent_cfg.seed
     env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device
-    env_cfg.sim.enable_newton_rendering = args_cli.newton_visualizer
 
     # multi-gpu training configuration
     if args_cli.distributed:
scripts/reinforcement_learning/sb3/play.py (7 changes: 1 addition & 6 deletions)
@@ -39,7 +39,6 @@
     default=False,
     help="Use a slower SB3 wrapper but keep all the extra training info.",
 )
-parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
 # append AppLauncher cli args
 AppLauncher.add_app_launcher_args(parser)
 # parse the arguments
@@ -83,11 +82,7 @@ def main():
     """Play with stable-baselines agent."""
     # parse configuration
     env_cfg = parse_env_cfg(
-        args_cli.task,
-        device=args_cli.device,
-        num_envs=args_cli.num_envs,
-        use_fabric=not args_cli.disable_fabric,
-        newton_visualizer=args_cli.newton_visualizer,
+        args_cli.task, device=args_cli.device, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
     )
 
     task_name = args_cli.task.split(":")[-1]
scripts/reinforcement_learning/sb3/train.py (2 changes: 0 additions & 2 deletions)
@@ -32,7 +32,6 @@
     default=False,
     help="Use a slower SB3 wrapper but keep all the extra training info.",
 )
-parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
 # append AppLauncher cli args
 AppLauncher.add_app_launcher_args(parser)
 # parse the arguments
@@ -113,7 +112,6 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: dict):
     # note: certain randomizations occur in the environment initialization so we set the seed here
     env_cfg.seed = agent_cfg["seed"]
     env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device
-    env_cfg.sim.enable_newton_rendering = args_cli.newton_visualizer
 
     # directory for logging into
     run_info = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
scripts/reinforcement_learning/skrl/play.py (7 changes: 1 addition & 6 deletions)
@@ -46,7 +46,6 @@
     help="The RL algorithm used for training the skrl agent.",
 )
 parser.add_argument("--real-time", action="store_true", default=False, help="Run in real-time, if possible.")
-parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
 
 # append AppLauncher cli args
 AppLauncher.add_app_launcher_args(parser)
@@ -112,11 +111,7 @@ def main():
 
     # parse configuration
     env_cfg = parse_env_cfg(
-        args_cli.task,
-        device=args_cli.device,
-        num_envs=args_cli.num_envs,
-        use_fabric=not args_cli.disable_fabric,
-        newton_visualizer=args_cli.newton_visualizer,
+        args_cli.task, device=args_cli.device, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
     )
     try:
         experiment_cfg = load_cfg_from_registry(task_name, f"skrl_{algorithm}_cfg_entry_point")
scripts/reinforcement_learning/skrl/train.py (2 changes: 0 additions & 2 deletions)
@@ -44,7 +44,6 @@
     choices=["AMP", "PPO", "IPPO", "MAPPO"],
     help="The RL algorithm used for training the skrl agent.",
 )
-parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
 
 # append AppLauncher cli args
 AppLauncher.add_app_launcher_args(parser)
@@ -113,7 +112,6 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: dict):
     # override configurations with non-hydra CLI arguments
     env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs
     env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device
-    env_cfg.sim.enable_newton_rendering = args_cli.newton_visualizer
 
     # multi-gpu training config
     if args_cli.distributed:
scripts/sim2sim_transfer/rsl_rl_transfer.py (3 changes: 1 addition & 2 deletions)
@@ -29,7 +29,6 @@
     help="Use the pre-trained checkpoint from Nucleus.",
 )
 parser.add_argument("--real-time", action="store_true", default=False, help="Run in real-time, if possible.")
-parser.add_argument("--newton_visualizer", action="store_true", default=False, help="Enable Newton rendering.")
 # Joint ordering arguments
 parser.add_argument(
     "--policy_transfer_file",
@@ -147,8 +146,8 @@ def main():
         device=args_cli.device,
         num_envs=args_cli.num_envs,
         use_fabric=not args_cli.disable_fabric,
-        newton_visualizer=args_cli.newton_visualizer,
     )
+
     agent_cfg: RslRlOnPolicyRunnerCfg = cli_args.parse_rsl_rl_cfg(task_name, args_cli)
 
     # specify directory for logging experiments
source/isaaclab/isaaclab/envs/ui/base_env_window.py (34 changes: 34 additions & 0 deletions)
@@ -66,6 +66,9 @@ def __init__(self, env: ManagerBasedEnv, window_name: str = "IsaacLab"):
         # Listeners for environment selection changes
         self._ui_listeners: list[ManagerLiveVisualizer] = []
 
+        # Check if any visualizer has live plots enabled
+        self._enable_live_plots = self._check_live_plots_enabled()
+
         print("Creating window for environment.")
         # create window for UI
         self.ui_window = omni.ui.Window(
@@ -102,6 +105,29 @@ def __del__(self):
             self.ui_window.destroy()
             self.ui_window = None
 
+    """
+    Helper methods.
+    """
+
+    def _check_live_plots_enabled(self) -> bool:
+        """Check if any visualizer has live plots enabled.
+
+        Returns:
+            True if any visualizer supports and has live plots enabled, False otherwise.
+        """
+        # Check if simulation has visualizers
+        if not hasattr(self.env.sim, "_visualizers"):
+            return False
+
+        # Check each visualizer
+        for visualizer in self.env.sim._visualizers:
+            # Check if visualizer supports live plots and has it enabled
+            if hasattr(visualizer, "cfg") and hasattr(visualizer.cfg, "enable_live_plots"):
+                if visualizer.supports_live_plots() and visualizer.cfg.enable_live_plots:
+                    return True
+
+        return False
+
     """
     Build sub-sections of the UI.
     """
@@ -421,6 +447,11 @@ def _create_debug_vis_ui_element(self, name: str, elem: object):
         is_checked = (hasattr(elem.cfg, "debug_vis") and elem.cfg.debug_vis) or (
             hasattr(elem, "debug_vis") and elem.debug_vis
         )
+
+        # Auto-enable live plots for ManagerLiveVisualizer if visualizer has enable_live_plots=True
+        if isinstance(elem, ManagerLiveVisualizer) and self._enable_live_plots:
+            is_checked = True
+
         self.ui_window_elements[f"{name}_cb"] = SimpleCheckBox(
             model=omni.ui.SimpleBoolModel(),
             enabled=elem.has_debug_vis_implementation,
@@ -435,6 +466,9 @@ def _create_debug_vis_ui_element(self, name: str, elem: object):
         if not elem.set_vis_frame(self.ui_window_elements[f"{name}_panel"]):
             print(f"Frame failed to set for ManagerLiveVisualizer: {name}")
 
+        # Pass the enable_live_plots flag to the visualizer
+        elem._auto_expand_frames = self._enable_live_plots
+
         # Add listener for environment selection changes
         if isinstance(elem, ManagerLiveVisualizer):
             self._ui_listeners.append(elem)
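The new _check_live_plots_enabled helper duck-types against env.sim._visualizers: a visualizer opts in only if it exposes a cfg with an enable_live_plots field and implements supports_live_plots(). A minimal sketch of a visualizer shape that would satisfy this check (class names are hypothetical; only the probed attributes come from this diff):

    # Hypothetical visualizer satisfying _check_live_plots_enabled().
    from dataclasses import dataclass

    @dataclass
    class ExampleVisualizerCfg:
        enable_live_plots: bool = True  # opt in to live plots in the UI

    class ExampleVisualizer:
        def __init__(self, cfg: ExampleVisualizerCfg):
            self.cfg = cfg

        def supports_live_plots(self) -> bool:
            # may depend on the visualizer's mode (e.g., train vs. play)
            return True

If such a visualizer is present, the base window pre-checks the live-plot checkboxes and sets _auto_expand_frames on each ManagerLiveVisualizer panel.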