58 changes: 38 additions & 20 deletions scripts/benchmarks/benchmark_non_rl.py
@@ -56,19 +56,28 @@

"""Rest everything follows."""

# enable benchmarking extension
from isaacsim.core.utils.extensions import enable_extension

enable_extension("isaacsim.benchmark.services")

# Set the benchmark settings according to the inputs
import carb

settings = carb.settings.get_settings()
settings.set("/exts/isaacsim.benchmark.services/metrics/metrics_output_folder", args_cli.output_folder)
settings.set("/exts/isaacsim.benchmark.services/metrics/randomize_filename_prefix", True)

from isaacsim.benchmark.services import BaseIsaacBenchmark
try:
# enable benchmarking extension (Isaac Sim)
from isaacsim.core.utils.extensions import enable_extension
import carb
from isaacsim.benchmark.services import BaseIsaacBenchmark as _IsaacBenchmark

_ISAACSIM_BENCHMARK_AVAILABLE = True
except ModuleNotFoundError:
_ISAACSIM_BENCHMARK_AVAILABLE = False

if _ISAACSIM_BENCHMARK_AVAILABLE and simulation_app is not None:
BaseIsaacBenchmark = _IsaacBenchmark
_BENCHMARK_SERVICES_AVAILABLE = True
enable_extension("isaacsim.benchmark.services")
# Set the benchmark settings according to the inputs
settings = carb.settings.get_settings()
settings.set("/exts/isaacsim.benchmark.services/metrics/metrics_output_folder", args_cli.output_folder)
settings.set("/exts/isaacsim.benchmark.services/metrics/randomize_filename_prefix", True)
else:
from scripts.benchmarks.kitless_reporter import KitlessBenchmark as BaseIsaacBenchmark

_BENCHMARK_SERVICES_AVAILABLE = False

sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../.."))

@@ -104,9 +113,10 @@


# Create the benchmark
benchmark = BaseIsaacBenchmark(
benchmark_name="benchmark_non_rl",
workflow_metadata={
benchmark_backend = args_cli.benchmark_backend if _BENCHMARK_SERVICES_AVAILABLE else "kitless"
benchmark_kwargs = {
"benchmark_name": "benchmark_non_rl",
"workflow_metadata": {
"metadata": [
{"name": "task", "data": args_cli.task},
{"name": "seed", "data": args_cli.seed},
@@ -117,8 +127,11 @@
{"name": "Newton Info", "data": get_newton_version()},
]
},
backend_type=args_cli.benchmark_backend,
)
"backend_type": benchmark_backend,
}
if not _BENCHMARK_SERVICES_AVAILABLE:
benchmark_kwargs["output_dir"] = args_cli.output_folder
benchmark = BaseIsaacBenchmark(**benchmark_kwargs)


@hydra_task_config(args_cli.task, None)
@@ -165,7 +178,11 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: dict):
num_frames = 0
# log frame times
step_times = []
while simulation_app.is_running():
if simulation_app is None:
app_running = True
else:
app_running = simulation_app.is_running()
while app_running:
while num_frames < args_cli.num_frames:
# get upper and lower bounds of action space, sample actions randomly on this interval
action_high = 1
@@ -218,4 +235,5 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: dict):
# run the main function
main()
# close sim app
simulation_app.close()
if simulation_app is not None:
simulation_app.close()
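
Note: both scripts now fall back to scripts/benchmarks/kitless_reporter.KitlessBenchmark when the Isaac Sim benchmarking services cannot be imported, and they only exercise the keyword arguments assembled above (benchmark_name, workflow_metadata, backend_type, output_dir) plus an optional store_metadata_item call. The sketch below is a hypothetical outline of that minimal interface, inferred solely from the call sites in this diff; the actual kitless_reporter implementation is not shown here and may differ.

# Hypothetical interface sketch for the kitless fallback reporter, inferred
# only from how these benchmark scripts construct and use it.
class KitlessBenchmark:
    def __init__(self, benchmark_name, workflow_metadata, backend_type, output_dir=None):
        self.benchmark_name = benchmark_name
        # workflow_metadata carries a list of {"name": ..., "data": ...} items.
        self.metadata = list(workflow_metadata.get("metadata", []))
        self.backend_type = backend_type  # "kitless" when the services extension is absent
        self.output_dir = output_dir

    def store_metadata_item(self, name, data):
        # Called guardedly via hasattr() in benchmark_rlgames.py.
        self.metadata.append({"name": name, "data": data})
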
85 changes: 72 additions & 13 deletions scripts/benchmarks/benchmark_rlgames.py
@@ -32,6 +32,7 @@
choices=["LocalLogMetrics", "JSONFileMetrics", "OsmoKPIFile", "OmniPerfKPIFile"],
help="Benchmarking backend options, defaults OmniPerfKPIFile",
)
parser.add_argument("--output_folder", type=str, default=None, help="Output folder for the benchmark.")

# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
@@ -54,11 +55,23 @@

"""Rest everything follows."""

# enable benchmarking extension
from isaacsim.core.utils.extensions import enable_extension
try:
# enable benchmarking extension (Isaac Sim)
from isaacsim.core.utils.extensions import enable_extension
from isaacsim.benchmark.services import BaseIsaacBenchmark as _IsaacBenchmark

enable_extension("isaacsim.benchmark.services")
from isaacsim.benchmark.services import BaseIsaacBenchmark
_ISAACSIM_BENCHMARK_AVAILABLE = True
except ModuleNotFoundError:
_ISAACSIM_BENCHMARK_AVAILABLE = False

if _ISAACSIM_BENCHMARK_AVAILABLE and simulation_app is not None:
BaseIsaacBenchmark = _IsaacBenchmark
_BENCHMARK_SERVICES_AVAILABLE = True
enable_extension("isaacsim.benchmark.services")
else:
from scripts.benchmarks.kitless_reporter import KitlessBenchmark as BaseIsaacBenchmark

_BENCHMARK_SERVICES_AVAILABLE = False

imports_time_begin = time.perf_counter_ns()

@@ -107,18 +120,22 @@


# Create the benchmark
benchmark = BaseIsaacBenchmark(
benchmark_name="benchmark_rlgames_train",
workflow_metadata={
benchmark_backend = args_cli.benchmark_backend if _BENCHMARK_SERVICES_AVAILABLE else "kitless"
benchmark_kwargs = {
"benchmark_name": "benchmark_rlgames_train",
"workflow_metadata": {
"metadata": [
{"name": "task", "data": args_cli.task},
{"name": "seed", "data": args_cli.seed},
{"name": "num_envs", "data": args_cli.num_envs},
{"name": "max_iterations", "data": args_cli.max_iterations},
]
},
backend_type=args_cli.benchmark_backend,
)
"backend_type": benchmark_backend,
}
if not _BENCHMARK_SERVICES_AVAILABLE:
benchmark_kwargs["output_dir"] = args_cli.output_folder
benchmark = BaseIsaacBenchmark(**benchmark_kwargs)


@hydra_task_config(args_cli.task, "rl_games_cfg_entry_point")
@@ -136,10 +153,12 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: dict):

# process distributed
world_rank = 0
world_size = int(os.getenv("WORLD_SIZE", 1))
if args_cli.distributed:
env_cfg.sim.device = f"cuda:{app_launcher.local_rank}"
agent_cfg["params"]["config"]["device"] = f"cuda:{app_launcher.local_rank}"
world_rank = app_launcher.global_rank
world_size = int(os.getenv("WORLD_SIZE", 1))

# specify directory for logging experiments
log_root_path = os.path.join("logs", "rl_games", agent_cfg["params"]["config"]["name"])
@@ -227,16 +246,55 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: dict):
tensorboard_log_dir = os.path.join(log_root_path, log_dir, "summaries")
log_data = parse_tf_logs(tensorboard_log_dir)

def _detect_step_time_scale(step_time: list | np.ndarray, step_fps: list | np.ndarray):
"""Detect whether step times are in seconds or milliseconds.

Returns a scale factor to convert raw values to milliseconds.
"""
override = os.getenv("RL_GAMES_STEP_TIME_UNIT", "").strip().lower()
if override in {"ms", "millis", "milliseconds"}:
return 1.0
if override in {"s", "sec", "secs", "seconds"}:
return 1000.0
times = np.array(step_time, dtype=float)
fps = np.array(step_fps, dtype=float)
valid = (times > 0) & np.isfinite(times) & (fps > 0) & np.isfinite(fps)
if not np.any(valid):
return 1000.0
median_time = float(np.median(times[valid]))
median_fps = float(np.median(fps[valid]))
if median_fps <= 0:
return 1000.0
diff_seconds = abs((1.0 / median_time) - median_fps)
diff_ms = abs((1000.0 / median_time) - median_fps)
if diff_ms < diff_seconds:
return 1.0
return 1000.0

time_scale = _detect_step_time_scale(log_data["performance/step_time"], log_data["performance/step_fps"])

# prepare RL timing dict
rl_training_times = {
"Environment only step time": log_data["performance/step_time"],
"Environment + Inference step time": log_data["performance/step_inference_time"],
"Environment + Inference + Policy update time": log_data["performance/rl_update_time"],
"Environment only step time": (np.array(log_data["performance/step_time"]) * time_scale).tolist(),
"Environment + Inference step time": (
np.array(log_data["performance/step_inference_time"]) * time_scale
).tolist(),
"Environment + Inference + Policy update time": (
np.array(log_data["performance/rl_update_time"]) * time_scale
).tolist(),
"Environment only FPS": log_data["performance/step_fps"],
"Environment + Inference FPS": log_data["performance/step_inference_fps"],
"Environment + Inference + Policy update FPS": log_data["performance/step_inference_rl_update_fps"],
}

horizon_length = agent_cfg.get("params", {}).get("config", {}).get("horizon_length")
steps_processed = None
if horizon_length is not None:
steps_per_iter = env.unwrapped.num_envs * int(horizon_length) * world_size
steps_processed = steps_per_iter * len(log_data["performance/step_time"])
if steps_processed is not None and hasattr(benchmark, "store_metadata_item"):
benchmark.store_metadata_item("num_frames", int(steps_processed))

# log additional metrics to benchmark services
log_app_start_time(benchmark, (app_start_time_end - app_start_time_begin) / 1e6)
log_python_imports_time(benchmark, (imports_time_end - imports_time_begin) / 1e6)
@@ -258,4 +316,5 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: dict):
# run the main function
main()
# close sim app
simulation_app.close()
if simulation_app is not None:
simulation_app.close()
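
The _detect_step_time_scale heuristic added above compares the logged step times against the logged FPS channel under both a seconds and a milliseconds interpretation and keeps whichever agrees better. Below is a minimal, self-contained sketch of that core comparison on invented numbers for a roughly 60 FPS run; it is not the PR's function, only an illustration of its arithmetic.

# Standalone illustration of the unit-detection arithmetic; all values are made up.
import numpy as np

def detect_scale(step_times, step_fps):
    t = float(np.median(step_times))
    f = float(np.median(step_fps))
    # If 1000/t matches the FPS channel better than 1/t, the times are already in ms.
    return 1.0 if abs(1000.0 / t - f) < abs(1.0 / t - f) else 1000.0

print(detect_scale([0.0167, 0.0166, 0.0168], [60.0, 60.2, 59.8]))  # -> 1000.0 (times logged in seconds)
print(detect_scale([16.7, 16.6, 16.8], [60.0, 60.2, 59.8]))        # -> 1.0 (times already in milliseconds)
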