# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
import glob
import os
from tensorboard.backend.event_processing import event_accumulator
from isaacsim.benchmark.services import BaseIsaacBenchmark
from isaacsim.benchmark.services.metrics.measurements import DictMeasurement, ListMeasurement, SingleMeasurement
def parse_tf_logs(log_dir: str) -> dict:
    """Search for the latest tfevents file in ``log_dir`` and return the
    tensorboard scalar logs in a dictionary.

    Args:
        log_dir: Directory used to search for tfevents files.

    Returns:
        Mapping from scalar tag name to the list of logged values, in event order.

    Raises:
        FileNotFoundError: If no tfevents file exists under ``log_dir``.
    """
    # search log directory for the most recently created event file
    list_of_files = glob.glob(f"{log_dir}/events*")
    if not list_of_files:
        # max() on an empty sequence would raise an opaque ValueError; fail with a clear message instead
        raise FileNotFoundError(f"No tfevents files found in directory: '{log_dir}'.")
    latest_file = max(list_of_files, key=os.path.getctime)
    # load all scalar series from the event file
    ea = event_accumulator.EventAccumulator(latest_file)
    ea.Reload()
    return {tag: [event.value for event in ea.Scalars(tag)] for tag in ea.Tags()["scalars"]}
#############################
# logging benchmark metrics #
#############################
def log_min_max_mean_stats(benchmark: BaseIsaacBenchmark, values: dict):
    """Store min, max, and mean of each series in ``values`` as runtime measurements (ms)."""
    for series_name, series in values.items():
        stats = {
            "Min": min(series),
            "Max": max(series),
            "Mean": sum(series) / len(series),
        }
        for label, stat in stats.items():
            benchmark.store_custom_measurement(
                "runtime", SingleMeasurement(name=f"{label} {series_name}", value=stat, unit="ms")
            )
def log_app_start_time(benchmark: BaseIsaacBenchmark, value: float):
    """Record the app launch time (ms) as a startup measurement."""
    benchmark.store_custom_measurement(
        "startup", SingleMeasurement(name="App Launch Time", value=value, unit="ms")
    )
def log_python_imports_time(benchmark: BaseIsaacBenchmark, value: float):
    """Record the time spent on Python imports (ms) as a startup measurement."""
    benchmark.store_custom_measurement(
        "startup", SingleMeasurement(name="Python Imports Time", value=value, unit="ms")
    )
def log_task_start_time(benchmark: BaseIsaacBenchmark, value: float):
    """Record the task creation and start time (ms) as a startup measurement."""
    benchmark.store_custom_measurement(
        "startup", SingleMeasurement(name="Task Creation and Start Time", value=value, unit="ms")
    )
def log_scene_creation_time(benchmark: BaseIsaacBenchmark, value: float):
    """Record the scene creation time (ms) as a startup measurement."""
    benchmark.store_custom_measurement(
        "startup", SingleMeasurement(name="Scene Creation Time", value=value, unit="ms")
    )
def log_simulation_start_time(benchmark: BaseIsaacBenchmark, value: float):
    """Record the simulation start time (ms) as a startup measurement."""
    benchmark.store_custom_measurement(
        "startup", SingleMeasurement(name="Simulation Start Time", value=value, unit="ms")
    )
def log_total_start_time(benchmark: BaseIsaacBenchmark, value: float):
    """Record the total time from launch to training start (ms) as a startup measurement."""
    benchmark.store_custom_measurement(
        "startup", SingleMeasurement(name="Total Start Time (Launch to Train)", value=value, unit="ms")
    )
def log_runtime_step_times(benchmark: BaseIsaacBenchmark, value: dict, compute_stats=True):
    """Record per-step frametime series as a runtime measurement.

    When ``compute_stats`` is true, also record min/max/mean for each series.
    """
    frametimes = DictMeasurement(name="Step Frametimes", value=value)
    benchmark.store_custom_measurement("runtime", frametimes)
    if not compute_stats:
        return
    log_min_max_mean_stats(benchmark, value)
def log_rl_policy_rewards(benchmark: BaseIsaacBenchmark, value: list):
    """Record the full training reward series and its maximum as train measurements."""
    series = ListMeasurement(name="Rewards", value=value)
    benchmark.store_custom_measurement("train", series)
    # best reward observed over training
    peak = SingleMeasurement(name="Max Rewards", value=max(value), unit="float")
    benchmark.store_custom_measurement("train", peak)
def log_rl_policy_episode_lengths(benchmark: BaseIsaacBenchmark, value: list):
    """Record the full episode-length series and its maximum as train measurements."""
    series = ListMeasurement(name="Episode Lengths", value=value)
    benchmark.store_custom_measurement("train", series)
    # longest episode observed over training
    peak = SingleMeasurement(name="Max Episode Lengths", value=max(value), unit="float")
    benchmark.store_custom_measurement("train", peak)