{
    "name": "root",
    "gauges": {
        "Pyramids.Policy.Entropy.mean": {
            "value": 0.4003141224384308,
            "min": 0.39134469628334045,
            "max": 1.4944818019866943,
            "count": 32
        },
        "Pyramids.Policy.Entropy.sum": {
            "value": 11977.3984375,
            "min": 11652.6796875,
            "max": 45336.6015625,
            "count": 32
        },
        "Pyramids.Step.mean": {
            "value": 959935.0,
            "min": 29947.0,
            "max": 959935.0,
            "count": 32
        },
        "Pyramids.Step.sum": {
            "value": 959935.0,
            "min": 29947.0,
            "max": 959935.0,
            "count": 32
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
            "value": 0.5146921277046204,
            "min": -0.08552314341068268,
            "max": 0.574161171913147,
            "count": 32
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
            "value": 140.51095581054688,
            "min": -20.6110782623291,
            "max": 158.46847534179688,
            "count": 32
        },
        "Pyramids.Policy.RndValueEstimate.mean": {
            "value": 0.04151313379406929,
            "min": 0.0020496181678026915,
            "max": 0.2596645951271057,
            "count": 32
        },
        "Pyramids.Policy.RndValueEstimate.sum": {
            "value": 11.333085060119629,
            "min": 0.5492976903915405,
            "max": 62.57917022705078,
            "count": 32
        },
        "Pyramids.Losses.PolicyLoss.mean": {
            "value": 0.0687998960400196,
            "min": 0.0654457644291041,
            "max": 0.07439856213334549,
            "count": 32
        },
        "Pyramids.Losses.PolicyLoss.sum": {
            "value": 0.9631985445602743,
            "min": 0.488584716004472,
            "max": 1.0575224384796742,
            "count": 32
        },
        "Pyramids.Losses.ValueLoss.mean": {
            "value": 0.012694516616889912,
            "min": 0.000668431320429157,
            "max": 0.014908696715359661,
            "count": 32
        },
        "Pyramids.Losses.ValueLoss.sum": {
            "value": 0.17772323263645876,
            "min": 0.00868960716557904,
            "max": 0.20872175401503526,
            "count": 32
        },
        "Pyramids.Policy.LearningRate.mean": {
            "value": 1.6462530226807145e-05,
            "min": 1.6462530226807145e-05,
            "max": 0.0002952331301603857,
            "count": 32
        },
        "Pyramids.Policy.LearningRate.sum": {
            "value": 0.0002304754231753,
            "min": 0.0002304754231753,
            "max": 0.0037577932474022993,
            "count": 32
        },
        "Pyramids.Policy.Epsilon.mean": {
            "value": 0.10548747857142858,
            "min": 0.10548747857142858,
            "max": 0.19841104285714284,
            "count": 32
        },
        "Pyramids.Policy.Epsilon.sum": {
            "value": 1.4768247,
            "min": 1.3888772999999999,
            "max": 2.6525977,
            "count": 32
        },
        "Pyramids.Policy.Beta.mean": {
            "value": 0.0005581991092857145,
            "min": 0.0005581991092857145,
            "max": 0.009841263181428571,
            "count": 32
        },
        "Pyramids.Policy.Beta.sum": {
            "value": 0.007814787530000002,
            "min": 0.007814787530000002,
            "max": 0.12527451023,
            "count": 32
        },
        "Pyramids.Losses.RNDLoss.mean": {
            "value": 0.010585588403046131,
            "min": 0.010585588403046131,
            "max": 0.3223348557949066,
            "count": 32
        },
        "Pyramids.Losses.RNDLoss.sum": {
            "value": 0.1481982320547104,
            "min": 0.1481982320547104,
            "max": 2.2563440799713135,
            "count": 32
        },
        "Pyramids.Environment.EpisodeLength.mean": {
            "value": 399.72972972972974,
            "min": 332.7261904761905,
            "max": 987.09375,
            "count": 32
        },
        "Pyramids.Environment.EpisodeLength.sum": {
            "value": 29580.0,
            "min": 16746.0,
            "max": 33200.0,
            "count": 32
        },
        "Pyramids.Environment.CumulativeReward.mean": {
            "value": 1.51915402831258,
            "min": -0.9162387614769321,
            "max": 1.668447045192999,
            "count": 32
        },
        "Pyramids.Environment.CumulativeReward.sum": {
            "value": 112.41739809513092,
            "min": -29.231601782143116,
            "max": 141.81799884140491,
            "count": 32
        },
        "Pyramids.Policy.ExtrinsicReward.mean": {
            "value": 1.51915402831258,
            "min": -0.9162387614769321,
            "max": 1.668447045192999,
            "count": 32
        },
        "Pyramids.Policy.ExtrinsicReward.sum": {
            "value": 112.41739809513092,
            "min": -29.231601782143116,
            "max": 141.81799884140491,
            "count": 32
        },
        "Pyramids.Policy.RndReward.mean": {
            "value": 0.04313082571555955,
            "min": 0.037625356123317036,
            "max": 5.517900224117672,
            "count": 32
        },
        "Pyramids.Policy.RndReward.sum": {
            "value": 3.1916811029514065,
            "min": 3.1916811029514065,
            "max": 93.80430381000042,
            "count": 32
        },
        "Pyramids.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 32
        },
        "Pyramids.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 32
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1722328040",
        "python_version": "3.10.12 (main, Mar 22 2024, 16:50:05) [GCC 11.4.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
        "mlagents_version": "1.1.0.dev0",
        "mlagents_envs_version": "1.1.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "2.3.1+cu121",
        "numpy_version": "1.23.5",
        "end_time_seconds": "1722330167"
    },
    "total": 2127.1229638229997,
    "count": 1,
    "self": 0.3384459919998335,
    "children": {
        "run_training.setup": {
            "total": 0.08648732199981168,
            "count": 1,
            "self": 0.08648732199981168
        },
        "TrainerController.start_learning": {
            "total": 2126.698030509,
            "count": 1,
            "self": 1.3824298700274085,
            "children": {
                "TrainerController._reset_env": {
                    "total": 2.104344175000051,
                    "count": 1,
                    "self": 2.104344175000051
                },
                "TrainerController.advance": {
                    "total": 2123.0780259039725,
                    "count": 62872,
                    "self": 1.465674241945635,
                    "children": {
                        "env_step": {
                            "total": 1487.5903162240309,
                            "count": 62872,
                            "self": 1356.8307889290043,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 129.91240819697305,
                                    "count": 62872,
                                    "self": 4.8686691249797605,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 125.04373907199329,
                                            "count": 61645,
                                            "self": 125.04373907199329
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 0.8471190980535539,
                                    "count": 62871,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 2121.5744823409455,
                                            "count": 62871,
                                            "is_parallel": true,
                                            "self": 886.1328977869491,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.0021165709999877436,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0006889689998388349,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.0014276020001489087,
                                                                    "count": 8,
                                                                    "is_parallel": true,
                                                                    "self": 0.0014276020001489087
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.05638080100015941,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0007150040003125469,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.0005104289998598688,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0005104289998598688
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.05330223400005707,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.05330223400005707
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.0018531339999299234,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.00040642799990564527,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.0014467060000242782,
                                                                            "count": 8,
                                                                            "is_parallel": true,
                                                                            "self": 0.0014467060000242782
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 1235.4415845539963,
                                                    "count": 62870,
                                                    "is_parallel": true,
                                                    "self": 33.16189835814589,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 24.02163148696195,
                                                            "count": 62870,
                                                            "is_parallel": true,
                                                            "self": 24.02163148696195
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 1079.1638750779568,
                                                            "count": 62870,
                                                            "is_parallel": true,
                                                            "self": 1079.1638750779568
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 99.09417963093165,
                                                            "count": 62870,
                                                            "is_parallel": true,
                                                            "self": 20.39782680286021,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 78.69635282807144,
                                                                    "count": 502960,
                                                                    "is_parallel": true,
                                                                    "self": 78.69635282807144
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        },
                        "trainer_advance": {
                            "total": 634.022035437996,
                            "count": 62871,
                            "self": 2.696731340970018,
                            "children": {
                                "process_trajectory": {
                                    "total": 129.84177630302747,
                                    "count": 62871,
                                    "self": 129.7303246500278,
                                    "children": {
                                        "RLTrainer._checkpoint": {
                                            "total": 0.1114516529996763,
                                            "count": 1,
                                            "self": 0.1114516529996763
                                        }
                                    }
                                },
                                "_update_policy": {
                                    "total": 501.4835277939985,
                                    "count": 451,
                                    "self": 298.26667327698874,
                                    "children": {
                                        "TorchPPOOptimizer.update": {
                                            "total": 203.21685451700978,
                                            "count": 22485,
                                            "self": 203.21685451700978
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 1.5489999896090012e-06,
                    "count": 1,
                    "self": 1.5489999896090012e-06
                },
                "TrainerController._save_models": {
                    "total": 0.13322901099991213,
                    "count": 1,
                    "self": 0.00199322900016341,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.13123578199974872,
                            "count": 1,
                            "self": 0.13123578199974872
                        }
                    }
                }
            }
        }
    }
}