{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6788015365600586,
"min": 0.6584268808364868,
"max": 1.430067777633667,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 20548.6796875,
"min": 19573.71484375,
"max": 43382.53515625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989962.0,
"min": 29952.0,
"max": 989962.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989962.0,
"min": 29952.0,
"max": 989962.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.22044672071933746,
"min": -0.1042863130569458,
"max": 0.22044672071933746,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 56.43436050415039,
"min": -25.028715133666992,
"max": 56.43436050415039,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.13427922129631042,
"min": -0.008350392803549767,
"max": 0.5738222002983093,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 34.37548065185547,
"min": -2.1042990684509277,
"max": 135.99586486816406,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06644048417803601,
"min": 0.06420781042447042,
"max": 0.07488525516582367,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9301667784925041,
"min": 0.5111314987667753,
"max": 1.0483935723215314,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012587026887788035,
"min": 6.051200245067871e-05,
"max": 0.013551025983210578,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.17621837642903249,
"min": 0.0007866560318588232,
"max": 0.1897143637649481,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.770997409699998e-06,
"min": 7.770997409699998e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010879396373579997,
"min": 0.00010879396373579997,
"max": 0.0033817499727500995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10259030000000001,
"min": 0.10259030000000001,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4362642,
"min": 1.3691136000000002,
"max": 2.5272498999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026877096999999996,
"min": 0.00026877096999999996,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037627935799999994,
"min": 0.0037627935799999994,
"max": 0.11275226500999996,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010249127633869648,
"min": 0.009904943406581879,
"max": 0.45586416125297546,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14348778128623962,
"min": 0.1386692076921463,
"max": 3.191049098968506,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 630.1020408163265,
"min": 601.9166666666666,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30875.0,
"min": 15984.0,
"max": 33029.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.8391469041911923,
"min": -1.0000000521540642,
"max": 0.8639813574940659,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 41.11819830536842,
"min": -32.000001668930054,
"max": 41.11819830536842,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.8391469041911923,
"min": -1.0000000521540642,
"max": 0.8639813574940659,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 41.11819830536842,
"min": -32.000001668930054,
"max": 41.11819830536842,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0664938372794161,
"min": 0.0622650050354423,
"max": 9.697507862001657,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.258198026691389,
"min": 2.9887202417012304,
"max": 155.16012579202652,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740152209",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1740154307"
},
"total": 2098.096080585,
"count": 1,
"self": 0.4927201700002115,
"children": {
"run_training.setup": {
"total": 0.023352027000044018,
"count": 1,
"self": 0.023352027000044018
},
"TrainerController.start_learning": {
"total": 2097.580008388,
"count": 1,
"self": 1.4969150109282054,
"children": {
"TrainerController._reset_env": {
"total": 3.653614986999969,
"count": 1,
"self": 3.653614986999969
},
"TrainerController.advance": {
"total": 2092.3330233940715,
"count": 63246,
"self": 1.5052409660947887,
"children": {
"env_step": {
"total": 1399.5210020489662,
"count": 63246,
"self": 1232.427429790047,
"children": {
"SubprocessEnvManager._take_step": {
"total": 166.223349784962,
"count": 63246,
"self": 4.936128756991309,
"children": {
"TorchPolicy.evaluate": {
"total": 161.2872210279707,
"count": 62576,
"self": 161.2872210279707
}
}
},
"workers": {
"total": 0.870222473957142,
"count": 63246,
"self": 0.0,
"children": {
"worker_root": {
"total": 2092.46072918605,
"count": 63246,
"is_parallel": true,
"self": 979.2175691410607,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006457656999998562,
"count": 1,
"is_parallel": true,
"self": 0.004116666000015812,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023409909999827505,
"count": 8,
"is_parallel": true,
"self": 0.0023409909999827505
}
}
},
"UnityEnvironment.step": {
"total": 0.05683363200000713,
"count": 1,
"is_parallel": true,
"self": 0.0005793320000293534,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005194469999878493,
"count": 1,
"is_parallel": true,
"self": 0.0005194469999878493
},
"communicator.exchange": {
"total": 0.0538441139999577,
"count": 1,
"is_parallel": true,
"self": 0.0538441139999577
},
"steps_from_proto": {
"total": 0.0018907390000322266,
"count": 1,
"is_parallel": true,
"self": 0.0005183600000577826,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001372378999974444,
"count": 8,
"is_parallel": true,
"self": 0.001372378999974444
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1113.243160044989,
"count": 63245,
"is_parallel": true,
"self": 33.07225994902137,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.881280706992072,
"count": 63245,
"is_parallel": true,
"self": 23.881280706992072
},
"communicator.exchange": {
"total": 956.4821729009911,
"count": 63245,
"is_parallel": true,
"self": 956.4821729009911
},
"steps_from_proto": {
"total": 99.80744648798452,
"count": 63245,
"is_parallel": true,
"self": 20.355389210076396,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.45205727790812,
"count": 505960,
"is_parallel": true,
"self": 79.45205727790812
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 691.3067803790104,
"count": 63246,
"self": 2.762860774960018,
"children": {
"process_trajectory": {
"total": 131.13785808005247,
"count": 63246,
"self": 130.89114265505242,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24671542500004762,
"count": 2,
"self": 0.24671542500004762
}
}
},
"_update_policy": {
"total": 557.406061523998,
"count": 432,
"self": 307.1504892280028,
"children": {
"TorchPPOOptimizer.update": {
"total": 250.2555722959952,
"count": 22812,
"self": 250.2555722959952
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0000003385357559e-06,
"count": 1,
"self": 1.0000003385357559e-06
},
"TrainerController._save_models": {
"total": 0.09645399600003657,
"count": 1,
"self": 0.0015225379997900745,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0949314580002465,
"count": 1,
"self": 0.0949314580002465
}
}
}
}
}
}
}