{
    "name": "root",
    "gauges": {
        "Pyramids.Policy.Entropy.mean": {
            "value": 0.42661282420158386,
            "min": 0.42661282420158386,
            "max": 1.4351681470870972,
            "count": 33
        },
        "Pyramids.Policy.Entropy.sum": {
            "value": 12921.2490234375,
            "min": 12921.2490234375,
            "max": 43537.26171875,
            "count": 33
        },
        "Pyramids.Step.mean": {
            "value": 989962.0,
            "min": 29952.0,
            "max": 989962.0,
            "count": 33
        },
        "Pyramids.Step.sum": {
            "value": 989962.0,
            "min": 29952.0,
            "max": 989962.0,
            "count": 33
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
            "value": 0.4852186441421509,
            "min": -0.13499201834201813,
            "max": 0.5720705986022949,
            "count": 33
        },
        "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
            "value": 132.46469116210938,
            "min": -32.533077239990234,
            "max": 155.0311279296875,
            "count": 33
        },
        "Pyramids.Policy.RndValueEstimate.mean": {
            "value": 0.025193121284246445,
            "min": 0.012406974099576473,
            "max": 0.5613970756530762,
            "count": 33
        },
        "Pyramids.Policy.RndValueEstimate.sum": {
            "value": 6.877722263336182,
            "min": 3.3622899055480957,
            "max": 141.47206115722656,
            "count": 33
        },
        "Pyramids.Losses.PolicyLoss.mean": {
            "value": 0.06708143480050023,
            "min": 0.0627065204759422,
            "max": 0.07369771965192316,
            "count": 33
        },
        "Pyramids.Losses.PolicyLoss.sum": {
            "value": 0.9391400872070033,
            "min": 0.5158840375634621,
            "max": 1.073817646136626,
            "count": 33
        },
        "Pyramids.Losses.ValueLoss.mean": {
            "value": 0.01377473336088726,
            "min": 0.000786386372923066,
            "max": 0.049710626967020756,
            "count": 33
        },
        "Pyramids.Losses.ValueLoss.sum": {
            "value": 0.19284626705242164,
            "min": 0.008650250102153726,
            "max": 0.7456594045053113,
            "count": 33
        },
        "Pyramids.Policy.LearningRate.mean": {
            "value": 7.530054632871431e-06,
            "min": 7.530054632871431e-06,
            "max": 0.00029515063018788575,
            "count": 33
        },
        "Pyramids.Policy.LearningRate.sum": {
            "value": 0.00010542076486020003,
            "min": 0.00010542076486020003,
            "max": 0.0035074394308536004,
            "count": 33
        },
        "Pyramids.Policy.Epsilon.mean": {
            "value": 0.10250998571428574,
            "min": 0.10250998571428574,
            "max": 0.19838354285714285,
            "count": 33
        },
        "Pyramids.Policy.Epsilon.sum": {
            "value": 1.4351398000000004,
            "min": 1.3886848,
            "max": 2.5724618999999995,
            "count": 33
        },
        "Pyramids.Policy.Beta.mean": {
            "value": 0.000260747572857143,
            "min": 0.000260747572857143,
            "max": 0.00983851593142857,
            "count": 33
        },
        "Pyramids.Policy.Beta.sum": {
            "value": 0.003650466020000002,
            "min": 0.003650466020000002,
            "max": 0.11693772536,
            "count": 33
        },
        "Pyramids.Losses.RNDLoss.mean": {
            "value": 0.010629934258759022,
            "min": 0.010629934258759022,
            "max": 0.3827984929084778,
            "count": 33
        },
        "Pyramids.Losses.RNDLoss.sum": {
            "value": 0.14881907403469086,
            "min": 0.14881907403469086,
            "max": 2.6795895099639893,
            "count": 33
        },
        "Pyramids.Environment.EpisodeLength.mean": {
            "value": 391.05263157894734,
            "min": 315.5698924731183,
            "max": 999.0,
            "count": 33
        },
        "Pyramids.Environment.EpisodeLength.sum": {
            "value": 29720.0,
            "min": 15984.0,
            "max": 33136.0,
            "count": 33
        },
        "Pyramids.Environment.CumulativeReward.mean": {
            "value": 1.446722647746404,
            "min": -1.0000000521540642,
            "max": 1.6414021140241877,
            "count": 33
        },
        "Pyramids.Environment.CumulativeReward.sum": {
            "value": 108.5041985809803,
            "min": -29.913001619279385,
            "max": 154.29179871827364,
            "count": 33
        },
        "Pyramids.Policy.ExtrinsicReward.mean": {
            "value": 1.446722647746404,
            "min": -1.0000000521540642,
            "max": 1.6414021140241877,
            "count": 33
        },
        "Pyramids.Policy.ExtrinsicReward.sum": {
            "value": 108.5041985809803,
            "min": -29.913001619279385,
            "max": 154.29179871827364,
            "count": 33
        },
        "Pyramids.Policy.RndReward.mean": {
            "value": 0.04375640034675598,
            "min": 0.036383640326830766,
            "max": 6.595312531106174,
            "count": 33
        },
        "Pyramids.Policy.RndReward.sum": {
            "value": 3.2817300260066986,
            "min": 3.185614061949309,
            "max": 105.52500049769878,
            "count": 33
        },
        "Pyramids.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 33
        },
        "Pyramids.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 33
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1679214608",
        "python_version": "3.9.16 (main, Dec  7 2022, 01:11:51) \n[GCC 9.4.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
        "mlagents_version": "0.31.0.dev0",
        "mlagents_envs_version": "0.31.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "1.11.0+cu102",
        "numpy_version": "1.21.2",
        "end_time_seconds": "1679216728"
    },
    "total": 2119.9186658400004,
    "count": 1,
    "self": 0.4885479950003173,
    "children": {
        "run_training.setup": {
            "total": 0.1019306250000227,
            "count": 1,
            "self": 0.1019306250000227
        },
        "TrainerController.start_learning": {
            "total": 2119.32818722,
            "count": 1,
            "self": 1.5074590329090825,
            "children": {
                "TrainerController._reset_env": {
                    "total": 6.080862118999903,
                    "count": 1,
                    "self": 6.080862118999903
                },
                "TrainerController.advance": {
                    "total": 2111.6495691970904,
                    "count": 63829,
                    "self": 1.4765579171130412,
                    "children": {
                        "env_step": {
                            "total": 1500.8434629269727,
                            "count": 63829,
                            "self": 1391.2299272500886,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 108.72991550888992,
                                    "count": 63829,
                                    "self": 4.767345487919101,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 103.96257002097082,
                                            "count": 62557,
                                            "self": 103.96257002097082
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 0.8836201679941951,
                                    "count": 63829,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 2114.752070794908,
                                            "count": 63829,
                                            "is_parallel": true,
                                            "self": 842.5170535479065,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.002816915000039444,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0006963209998502862,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.0021205940001891577,
                                                                    "count": 8,
                                                                    "is_parallel": true,
                                                                    "self": 0.0021205940001891577
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.05856046500002776,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0006021429999236716,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.0005100970001876703,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0005100970001876703
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.05560618199979217,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.05560618199979217
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.0018420430001242494,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0004097669993825548,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.0014322760007416946,
                                                                            "count": 8,
                                                                            "is_parallel": true,
                                                                            "self": 0.0014322760007416946
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 1272.2350172470015,
                                                    "count": 63828,
                                                    "is_parallel": true,
                                                    "self": 30.676713851984005,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 22.37188011700937,
                                                            "count": 63828,
                                                            "is_parallel": true,
                                                            "self": 22.37188011700937
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 1128.8535302699906,
                                                            "count": 63828,
                                                            "is_parallel": true,
                                                            "self": 1128.8535302699906
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 90.33289300801744,
                                                            "count": 63828,
                                                            "is_parallel": true,
                                                            "self": 19.5236570719494,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 70.80923593606803,
                                                                    "count": 510624,
                                                                    "is_parallel": true,
                                                                    "self": 70.80923593606803
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        },
                        "trainer_advance": {
                            "total": 609.3295483530046,
                            "count": 63829,
                            "self": 2.795314255037738,
                            "children": {
                                "process_trajectory": {
                                    "total": 115.2220620709586,
                                    "count": 63829,
                                    "self": 114.9575418249583,
                                    "children": {
                                        "RLTrainer._checkpoint": {
                                            "total": 0.264520246000302,
                                            "count": 2,
                                            "self": 0.264520246000302
                                        }
                                    }
                                },
                                "_update_policy": {
                                    "total": 491.3121720270083,
                                    "count": 450,
                                    "self": 312.82163911499947,
                                    "children": {
                                        "TorchPPOOptimizer.update": {
                                            "total": 178.4905329120088,
                                            "count": 22821,
                                            "self": 178.4905329120088
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 9.229997885995544e-07,
                    "count": 1,
                    "self": 9.229997885995544e-07
                },
                "TrainerController._save_models": {
                    "total": 0.0902959480008576,
                    "count": 1,
                    "self": 0.0018645580003067153,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.08843139000055089,
                            "count": 1,
                            "self": 0.08843139000055089
                        }
                    }
                }
            }
        }
    }
}