{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.29161450266838074,
"min": 0.26832109689712524,
"max": 1.4448745250701904,
"count": 56
},
"Pyramids.Policy.Entropy.sum": {
"value": 8692.4453125,
"min": 8046.072265625,
"max": 43831.71484375,
"count": 56
},
"Pyramids.Step.mean": {
"value": 1679902.0,
"min": 29952.0,
"max": 1679902.0,
"count": 56
},
"Pyramids.Step.sum": {
"value": 1679902.0,
"min": 29952.0,
"max": 1679902.0,
"count": 56
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7371307015419006,
"min": -0.09672346711158752,
"max": 0.7979226112365723,
"count": 56
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 217.45355224609375,
"min": -23.31035614013672,
"max": 234.58924865722656,
"count": 56
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02280416339635849,
"min": -0.009110200218856335,
"max": 0.34660589694976807,
"count": 56
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.727228164672852,
"min": -2.3156561851501465,
"max": 83.53202056884766,
"count": 56
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06929605025634221,
"min": 0.06526765752988424,
"max": 0.0735187948817232,
"count": 56
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9701447035887909,
"min": 0.5087351455263213,
"max": 1.102781923225848,
"count": 56
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014207130402979749,
"min": 0.0002094138638317064,
"max": 0.016501869111033598,
"count": 56
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1988998256417165,
"min": 0.002722380229812183,
"max": 0.24538141228064583,
"count": 56
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.0001334438983758619,
"min": 0.0001334438983758619,
"max": 0.00029838354339596195,
"count": 56
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0018682145772620665,
"min": 0.0018682145772620665,
"max": 0.0038852456049181666,
"count": 56
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.14448128095238097,
"min": 0.14448128095238097,
"max": 0.19946118095238097,
"count": 56
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.0227379333333335,
"min": 1.3962282666666668,
"max": 2.6950818333333335,
"count": 56
},
"Pyramids.Policy.Beta.mean": {
"value": 0.004453679967142857,
"min": 0.004453679967142857,
"max": 0.009946171977142856,
"count": 56
},
"Pyramids.Policy.Beta.sum": {
"value": 0.06235151953999999,
"min": 0.06235151953999999,
"max": 0.12951867515,
"count": 56
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005804836750030518,
"min": 0.005547815002501011,
"max": 0.40933331847190857,
"count": 56
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.08126771450042725,
"min": 0.0776694118976593,
"max": 2.865333318710327,
"count": 56
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 249.71311475409837,
"min": 240.1900826446281,
"max": 999.0,
"count": 56
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30465.0,
"min": 15984.0,
"max": 32648.0,
"count": 56
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7502868754453347,
"min": -1.0000000521540642,
"max": 1.7527295010255985,
"count": 56
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 213.53499880433083,
"min": -31.996001660823822,
"max": 213.83299912512302,
"count": 56
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7502868754453347,
"min": -1.0000000521540642,
"max": 1.7527295010255985,
"count": 56
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 213.53499880433083,
"min": -31.996001660823822,
"max": 213.83299912512302,
"count": 56
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.014921443852546842,
"min": 0.014662963339714738,
"max": 8.126681023277342,
"count": 56
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.8204161500107148,
"min": 1.686240784067195,
"max": 130.02689637243748,
"count": 56
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 56
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 56
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1774003215",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=PyramidsTraining --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1774007195"
},
"total": 3980.1464101170004,
"count": 1,
"self": 0.41625770700011344,
"children": {
"run_training.setup": {
"total": 0.03520064500003173,
"count": 1,
"self": 0.03520064500003173
},
"TrainerController.start_learning": {
"total": 3979.694951765,
"count": 1,
"self": 2.316845138917415,
"children": {
"TrainerController._reset_env": {
"total": 3.435058239,
"count": 1,
"self": 3.435058239
},
"TrainerController.advance": {
"total": 3973.809470198083,
"count": 108490,
"self": 2.4656409260237524,
"children": {
"env_step": {
"total": 2844.1607173239854,
"count": 108490,
"self": 2582.4004485938763,
"children": {
"SubprocessEnvManager._take_step": {
"total": 260.3345160460326,
"count": 108490,
"self": 8.062287990032928,
"children": {
"TorchPolicy.evaluate": {
"total": 252.27222805599968,
"count": 105310,
"self": 252.27222805599968
}
}
},
"workers": {
"total": 1.4257526840765422,
"count": 108489,
"self": 0.0,
"children": {
"worker_root": {
"total": 3968.560015425031,
"count": 108489,
"is_parallel": true,
"self": 1588.4012289901093,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007264788000156841,
"count": 1,
"is_parallel": true,
"self": 0.004998805000013817,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022659830001430237,
"count": 8,
"is_parallel": true,
"self": 0.0022659830001430237
}
}
},
"UnityEnvironment.step": {
"total": 0.05688294800006588,
"count": 1,
"is_parallel": true,
"self": 0.0005364270002701232,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048206199994638155,
"count": 1,
"is_parallel": true,
"self": 0.00048206199994638155
},
"communicator.exchange": {
"total": 0.05430029499984812,
"count": 1,
"is_parallel": true,
"self": 0.05430029499984812
},
"steps_from_proto": {
"total": 0.0015641640000012558,
"count": 1,
"is_parallel": true,
"self": 0.00035776199956671917,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012064020004345366,
"count": 8,
"is_parallel": true,
"self": 0.0012064020004345366
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2380.1587864349217,
"count": 108488,
"is_parallel": true,
"self": 58.77171501287512,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 39.57980959893621,
"count": 108488,
"is_parallel": true,
"self": 39.57980959893621
},
"communicator.exchange": {
"total": 2096.553100965014,
"count": 108488,
"is_parallel": true,
"self": 2096.553100965014
},
"steps_from_proto": {
"total": 185.25416085809616,
"count": 108488,
"is_parallel": true,
"self": 38.8669858294752,
"children": {
"_process_rank_one_or_two_observation": {
"total": 146.38717502862096,
"count": 867904,
"is_parallel": true,
"self": 146.38717502862096
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1127.1831119480742,
"count": 108489,
"self": 4.734776398998974,
"children": {
"process_trajectory": {
"total": 215.01470632207815,
"count": 108489,
"self": 214.66923381607808,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3454725060000783,
"count": 3,
"self": 0.3454725060000783
}
}
},
"_update_policy": {
"total": 907.433629226997,
"count": 771,
"self": 501.85991378000654,
"children": {
"TorchPPOOptimizer.update": {
"total": 405.5737154469905,
"count": 38421,
"self": 405.5737154469905
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.526999767520465e-06,
"count": 1,
"self": 1.526999767520465e-06
},
"TrainerController._save_models": {
"total": 0.1335766619995411,
"count": 1,
"self": 0.0014462199997069547,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13213044199983415,
"count": 1,
"self": 0.13213044199983415
}
}
}
}
}
}
}