{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.39741361141204834,
"min": 0.39249035716056824,
"max": 1.4532856941223145,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11909.69140625,
"min": 11862.62890625,
"max": 44086.875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989923.0,
"min": 29908.0,
"max": 989923.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989923.0,
"min": 29908.0,
"max": 989923.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5347685813903809,
"min": -0.17452983558177948,
"max": 0.5899375081062317,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 148.66566467285156,
"min": -41.88716125488281,
"max": 165.18251037597656,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.027728794142603874,
"min": 0.020704900845885277,
"max": 0.28993433713912964,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.70860481262207,
"min": 5.631732940673828,
"max": 69.58424377441406,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06950068172729863,
"min": 0.06431823449190337,
"max": 0.07379303142669637,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9730095441821809,
"min": 0.48822231280056083,
"max": 1.0733340622003502,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0159675119000548,
"min": 0.0005375435335923741,
"max": 0.0159675119000548,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2235451666007672,
"min": 0.007525609470293237,
"max": 0.23024166489873704,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.348426121985716e-06,
"min": 7.348426121985716e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010287796570780002,
"min": 0.00010287796570780002,
"max": 0.0034921117359627994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10244944285714286,
"min": 0.10244944285714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4342922,
"min": 1.3886848,
"max": 2.4862022,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002546993414285715,
"min": 0.0002546993414285715,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003565790780000001,
"min": 0.003565790780000001,
"max": 0.11641731628,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.024407463148236275,
"min": 0.024407463148236275,
"max": 0.5852988958358765,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.34170448780059814,
"min": 0.34170448780059814,
"max": 4.097092151641846,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 348.08045977011494,
"min": 348.08045977011494,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30283.0,
"min": 16835.0,
"max": 33561.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6079636156897654,
"min": -0.9999871489501768,
"max": 1.6450246657118386,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 141.50079818069935,
"min": -30.999601617455482,
"max": 141.50079818069935,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6079636156897654,
"min": -0.9999871489501768,
"max": 1.6450246657118386,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 141.50079818069935,
"min": -30.999601617455482,
"max": 141.50079818069935,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.086326377301041,
"min": 0.086326377301041,
"max": 11.227535291629678,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 7.596721202491608,
"min": 7.52142452186672,
"max": 190.86809995770454,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1747367502",
"python_version": "3.10.12 (main, Feb 4 2025, 14:57:36) [GCC 11.4.0]",
"command_line_arguments": "/home/gym/.local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1747368319"
},
"total": 816.9601381419999,
"count": 1,
"self": 0.218148676999931,
"children": {
"run_training.setup": {
"total": 0.013524332999963917,
"count": 1,
"self": 0.013524332999963917
},
"TrainerController.start_learning": {
"total": 816.728465132,
"count": 1,
"self": 0.5930147309869653,
"children": {
"TrainerController._reset_env": {
"total": 1.228264455000044,
"count": 1,
"self": 1.228264455000044
},
"TrainerController.advance": {
"total": 814.8787214330132,
"count": 63705,
"self": 0.5560394840263143,
"children": {
"env_step": {
"total": 611.2338976199959,
"count": 63705,
"self": 573.3734891499769,
"children": {
"SubprocessEnvManager._take_step": {
"total": 37.48590162500534,
"count": 63705,
"self": 1.6912241119889586,
"children": {
"TorchPolicy.evaluate": {
"total": 35.79467751301638,
"count": 62546,
"self": 35.79467751301638
}
}
},
"workers": {
"total": 0.37450684501368414,
"count": 63705,
"self": 0.0,
"children": {
"worker_root": {
"total": 815.2489232009905,
"count": 63705,
"is_parallel": true,
"self": 281.63628776701626,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0012136119999013317,
"count": 1,
"is_parallel": true,
"self": 0.0006440639999709674,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005695479999303643,
"count": 8,
"is_parallel": true,
"self": 0.0005695479999303643
}
}
},
"UnityEnvironment.step": {
"total": 0.01830171100004918,
"count": 1,
"is_parallel": true,
"self": 0.00014197400003013172,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00012826400006815675,
"count": 1,
"is_parallel": true,
"self": 0.00012826400006815675
},
"communicator.exchange": {
"total": 0.017659118999972634,
"count": 1,
"is_parallel": true,
"self": 0.017659118999972634
},
"steps_from_proto": {
"total": 0.0003723539999782588,
"count": 1,
"is_parallel": true,
"self": 9.316300008777034e-05,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00027919099989048846,
"count": 8,
"is_parallel": true,
"self": 0.00027919099989048846
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 533.6126354339742,
"count": 63704,
"is_parallel": true,
"self": 7.515094199031523,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.0651114539650735,
"count": 63704,
"is_parallel": true,
"self": 5.0651114539650735
},
"communicator.exchange": {
"total": 500.51337489298567,
"count": 63704,
"is_parallel": true,
"self": 500.51337489298567
},
"steps_from_proto": {
"total": 20.519054887991956,
"count": 63704,
"is_parallel": true,
"self": 4.689604202042119,
"children": {
"_process_rank_one_or_two_observation": {
"total": 15.829450685949837,
"count": 509632,
"is_parallel": true,
"self": 15.829450685949837
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 203.08878432899098,
"count": 63705,
"self": 1.1527257959716053,
"children": {
"process_trajectory": {
"total": 40.00459650002142,
"count": 63705,
"self": 39.93416578702147,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07043071299995063,
"count": 2,
"self": 0.07043071299995063
}
}
},
"_update_policy": {
"total": 161.93146203299796,
"count": 452,
"self": 85.87127631300405,
"children": {
"TorchPPOOptimizer.update": {
"total": 76.06018571999391,
"count": 22827,
"self": 76.06018571999391
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.75999968330143e-07,
"count": 1,
"self": 4.75999968330143e-07
},
"TrainerController._save_models": {
"total": 0.028464036999821474,
"count": 1,
"self": 0.0006103279999933875,
"children": {
"RLTrainer._checkpoint": {
"total": 0.027853708999828086,
"count": 1,
"self": 0.027853708999828086
}
}
}
}
}
}
}