{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.43407079577445984,
"min": 0.42527782917022705,
"max": 1.4837955236434937,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12904.056640625,
"min": 12737.921875,
"max": 45012.421875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989911.0,
"min": 29952.0,
"max": 989911.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989911.0,
"min": 29952.0,
"max": 989911.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5767265558242798,
"min": -0.16844777762889862,
"max": 0.612508237361908,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 158.0230712890625,
"min": -39.922122955322266,
"max": 170.88980102539062,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.00850876234471798,
"min": -0.03281579911708832,
"max": 0.3009519577026367,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.3314008712768555,
"min": -8.860265731811523,
"max": 72.5294189453125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06775942262278736,
"min": 0.06507797840886805,
"max": 0.0718982202711881,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9486319167190231,
"min": 0.491452663355432,
"max": 1.065007424719321,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015864644856524786,
"min": 0.0006806989169103642,
"max": 0.0172680757417414,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.222105027991347,
"min": 0.008849085919834736,
"max": 0.2417530603843796,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4504475165499995e-06,
"min": 7.4504475165499995e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010430626523169999,
"min": 0.00010430626523169999,
"max": 0.0033817955727349,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248345000000002,
"min": 0.10248345000000002,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4347683000000002,
"min": 1.3886848,
"max": 2.5272651,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025809665500000007,
"min": 0.00025809665500000007,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036133531700000008,
"min": 0.0036133531700000008,
"max": 0.11275378348999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010653096251189709,
"min": 0.010653096251189709,
"max": 0.43323490023612976,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14914335310459137,
"min": 0.14914335310459137,
"max": 3.032644271850586,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 325.7977528089888,
"min": 288.0,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28996.0,
"min": 15984.0,
"max": 33925.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.651721326990074,
"min": -1.0000000521540642,
"max": 1.651721326990074,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 147.00319810211658,
"min": -28.266001626849174,
"max": 171.9449976682663,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.651721326990074,
"min": -1.0000000521540642,
"max": 1.651721326990074,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 147.00319810211658,
"min": -28.266001626849174,
"max": 171.9449976682663,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.035411360995220076,
"min": 0.035411360995220076,
"max": 8.571720628067851,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1516111285745865,
"min": 3.1516111285745865,
"max": 137.14753004908562,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1754575990",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1754578211"
},
"total": 2221.377463368,
"count": 1,
"self": 0.7650903400003699,
"children": {
"run_training.setup": {
"total": 0.027368948999992426,
"count": 1,
"self": 0.027368948999992426
},
"TrainerController.start_learning": {
"total": 2220.5850040789996,
"count": 1,
"self": 1.346251471067717,
"children": {
"TrainerController._reset_env": {
"total": 2.5212945329999457,
"count": 1,
"self": 2.5212945329999457
},
"TrainerController.advance": {
"total": 2216.6031353579324,
"count": 63944,
"self": 1.3791782740390772,
"children": {
"env_step": {
"total": 1559.7259500259647,
"count": 63944,
"self": 1415.1660427130223,
"children": {
"SubprocessEnvManager._take_step": {
"total": 143.7468732469597,
"count": 63944,
"self": 4.429113454985327,
"children": {
"TorchPolicy.evaluate": {
"total": 139.31775979197437,
"count": 62550,
"self": 139.31775979197437
}
}
},
"workers": {
"total": 0.8130340659827198,
"count": 63944,
"self": 0.0,
"children": {
"worker_root": {
"total": 2215.7580473560615,
"count": 63944,
"is_parallel": true,
"self": 910.4688227340862,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025719969999045134,
"count": 1,
"is_parallel": true,
"self": 0.0008216759993047162,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017503210005997971,
"count": 8,
"is_parallel": true,
"self": 0.0017503210005997971
}
}
},
"UnityEnvironment.step": {
"total": 0.05092954599967925,
"count": 1,
"is_parallel": true,
"self": 0.0005536380003832164,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045113899977877736,
"count": 1,
"is_parallel": true,
"self": 0.00045113899977877736
},
"communicator.exchange": {
"total": 0.048274782999669696,
"count": 1,
"is_parallel": true,
"self": 0.048274782999669696
},
"steps_from_proto": {
"total": 0.0016499859998475586,
"count": 1,
"is_parallel": true,
"self": 0.00036246099898562534,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012875250008619332,
"count": 8,
"is_parallel": true,
"self": 0.0012875250008619332
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1305.2892246219753,
"count": 63943,
"is_parallel": true,
"self": 31.497336503966835,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.117139126020902,
"count": 63943,
"is_parallel": true,
"self": 22.117139126020902
},
"communicator.exchange": {
"total": 1158.5751216730123,
"count": 63943,
"is_parallel": true,
"self": 1158.5751216730123
},
"steps_from_proto": {
"total": 93.09962731897531,
"count": 63943,
"is_parallel": true,
"self": 18.511454564938504,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.5881727540368,
"count": 511544,
"is_parallel": true,
"self": 74.5881727540368
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 655.4980070579286,
"count": 63944,
"self": 2.587601174887368,
"children": {
"process_trajectory": {
"total": 123.31635473403867,
"count": 63944,
"self": 123.08747297803893,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22888175599973692,
"count": 2,
"self": 0.22888175599973692
}
}
},
"_update_policy": {
"total": 529.5940511490026,
"count": 449,
"self": 294.47535499496325,
"children": {
"TorchPPOOptimizer.update": {
"total": 235.1186961540393,
"count": 22776,
"self": 235.1186961540393
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0399999155197293e-06,
"count": 1,
"self": 1.0399999155197293e-06
},
"TrainerController._save_models": {
"total": 0.11432167699967977,
"count": 1,
"self": 0.0018506549995436217,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11247102200013614,
"count": 1,
"self": 0.11247102200013614
}
}
}
}
}
}
}