{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6516154408454895,
"min": 0.6516154408454895,
"max": 1.3695513010025024,
"count": 20
},
"Pyramids.Policy.Entropy.sum": {
"value": 32747.5859375,
"min": 32747.5859375,
"max": 68543.3046875,
"count": 20
},
"Pyramids.Step.mean": {
"value": 999996.0,
"min": 49920.0,
"max": 999996.0,
"count": 20
},
"Pyramids.Step.sum": {
"value": 999996.0,
"min": 49920.0,
"max": 999996.0,
"count": 20
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.19308699667453766,
"min": -0.10990340262651443,
"max": 0.19308699667453766,
"count": 20
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 81.67579650878906,
"min": -44.18116760253906,
"max": 81.67579650878906,
"count": 20
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.05419841781258583,
"min": 0.012073390185832977,
"max": 0.5087637305259705,
"count": 20
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 22.92593002319336,
"min": 4.998383522033691,
"max": 202.99671936035156,
"count": 20
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06960356315130709,
"min": 0.0671624880115491,
"max": 0.0729492178419177,
"count": 20
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.67048551563137,
"min": 0.869867088534433,
"max": 1.7507812282060247,
"count": 20
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.008700413769161267,
"min": 0.00038179419750892525,
"max": 0.00892270895195701,
"count": 20
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2088099304598704,
"min": 0.008781266542705281,
"max": 0.2088099304598704,
"count": 20
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.303547565516665e-06,
"min": 7.303547565516665e-06,
"max": 0.00029215680261440003,
"count": 20
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00017528514157239995,
"min": 0.00017528514157239995,
"max": 0.0056913409028865,
"count": 20
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243448333333334,
"min": 0.10243448333333334,
"max": 0.19738560000000002,
"count": 20
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.4584276000000003,
"min": 2.3686272,
"max": 4.1971135,
"count": 20
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002532048849999999,
"min": 0.0002532048849999999,
"max": 0.00973882144,
"count": 20
},
"Pyramids.Policy.Beta.sum": {
"value": 0.006076917239999998,
"min": 0.006076917239999998,
"max": 0.18975163865,
"count": 20
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01213915180414915,
"min": 0.01213915180414915,
"max": 0.47243833541870117,
"count": 20
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.291339635848999,
"min": 0.291339635848999,
"max": 5.669260025024414,
"count": 20
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 651.3815789473684,
"min": 644.3466666666667,
"max": 999.0,
"count": 20
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 49505.0,
"min": 47310.0,
"max": 57237.0,
"count": 20
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.6426181529055942,
"min": -1.0000000521540642,
"max": 0.7926945571561117,
"count": 20
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 49.481597773730755,
"min": -48.00000250339508,
"max": 58.65939722955227,
"count": 20
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.6426181529055942,
"min": -1.0000000521540642,
"max": 0.7926945571561117,
"count": 20
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 49.481597773730755,
"min": -48.00000250339508,
"max": 58.65939722955227,
"count": 20
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08190511345603298,
"min": 0.08190511345603298,
"max": 6.73013247239093,
"count": 20
},
"Pyramids.Policy.RndReward.sum": {
"value": 6.306693736114539,
"min": 6.071258629439399,
"max": 323.04635867476463,
"count": 20
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1741958978",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1741960947"
},
"total": 1968.7219446800002,
"count": 1,
"self": 0.4920262670000284,
"children": {
"run_training.setup": {
"total": 0.028782276000015372,
"count": 1,
"self": 0.028782276000015372
},
"TrainerController.start_learning": {
"total": 1968.201136137,
"count": 1,
"self": 1.2164956279416401,
"children": {
"TrainerController._reset_env": {
"total": 3.7129245959999935,
"count": 1,
"self": 3.7129245959999935
},
"TrainerController.advance": {
"total": 1963.1855599640585,
"count": 63245,
"self": 1.2821281930550867,
"children": {
"env_step": {
"total": 1293.4564528939845,
"count": 63245,
"self": 1148.2014419070165,
"children": {
"SubprocessEnvManager._take_step": {
"total": 144.55106448198478,
"count": 63245,
"self": 4.3794781590310095,
"children": {
"TorchPolicy.evaluate": {
"total": 140.17158632295377,
"count": 62577,
"self": 140.17158632295377
}
}
},
"workers": {
"total": 0.703946504983378,
"count": 63245,
"self": 0.0,
"children": {
"worker_root": {
"total": 1963.8637564619735,
"count": 63245,
"is_parallel": true,
"self": 919.2626604509492,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005537124000056792,
"count": 1,
"is_parallel": true,
"self": 0.004230232999816508,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013068910002402845,
"count": 8,
"is_parallel": true,
"self": 0.0013068910002402845
}
}
},
"UnityEnvironment.step": {
"total": 0.04769638899995243,
"count": 1,
"is_parallel": true,
"self": 0.0005155309999054225,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043939200008935586,
"count": 1,
"is_parallel": true,
"self": 0.00043939200008935586
},
"communicator.exchange": {
"total": 0.0450099140000475,
"count": 1,
"is_parallel": true,
"self": 0.0450099140000475
},
"steps_from_proto": {
"total": 0.0017315519999101525,
"count": 1,
"is_parallel": true,
"self": 0.0004989530000329978,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012325989998771547,
"count": 8,
"is_parallel": true,
"self": 0.0012325989998771547
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1044.6010960110243,
"count": 63244,
"is_parallel": true,
"self": 31.132613406001383,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.71871341001986,
"count": 63244,
"is_parallel": true,
"self": 22.71871341001986
},
"communicator.exchange": {
"total": 899.5944875310203,
"count": 63244,
"is_parallel": true,
"self": 899.5944875310203
},
"steps_from_proto": {
"total": 91.1552816639828,
"count": 63244,
"is_parallel": true,
"self": 17.837325306835623,
"children": {
"_process_rank_one_or_two_observation": {
"total": 73.31795635714718,
"count": 505952,
"is_parallel": true,
"self": 73.31795635714718
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 668.4469788770188,
"count": 63245,
"self": 2.3642319080736343,
"children": {
"process_trajectory": {
"total": 122.48483953394452,
"count": 63245,
"self": 122.25191112094444,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2329284130000815,
"count": 2,
"self": 0.2329284130000815
}
}
},
"_update_policy": {
"total": 543.5979074350006,
"count": 441,
"self": 298.04100385401364,
"children": {
"TorchPPOOptimizer.update": {
"total": 245.55690358098695,
"count": 22902,
"self": 245.55690358098695
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.059999683813658e-07,
"count": 1,
"self": 9.059999683813658e-07
},
"TrainerController._save_models": {
"total": 0.08615504299996246,
"count": 1,
"self": 0.001309593000314635,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08484544999964783,
"count": 1,
"self": 0.08484544999964783
}
}
}
}
}
}
}