{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4370691180229187,
"min": 0.3953787386417389,
"max": 1.482421636581421,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13314.8740234375,
"min": 11766.4716796875,
"max": 44970.7421875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989994.0,
"min": 29952.0,
"max": 989994.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989994.0,
"min": 29952.0,
"max": 989994.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.49744758009910583,
"min": -0.16705232858657837,
"max": 0.5302248001098633,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 135.80319213867188,
"min": -39.591400146484375,
"max": 146.342041015625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.027775047346949577,
"min": -0.047142673283815384,
"max": 0.19150181114673615,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -7.582587718963623,
"min": -13.011377334594727,
"max": 46.34343719482422,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06923712957546889,
"min": 0.06705989892325001,
"max": 0.07478817360347902,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9693198140565644,
"min": 0.4821412061072147,
"max": 1.0667286054231224,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017487687191263,
"min": 0.00067799773811616,
"max": 0.017487687191263,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.244827620677682,
"min": 0.009491968333626241,
"max": 0.244827620677682,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.653226020385714e-06,
"min": 7.653226020385714e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001071451642854,
"min": 0.0001071451642854,
"max": 0.003508077530640899,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10255104285714288,
"min": 0.10255104285714288,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4357146000000003,
"min": 1.3886848,
"max": 2.5726598999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026484918142857144,
"min": 0.00026484918142857144,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00370788854,
"min": 0.00370788854,
"max": 0.11695897409,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011264032684266567,
"min": 0.011264032684266567,
"max": 0.350700318813324,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1576964557170868,
"min": 0.1576964557170868,
"max": 2.454902172088623,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 376.2692307692308,
"min": 340.88461538461536,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29349.0,
"min": 15984.0,
"max": 33061.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.521143573216903,
"min": -1.0000000521540642,
"max": 1.607820487748354,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 118.64919871091843,
"min": -29.86980164051056,
"max": 131.5607987716794,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.521143573216903,
"min": -1.0000000521540642,
"max": 1.607820487748354,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 118.64919871091843,
"min": -29.86980164051056,
"max": 131.5607987716794,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04397268599113419,
"min": 0.040427896071658045,
"max": 6.6636851988732815,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.4298695073084673,
"min": 3.1533758935893275,
"max": 106.6189631819725,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1743406893",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1743409105"
},
"total": 2211.829376175,
"count": 1,
"self": 0.5766778919996796,
"children": {
"run_training.setup": {
"total": 0.02069739000012305,
"count": 1,
"self": 0.02069739000012305
},
"TrainerController.start_learning": {
"total": 2211.232000893,
"count": 1,
"self": 1.3221066450428225,
"children": {
"TrainerController._reset_env": {
"total": 2.5621537959998477,
"count": 1,
"self": 2.5621537959998477
},
"TrainerController.advance": {
"total": 2207.260561658958,
"count": 63777,
"self": 1.4149359319735595,
"children": {
"env_step": {
"total": 1525.115483318982,
"count": 63777,
"self": 1368.0705643970418,
"children": {
"SubprocessEnvManager._take_step": {
"total": 156.24488574995485,
"count": 63777,
"self": 4.709928742895954,
"children": {
"TorchPolicy.evaluate": {
"total": 151.5349570070589,
"count": 62571,
"self": 151.5349570070589
}
}
},
"workers": {
"total": 0.8000331719852056,
"count": 63777,
"self": 0.0,
"children": {
"worker_root": {
"total": 2206.5199119400045,
"count": 63777,
"is_parallel": true,
"self": 950.0668551059418,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022621880000315286,
"count": 1,
"is_parallel": true,
"self": 0.000794584000004761,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014676040000267676,
"count": 8,
"is_parallel": true,
"self": 0.0014676040000267676
}
}
},
"UnityEnvironment.step": {
"total": 0.10414406200015947,
"count": 1,
"is_parallel": true,
"self": 0.0005853010000009817,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004971460000433581,
"count": 1,
"is_parallel": true,
"self": 0.0004971460000433581
},
"communicator.exchange": {
"total": 0.10128365700006725,
"count": 1,
"is_parallel": true,
"self": 0.10128365700006725
},
"steps_from_proto": {
"total": 0.0017779580000478745,
"count": 1,
"is_parallel": true,
"self": 0.00042222800038871355,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001355729999659161,
"count": 8,
"is_parallel": true,
"self": 0.001355729999659161
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1256.4530568340626,
"count": 63776,
"is_parallel": true,
"self": 31.74224129909885,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.251297472972283,
"count": 63776,
"is_parallel": true,
"self": 23.251297472972283
},
"communicator.exchange": {
"total": 1103.5011653820306,
"count": 63776,
"is_parallel": true,
"self": 1103.5011653820306
},
"steps_from_proto": {
"total": 97.95835267996085,
"count": 63776,
"is_parallel": true,
"self": 19.801755019970187,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.15659765999067,
"count": 510208,
"is_parallel": true,
"self": 78.15659765999067
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 680.7301424080026,
"count": 63777,
"self": 2.5861683159864697,
"children": {
"process_trajectory": {
"total": 129.20662009802095,
"count": 63777,
"self": 129.0054291100207,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20119098800023494,
"count": 2,
"self": 0.20119098800023494
}
}
},
"_update_policy": {
"total": 548.9373539939952,
"count": 451,
"self": 301.99861138099277,
"children": {
"TorchPPOOptimizer.update": {
"total": 246.93874261300243,
"count": 22809,
"self": 246.93874261300243
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0989997463184409e-06,
"count": 1,
"self": 1.0989997463184409e-06
},
"TrainerController._save_models": {
"total": 0.0871776939998199,
"count": 1,
"self": 0.0013061459999335057,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08587154799988639,
"count": 1,
"self": 0.08587154799988639
}
}
}
}
}
}
}