{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5000620484352112,
"min": 0.49057406187057495,
"max": 1.4615569114685059,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15161.8818359375,
"min": 14701.5234375,
"max": 44337.7890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989927.0,
"min": 29954.0,
"max": 989927.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989927.0,
"min": 29954.0,
"max": 989927.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2295708954334259,
"min": -0.09694843739271164,
"max": 0.2730889320373535,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 58.77014923095703,
"min": -23.26762580871582,
"max": 69.9107666015625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.003302262630313635,
"min": -0.003302262630313635,
"max": 0.2535272240638733,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.8453792333602905,
"min": -0.8453792333602905,
"max": 60.846534729003906,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0702798923447856,
"min": 0.06541337682785668,
"max": 0.07303880893525128,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.054198385171784,
"min": 0.5843104714820102,
"max": 1.054198385171784,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.010745456507501767,
"min": 0.000162480631420848,
"max": 0.010883919521542596,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1611818476125265,
"min": 0.002112248208471024,
"max": 0.1611818476125265,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.547397484233329e-06,
"min": 7.547397484233329e-06,
"max": 0.00029476755174414997,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011321096226349994,
"min": 0.00011321096226349994,
"max": 0.0035083451305517,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251576666666667,
"min": 0.10251576666666667,
"max": 0.19825585,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5377365,
"min": 1.4784685000000004,
"max": 2.5694483,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026132508999999993,
"min": 0.00026132508999999993,
"max": 0.009825759415,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003919876349999999,
"min": 0.003919876349999999,
"max": 0.11696788517000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010395408608019352,
"min": 0.010360405780375004,
"max": 0.3968595862388611,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15593113005161285,
"min": 0.1450456827878952,
"max": 3.1748766899108887,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 608.4897959183673,
"min": 575.425925925926,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29816.0,
"min": 17392.0,
"max": 32333.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.9275791350131234,
"min": -0.9999871489501768,
"max": 1.0260072423653168,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 44.52379848062992,
"min": -30.999601617455482,
"max": 56.43039833009243,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.9275791350131234,
"min": -0.9999871489501768,
"max": 1.0260072423653168,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 44.52379848062992,
"min": -30.999601617455482,
"max": 56.43039833009243,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06553883639056342,
"min": 0.06441820171661675,
"max": 7.5778723408778506,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1458641467470443,
"min": 3.1458641467470443,
"max": 136.40170213580132,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691776410",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691779176"
},
"total": 2766.3009188180004,
"count": 1,
"self": 0.5395321300002252,
"children": {
"run_training.setup": {
"total": 0.04610425800001394,
"count": 1,
"self": 0.04610425800001394
},
"TrainerController.start_learning": {
"total": 2765.71528243,
"count": 1,
"self": 2.3189590150541335,
"children": {
"TrainerController._reset_env": {
"total": 4.736361883999962,
"count": 1,
"self": 4.736361883999962
},
"TrainerController.advance": {
"total": 2758.546256546946,
"count": 63326,
"self": 2.3064922099883916,
"children": {
"env_step": {
"total": 1962.8787006870089,
"count": 63326,
"self": 1798.656258225059,
"children": {
"SubprocessEnvManager._take_step": {
"total": 162.85415631201,
"count": 63326,
"self": 6.768991848998212,
"children": {
"TorchPolicy.evaluate": {
"total": 156.08516446301178,
"count": 62560,
"self": 156.08516446301178
}
}
},
"workers": {
"total": 1.3682861499398769,
"count": 63326,
"self": 0.0,
"children": {
"worker_root": {
"total": 2758.550148785024,
"count": 63326,
"is_parallel": true,
"self": 1128.0369430550504,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002308504999973593,
"count": 1,
"is_parallel": true,
"self": 0.0007245520002925332,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015839529996810597,
"count": 8,
"is_parallel": true,
"self": 0.0015839529996810597
}
}
},
"UnityEnvironment.step": {
"total": 0.06360701000016888,
"count": 1,
"is_parallel": true,
"self": 0.0007185090003076766,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005766300000686897,
"count": 1,
"is_parallel": true,
"self": 0.0005766300000686897
},
"communicator.exchange": {
"total": 0.05997464099982608,
"count": 1,
"is_parallel": true,
"self": 0.05997464099982608
},
"steps_from_proto": {
"total": 0.002337229999966439,
"count": 1,
"is_parallel": true,
"self": 0.00043891799964512757,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018983120003213116,
"count": 8,
"is_parallel": true,
"self": 0.0018983120003213116
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1630.5132057299734,
"count": 63325,
"is_parallel": true,
"self": 44.618165950898856,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 30.91287305703804,
"count": 63325,
"is_parallel": true,
"self": 30.91287305703804
},
"communicator.exchange": {
"total": 1408.3650206440443,
"count": 63325,
"is_parallel": true,
"self": 1408.3650206440443
},
"steps_from_proto": {
"total": 146.61714607799217,
"count": 63325,
"is_parallel": true,
"self": 30.506639138030778,
"children": {
"_process_rank_one_or_two_observation": {
"total": 116.1105069399614,
"count": 506600,
"is_parallel": true,
"self": 116.1105069399614
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 793.3610636499491,
"count": 63326,
"self": 4.074597435909482,
"children": {
"process_trajectory": {
"total": 144.82234230404174,
"count": 63326,
"self": 144.57999845604172,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24234384800001862,
"count": 2,
"self": 0.24234384800001862
}
}
},
"_update_policy": {
"total": 644.4641239099979,
"count": 454,
"self": 420.7571544910156,
"children": {
"TorchPPOOptimizer.update": {
"total": 223.70696941898223,
"count": 22770,
"self": 223.70696941898223
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3010003385716118e-06,
"count": 1,
"self": 1.3010003385716118e-06
},
"TrainerController._save_models": {
"total": 0.113703682999585,
"count": 1,
"self": 0.0016883939997569541,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11201528899982804,
"count": 1,
"self": 0.11201528899982804
}
}
}
}
}
}
}