{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4876851439476013,
"min": 0.4876851439476013,
"max": 1.50128972530365,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14833.431640625,
"min": 14799.7392578125,
"max": 45543.125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989879.0,
"min": 29895.0,
"max": 989879.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989879.0,
"min": 29895.0,
"max": 989879.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3546045422554016,
"min": -0.09798877686262131,
"max": 0.40142837166786194,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 93.6156005859375,
"min": -23.51730728149414,
"max": 104.7728042602539,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0006557323504239321,
"min": -0.6742866039276123,
"max": 0.36897242069244385,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.17311334609985352,
"min": -176.6630859375,
"max": 87.44646453857422,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06944293210842009,
"min": 0.06384807716805133,
"max": 0.07255164059849102,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9722010495178812,
"min": 0.5667429176542654,
"max": 1.0238074502318768,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013588541115982494,
"min": 0.0006994268578100945,
"max": 0.052272423012851704,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1902395756237549,
"min": 0.006994268578100945,
"max": 0.7840863451927755,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.844561670892852e-06,
"min": 7.844561670892852e-06,
"max": 0.0002952118890960375,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010982386339249993,
"min": 0.00010982386339249993,
"max": 0.0033822335725888996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10261482142857141,
"min": 0.10261482142857141,
"max": 0.19840396249999998,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4366074999999998,
"min": 1.4366074999999998,
"max": 2.5274111,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002712206607142856,
"min": 0.0002712206607142856,
"max": 0.00984055585375,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037970892499999986,
"min": 0.0037970892499999986,
"max": 0.11276836889000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011304235085844994,
"min": 0.011304235085844994,
"max": 0.548721432685852,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1582592874765396,
"min": 0.1582592874765396,
"max": 4.389771461486816,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 452.7692307692308,
"min": 452.4923076923077,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29430.0,
"min": 16822.0,
"max": 33900.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3317784430888984,
"min": -1.0000000521540642,
"max": 1.3320461183786392,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 86.56559880077839,
"min": -32.000001668930054,
"max": 86.58299769461155,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3317784430888984,
"min": -1.0000000521540642,
"max": 1.3320461183786392,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 86.56559880077839,
"min": -32.000001668930054,
"max": 86.58299769461155,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.054127485288504085,
"min": 0.054127485288504085,
"max": 11.316670830635463,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.5182865437527653,
"min": 3.5182865437527653,
"max": 192.38340412080288,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1697078894",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1697082079"
},
"total": 3185.2569319990002,
"count": 1,
"self": 0.8846181320004689,
"children": {
"run_training.setup": {
"total": 0.05522893300002352,
"count": 1,
"self": 0.05522893300002352
},
"TrainerController.start_learning": {
"total": 3184.317084934,
"count": 1,
"self": 2.2248115750012403,
"children": {
"TrainerController._reset_env": {
"total": 1.1492572679999853,
"count": 1,
"self": 1.1492572679999853
},
"TrainerController.advance": {
"total": 3180.816438613999,
"count": 63491,
"self": 2.3292886979961622,
"children": {
"env_step": {
"total": 2100.423073387011,
"count": 63491,
"self": 1949.8338545579484,
"children": {
"SubprocessEnvManager._take_step": {
"total": 149.24951739701152,
"count": 63491,
"self": 6.961477949949142,
"children": {
"TorchPolicy.evaluate": {
"total": 142.28803944706237,
"count": 62553,
"self": 142.28803944706237
}
}
},
"workers": {
"total": 1.3397014320510152,
"count": 63491,
"self": 0.0,
"children": {
"worker_root": {
"total": 3177.9428118959295,
"count": 63491,
"is_parallel": true,
"self": 1398.4614989999384,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00552499700000908,
"count": 1,
"is_parallel": true,
"self": 0.003963348000127098,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015616489998819816,
"count": 8,
"is_parallel": true,
"self": 0.0015616489998819816
}
}
},
"UnityEnvironment.step": {
"total": 0.06099987800001827,
"count": 1,
"is_parallel": true,
"self": 0.000681524000015088,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003872560000104386,
"count": 1,
"is_parallel": true,
"self": 0.0003872560000104386
},
"communicator.exchange": {
"total": 0.05795901999999842,
"count": 1,
"is_parallel": true,
"self": 0.05795901999999842
},
"steps_from_proto": {
"total": 0.001972077999994326,
"count": 1,
"is_parallel": true,
"self": 0.000413078999940808,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001558999000053518,
"count": 8,
"is_parallel": true,
"self": 0.001558999000053518
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1779.481312895991,
"count": 63490,
"is_parallel": true,
"self": 46.46474271799002,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.786962413955735,
"count": 63490,
"is_parallel": true,
"self": 26.786962413955735
},
"communicator.exchange": {
"total": 1577.8154745120353,
"count": 63490,
"is_parallel": true,
"self": 1577.8154745120353
},
"steps_from_proto": {
"total": 128.41413325201,
"count": 63490,
"is_parallel": true,
"self": 27.594017199974076,
"children": {
"_process_rank_one_or_two_observation": {
"total": 100.82011605203593,
"count": 507920,
"is_parallel": true,
"self": 100.82011605203593
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1078.064076528992,
"count": 63491,
"self": 4.136871907974182,
"children": {
"process_trajectory": {
"total": 153.4832396480196,
"count": 63491,
"self": 153.08848823701982,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3947514109997883,
"count": 2,
"self": 0.3947514109997883
}
}
},
"_update_policy": {
"total": 920.4439649729982,
"count": 443,
"self": 356.34656640999833,
"children": {
"TorchPPOOptimizer.update": {
"total": 564.0973985629998,
"count": 22797,
"self": 564.0973985629998
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.711000095383497e-06,
"count": 1,
"self": 1.711000095383497e-06
},
"TrainerController._save_models": {
"total": 0.12657576599985987,
"count": 1,
"self": 0.002331227000013314,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12424453899984655,
"count": 1,
"self": 0.12424453899984655
}
}
}
}
}
}
}