{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.29457712173461914,
"min": 0.28781238198280334,
"max": 1.4766510725021362,
"count": 55
},
"Pyramids.Policy.Entropy.sum": {
"value": 8846.740234375,
"min": 8657.396484375,
"max": 44795.6875,
"count": 55
},
"Pyramids.Step.mean": {
"value": 1649957.0,
"min": 29922.0,
"max": 1649957.0,
"count": 55
},
"Pyramids.Step.sum": {
"value": 1649957.0,
"min": 29922.0,
"max": 1649957.0,
"count": 55
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6708931922912598,
"min": -0.20482537150382996,
"max": 0.7297143340110779,
"count": 55
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 187.85009765625,
"min": -48.54361343383789,
"max": 215.9954376220703,
"count": 55
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.007760875392705202,
"min": -0.005171722732484341,
"max": 0.24000464379787445,
"count": 55
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.1730451583862305,
"min": -1.530829906463623,
"max": 56.881099700927734,
"count": 55
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06887802394833031,
"min": 0.0651892407025088,
"max": 0.07452423412178177,
"count": 55
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9642923352766244,
"min": 0.5867417861370261,
"max": 1.0566623151535168,
"count": 55
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011469048573249097,
"min": 0.00016149284553477575,
"max": 0.015445464763269271,
"count": 55
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.16056668002548735,
"min": 0.0019379141464173088,
"max": 0.2210463398854093,
"count": 55
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00013648208307741907,
"min": 0.00013648208307741907,
"max": 0.00029828112557295835,
"count": 55
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0019107491630838672,
"min": 0.0019107491630838672,
"max": 0.0038014766328411664,
"count": 55
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.14549400952380953,
"min": 0.14549400952380953,
"max": 0.19942704166666667,
"count": 55
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.0369161333333334,
"min": 1.5954163333333333,
"max": 2.6671588333333336,
"count": 55
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00455485155142857,
"min": 0.00455485155142857,
"max": 0.0099427614625,
"count": 55
},
"Pyramids.Policy.Beta.sum": {
"value": 0.06376792171999998,
"min": 0.06376792171999998,
"max": 0.12672916744999999,
"count": 55
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.004840393550693989,
"min": 0.004840393550693989,
"max": 0.3451967239379883,
"count": 55
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.06776551157236099,
"min": 0.06776551157236099,
"max": 2.7615737915039062,
"count": 55
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 284.7722772277228,
"min": 266.74285714285713,
"max": 999.0,
"count": 55
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28762.0,
"min": 16465.0,
"max": 32532.0,
"count": 55
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.655805921052942,
"min": -0.999962551984936,
"max": 1.7332571344716208,
"count": 55
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 167.23639802634716,
"min": -31.998801663517952,
"max": 198.33279847353697,
"count": 55
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.655805921052942,
"min": -0.999962551984936,
"max": 1.7332571344716208,
"count": 55
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 167.23639802634716,
"min": -31.998801663517952,
"max": 198.33279847353697,
"count": 55
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.014311761045289608,
"min": 0.013824607029380138,
"max": 7.877579272669904,
"count": 55
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.4454878655742505,
"min": 1.4454878655742505,
"max": 133.91884763538837,
"count": 55
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 55
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 55
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673546546",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673550340"
},
"total": 3794.328383009,
"count": 1,
"self": 0.31388458899937177,
"children": {
"run_training.setup": {
"total": 0.10422475700033829,
"count": 1,
"self": 0.10422475700033829
},
"TrainerController.start_learning": {
"total": 3793.910273663,
"count": 1,
"self": 2.923829951861535,
"children": {
"TrainerController._reset_env": {
"total": 6.398805469000308,
"count": 1,
"self": 6.398805469000308
},
"TrainerController.advance": {
"total": 3784.424016544139,
"count": 106655,
"self": 3.118006187951778,
"children": {
"env_step": {
"total": 2637.887846124183,
"count": 106655,
"self": 2410.9498444851783,
"children": {
"SubprocessEnvManager._take_step": {
"total": 224.97692890794679,
"count": 106655,
"self": 8.652331628967204,
"children": {
"TorchPolicy.evaluate": {
"total": 216.32459727897958,
"count": 104079,
"self": 73.65293842995698,
"children": {
"TorchPolicy.sample_actions": {
"total": 142.6716588490226,
"count": 104079,
"self": 142.6716588490226
}
}
}
}
},
"workers": {
"total": 1.9610727310578113,
"count": 106654,
"self": 0.0,
"children": {
"worker_root": {
"total": 3784.124142800061,
"count": 106654,
"is_parallel": true,
"self": 1583.951862792128,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017226380000465724,
"count": 1,
"is_parallel": true,
"self": 0.0006288409999797295,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010937970000668429,
"count": 8,
"is_parallel": true,
"self": 0.0010937970000668429
}
}
},
"UnityEnvironment.step": {
"total": 0.04294676100016659,
"count": 1,
"is_parallel": true,
"self": 0.0005037719997744716,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044570699992618756,
"count": 1,
"is_parallel": true,
"self": 0.00044570699992618756
},
"communicator.exchange": {
"total": 0.040371760000198265,
"count": 1,
"is_parallel": true,
"self": 0.040371760000198265
},
"steps_from_proto": {
"total": 0.0016255220002676651,
"count": 1,
"is_parallel": true,
"self": 0.00042170000006080954,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012038220002068556,
"count": 8,
"is_parallel": true,
"self": 0.0012038220002068556
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2200.172280007933,
"count": 106653,
"is_parallel": true,
"self": 54.045965264194365,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 41.60172945492968,
"count": 106653,
"is_parallel": true,
"self": 41.60172945492968
},
"communicator.exchange": {
"total": 1915.218170609996,
"count": 106653,
"is_parallel": true,
"self": 1915.218170609996
},
"steps_from_proto": {
"total": 189.30641467881287,
"count": 106653,
"is_parallel": true,
"self": 44.01956417315887,
"children": {
"_process_rank_one_or_two_observation": {
"total": 145.286850505654,
"count": 853224,
"is_parallel": true,
"self": 145.286850505654
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1143.418164232004,
"count": 106654,
"self": 5.9761474510478365,
"children": {
"process_trajectory": {
"total": 260.16288358195106,
"count": 106654,
"self": 259.8498919579506,
"children": {
"RLTrainer._checkpoint": {
"total": 0.31299162400046043,
"count": 3,
"self": 0.31299162400046043
}
}
},
"_update_policy": {
"total": 877.2791331990052,
"count": 752,
"self": 331.98236177111175,
"children": {
"TorchPPOOptimizer.update": {
"total": 545.2967714278934,
"count": 37965,
"self": 545.2967714278934
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5619998521287926e-06,
"count": 1,
"self": 1.5619998521287926e-06
},
"TrainerController._save_models": {
"total": 0.1636201359997358,
"count": 1,
"self": 0.0019687749991135206,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16165136100062227,
"count": 1,
"self": 0.16165136100062227
}
}
}
}
}
}
}