{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.567354679107666,
"min": 0.567354679107666,
"max": 1.5097726583480835,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17020.640625,
"min": 17020.640625,
"max": 45800.46484375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989985.0,
"min": 29952.0,
"max": 989985.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989985.0,
"min": 29952.0,
"max": 989985.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3195567727088928,
"min": -0.09948308020830154,
"max": 0.3195567727088928,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 81.48697662353516,
"min": -23.975421905517578,
"max": 81.48697662353516,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.8402900099754333,
"min": -0.8402900099754333,
"max": 0.38558143377304077,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -214.27395629882812,
"min": -214.27395629882812,
"max": 99.48001098632812,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0684488264675666,
"min": 0.06584209471544626,
"max": 0.07507871512935206,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.026732397013499,
"min": 0.4864273549207412,
"max": 1.0723875221447086,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.11867173352382249,
"min": 5.754345499305599e-05,
"max": 0.11867173352382249,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 1.7800760028573372,
"min": 0.0007480649149097278,
"max": 1.7800760028573372,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.436657521146666e-06,
"min": 7.436657521146666e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001115498628172,
"min": 0.0001115498628172,
"max": 0.0035077178307608,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247885333333334,
"min": 0.10247885333333334,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5371828,
"min": 1.3886848,
"max": 2.5692391999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025763744800000007,
"min": 0.00025763744800000007,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003864561720000001,
"min": 0.003864561720000001,
"max": 0.11694699608,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010589645244181156,
"min": 0.010589645244181156,
"max": 0.3604753911495209,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15884467959403992,
"min": 0.15082339942455292,
"max": 2.5233278274536133,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 584.94,
"min": 527.6851851851852,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29247.0,
"min": 15984.0,
"max": 33551.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.1349479669332505,
"min": -1.0000000521540642,
"max": 1.2231345184824682,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 56.74739834666252,
"min": -31.987201616168022,
"max": 67.52879796922207,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.1349479669332505,
"min": -1.0000000521540642,
"max": 1.2231345184824682,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 56.74739834666252,
"min": -31.987201616168022,
"max": 67.52879796922207,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06401431587699336,
"min": 0.06044924449535054,
"max": 7.28570517199114,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2007157938496675,
"min": 3.2007157938496675,
"max": 116.57128275185823,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680854781",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680856798"
},
"total": 2017.467263864,
"count": 1,
"self": 0.8463550510000459,
"children": {
"run_training.setup": {
"total": 0.11600347800003874,
"count": 1,
"self": 0.11600347800003874
},
"TrainerController.start_learning": {
"total": 2016.5049053349999,
"count": 1,
"self": 1.432618126049647,
"children": {
"TrainerController._reset_env": {
"total": 4.123837328000036,
"count": 1,
"self": 4.123837328000036
},
"TrainerController.advance": {
"total": 2010.80086303195,
"count": 63388,
"self": 1.4757538129961176,
"children": {
"env_step": {
"total": 1394.3680518869553,
"count": 63388,
"self": 1287.10601816496,
"children": {
"SubprocessEnvManager._take_step": {
"total": 106.41884746996152,
"count": 63388,
"self": 4.725508292934592,
"children": {
"TorchPolicy.evaluate": {
"total": 101.69333917702693,
"count": 62558,
"self": 101.69333917702693
}
}
},
"workers": {
"total": 0.8431862520337745,
"count": 63388,
"self": 0.0,
"children": {
"worker_root": {
"total": 2011.462517325956,
"count": 63388,
"is_parallel": true,
"self": 835.0931142940215,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018826200000603421,
"count": 1,
"is_parallel": true,
"self": 0.0006436910000502394,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012389290000101028,
"count": 8,
"is_parallel": true,
"self": 0.0012389290000101028
}
}
},
"UnityEnvironment.step": {
"total": 0.1008964930001639,
"count": 1,
"is_parallel": true,
"self": 0.0005205810000461497,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045323800009100523,
"count": 1,
"is_parallel": true,
"self": 0.00045323800009100523
},
"communicator.exchange": {
"total": 0.09830743899988192,
"count": 1,
"is_parallel": true,
"self": 0.09830743899988192
},
"steps_from_proto": {
"total": 0.0016152350001448212,
"count": 1,
"is_parallel": true,
"self": 0.0003743830000075832,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001240852000137238,
"count": 8,
"is_parallel": true,
"self": 0.001240852000137238
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1176.3694030319346,
"count": 63387,
"is_parallel": true,
"self": 32.1670963778422,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.600417367047612,
"count": 63387,
"is_parallel": true,
"self": 22.600417367047612
},
"communicator.exchange": {
"total": 1028.9116710230712,
"count": 63387,
"is_parallel": true,
"self": 1028.9116710230712
},
"steps_from_proto": {
"total": 92.69021826397352,
"count": 63387,
"is_parallel": true,
"self": 19.769331865938966,
"children": {
"_process_rank_one_or_two_observation": {
"total": 72.92088639803455,
"count": 507096,
"is_parallel": true,
"self": 72.92088639803455
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 614.9570573319986,
"count": 63388,
"self": 2.656180250017769,
"children": {
"process_trajectory": {
"total": 101.7046727179843,
"count": 63388,
"self": 101.44698761598443,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2576851019998685,
"count": 2,
"self": 0.2576851019998685
}
}
},
"_update_policy": {
"total": 510.59620436399655,
"count": 450,
"self": 326.6329011529758,
"children": {
"TorchPPOOptimizer.update": {
"total": 183.96330321102073,
"count": 22764,
"self": 183.96330321102073
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1699999049596954e-06,
"count": 1,
"self": 1.1699999049596954e-06
},
"TrainerController._save_models": {
"total": 0.1475856790002581,
"count": 1,
"self": 0.001886413000192988,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1456992660000651,
"count": 1,
"self": 0.1456992660000651
}
}
}
}
}
}
}