{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3956897258758545,
"min": 0.3956897258758545,
"max": 1.4941805601119995,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11902.3466796875,
"min": 11902.3466796875,
"max": 45327.4609375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989994.0,
"min": 29952.0,
"max": 989994.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989994.0,
"min": 29952.0,
"max": 989994.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5976189970970154,
"min": -0.16592659056186676,
"max": 0.659978449344635,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 170.3214111328125,
"min": -39.32460021972656,
"max": 192.71371459960938,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.00884364452213049,
"min": -0.005917373578995466,
"max": 0.32431238889694214,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.5204386711120605,
"min": -1.562186598777771,
"max": 76.8620376586914,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06870910295207674,
"min": 0.06422407848969837,
"max": 0.07834014249162684,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0306365442811511,
"min": 0.48986797516375385,
"max": 1.0529297040193342,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016497523099331647,
"min": 8.481826073191226e-05,
"max": 0.016778614658202667,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24746284648997469,
"min": 0.0011874556502467716,
"max": 0.24746284648997469,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.529057490346665e-06,
"min": 7.529057490346665e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011293586235519999,
"min": 0.00011293586235519999,
"max": 0.0033835424721526,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250965333333334,
"min": 0.10250965333333334,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5376448,
"min": 1.3691136000000002,
"max": 2.5278473999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026071436799999994,
"min": 0.00026071436799999994,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003910715519999999,
"min": 0.003910715519999999,
"max": 0.11281195525999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01012395415455103,
"min": 0.01012395415455103,
"max": 0.38589438796043396,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.151859313249588,
"min": 0.151859313249588,
"max": 2.701260805130005,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 316.11340206185565,
"min": 294.4339622641509,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30663.0,
"min": 15984.0,
"max": 32443.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6835918212119414,
"min": -1.0000000521540642,
"max": 1.686038085463501,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 164.99199847877026,
"min": -32.000001668930054,
"max": 177.03399897366762,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6835918212119414,
"min": -1.0000000521540642,
"max": 1.686038085463501,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 164.99199847877026,
"min": -32.000001668930054,
"max": 177.03399897366762,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03329155694444459,
"min": 0.03329155694444459,
"max": 7.5337823908776045,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2625725805555703,
"min": 3.2625725805555703,
"max": 120.54051825404167,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675330723",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675332739"
},
"total": 2016.2061604839996,
"count": 1,
"self": 0.42615151799964224,
"children": {
"run_training.setup": {
"total": 0.10065187300006073,
"count": 1,
"self": 0.10065187300006073
},
"TrainerController.start_learning": {
"total": 2015.679357093,
"count": 1,
"self": 1.1503514329883728,
"children": {
"TrainerController._reset_env": {
"total": 6.004396050999958,
"count": 1,
"self": 6.004396050999958
},
"TrainerController.advance": {
"total": 2008.4408932640115,
"count": 63859,
"self": 1.3091010419805116,
"children": {
"env_step": {
"total": 1362.884127927004,
"count": 63859,
"self": 1263.8173869880284,
"children": {
"SubprocessEnvManager._take_step": {
"total": 98.35466166303479,
"count": 63859,
"self": 4.057708405019866,
"children": {
"TorchPolicy.evaluate": {
"total": 94.29695325801492,
"count": 62573,
"self": 31.957781293994685,
"children": {
"TorchPolicy.sample_actions": {
"total": 62.33917196402024,
"count": 62573,
"self": 62.33917196402024
}
}
}
}
},
"workers": {
"total": 0.7120792759410506,
"count": 63859,
"self": 0.0,
"children": {
"worker_root": {
"total": 2012.6690067770094,
"count": 63859,
"is_parallel": true,
"self": 841.7997528939816,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016824000000497108,
"count": 1,
"is_parallel": true,
"self": 0.0006225720001111767,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001059827999938534,
"count": 8,
"is_parallel": true,
"self": 0.001059827999938534
}
}
},
"UnityEnvironment.step": {
"total": 0.04380048399991665,
"count": 1,
"is_parallel": true,
"self": 0.0005185409999057811,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043306400004894385,
"count": 1,
"is_parallel": true,
"self": 0.00043306400004894385
},
"communicator.exchange": {
"total": 0.04134952599997632,
"count": 1,
"is_parallel": true,
"self": 0.04134952599997632
},
"steps_from_proto": {
"total": 0.0014993529999856037,
"count": 1,
"is_parallel": true,
"self": 0.00038592599992171017,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011134270000638935,
"count": 8,
"is_parallel": true,
"self": 0.0011134270000638935
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1170.8692538830278,
"count": 63858,
"is_parallel": true,
"self": 26.877096999966398,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.272695705021988,
"count": 63858,
"is_parallel": true,
"self": 21.272695705021988
},
"communicator.exchange": {
"total": 1035.3610043580538,
"count": 63858,
"is_parallel": true,
"self": 1035.3610043580538
},
"steps_from_proto": {
"total": 87.35845681998558,
"count": 63858,
"is_parallel": true,
"self": 20.295796612939398,
"children": {
"_process_rank_one_or_two_observation": {
"total": 67.06266020704618,
"count": 510864,
"is_parallel": true,
"self": 67.06266020704618
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 644.247664295027,
"count": 63859,
"self": 2.084852871008593,
"children": {
"process_trajectory": {
"total": 138.8640985420202,
"count": 63859,
"self": 138.6777656490201,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18633289300009892,
"count": 2,
"self": 0.18633289300009892
}
}
},
"_update_policy": {
"total": 503.2987128819982,
"count": 440,
"self": 189.495948427989,
"children": {
"TorchPPOOptimizer.update": {
"total": 313.8027644540092,
"count": 22806,
"self": 313.8027644540092
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.300001693191007e-07,
"count": 1,
"self": 9.300001693191007e-07
},
"TrainerController._save_models": {
"total": 0.08371541499991508,
"count": 1,
"self": 0.0019149570002809924,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08180045799963409,
"count": 1,
"self": 0.08180045799963409
}
}
}
}
}
}
}