{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.8323522210121155,
"min": 0.8323522210121155,
"max": 1.4688894748687744,
"count": 3
},
"Pyramids.Policy.Entropy.sum": {
"value": 24930.61328125,
"min": 24930.61328125,
"max": 44560.23046875,
"count": 3
},
"Pyramids.Step.mean": {
"value": 89953.0,
"min": 29952.0,
"max": 89953.0,
"count": 3
},
"Pyramids.Step.sum": {
"value": 89953.0,
"min": 29952.0,
"max": 89953.0,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.09592054784297943,
"min": -0.09592054784297943,
"max": 0.03983438014984131,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -23.116851806640625,
"min": -23.116851806640625,
"max": 9.44074821472168,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.17069073021411896,
"min": 0.12402097135782242,
"max": 0.22428525984287262,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 41.13646697998047,
"min": 29.39297103881836,
"max": 53.828460693359375,
"count": 3
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06733157473897167,
"min": 0.06733157473897167,
"max": 0.07045983251055002,
"count": 3
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.7406473221286883,
"min": 0.4932188275738501,
"max": 0.7406473221286883,
"count": 3
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0009421695563272336,
"min": 0.0009421695563272336,
"max": 0.004145141092810184,
"count": 3
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.01036386511959957,
"min": 0.009659244228060963,
"max": 0.02901598764967129,
"count": 3
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.818116484872727e-05,
"min": 7.818116484872727e-05,
"max": 0.0002515063018788571,
"count": 3
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.000859992813336,
"min": 0.000859992813336,
"max": 0.0017605441131519997,
"count": 3
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.12606036363636364,
"min": 0.12606036363636364,
"max": 0.1838354285714286,
"count": 3
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.3866640000000001,
"min": 1.2392880000000002,
"max": 1.3866640000000001,
"count": 3
},
"Pyramids.Policy.Beta.mean": {
"value": 0.002613430327272728,
"min": 0.002613430327272728,
"max": 0.008385159314285713,
"count": 3
},
"Pyramids.Policy.Beta.sum": {
"value": 0.028747733600000006,
"min": 0.028747733600000006,
"max": 0.058696115199999996,
"count": 3
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.12022758275270462,
"min": 0.12022758275270462,
"max": 0.3930082619190216,
"count": 3
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 1.3225034475326538,
"min": 1.3225034475326538,
"max": 2.7510578632354736,
"count": 3
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 987.0909090909091,
"min": 987.0909090909091,
"max": 999.0,
"count": 3
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32574.0,
"min": 15984.0,
"max": 32574.0,
"count": 3
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.9274182326414369,
"min": -1.0000000521540642,
"max": -0.9274182326414369,
"count": 3
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -30.604801677167416,
"min": -30.604801677167416,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.9274182326414369,
"min": -1.0000000521540642,
"max": -0.9274182326414369,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -30.604801677167416,
"min": -30.604801677167416,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.RndReward.mean": {
"value": 1.3880127560008655,
"min": 1.3880127560008655,
"max": 7.517998921684921,
"count": 3
},
"Pyramids.Policy.RndReward.sum": {
"value": 45.804420948028564,
"min": 45.804420948028564,
"max": 120.28798274695873,
"count": 3
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677257043",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677257259"
},
"total": 216.592328935,
"count": 1,
"self": 0.48901486199997635,
"children": {
"run_training.setup": {
"total": 0.1088189990000501,
"count": 1,
"self": 0.1088189990000501
},
"TrainerController.start_learning": {
"total": 215.99449507399999,
"count": 1,
"self": 0.1367197060016565,
"children": {
"TrainerController._reset_env": {
"total": 9.11554398300001,
"count": 1,
"self": 9.11554398300001
},
"TrainerController.advance": {
"total": 206.6333847529982,
"count": 6264,
"self": 0.1507085469962135,
"children": {
"env_step": {
"total": 129.7133109340047,
"count": 6264,
"self": 117.63257043099958,
"children": {
"SubprocessEnvManager._take_step": {
"total": 11.988092498002857,
"count": 6264,
"self": 0.5082369689985171,
"children": {
"TorchPolicy.evaluate": {
"total": 11.47985552900434,
"count": 6258,
"self": 3.9001356640038694,
"children": {
"TorchPolicy.sample_actions": {
"total": 7.57971986500047,
"count": 6258,
"self": 7.57971986500047
}
}
}
}
},
"workers": {
"total": 0.09264800500227466,
"count": 6264,
"self": 0.0,
"children": {
"worker_root": {
"total": 215.49129896300167,
"count": 6264,
"is_parallel": true,
"self": 109.81543781099987,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005103849000022365,
"count": 1,
"is_parallel": true,
"self": 0.002761932999931105,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023419160000912598,
"count": 8,
"is_parallel": true,
"self": 0.0023419160000912598
}
}
},
"UnityEnvironment.step": {
"total": 0.05162206199997854,
"count": 1,
"is_parallel": true,
"self": 0.0005609809999214121,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048644900005001546,
"count": 1,
"is_parallel": true,
"self": 0.00048644900005001546
},
"communicator.exchange": {
"total": 0.04863985499997625,
"count": 1,
"is_parallel": true,
"self": 0.04863985499997625
},
"steps_from_proto": {
"total": 0.0019347770000308628,
"count": 1,
"is_parallel": true,
"self": 0.0005720340001289514,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013627429999019114,
"count": 8,
"is_parallel": true,
"self": 0.0013627429999019114
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 105.6758611520018,
"count": 6263,
"is_parallel": true,
"self": 3.2793998159993407,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.5232583559973705,
"count": 6263,
"is_parallel": true,
"self": 2.5232583559973705
},
"communicator.exchange": {
"total": 89.83286614800363,
"count": 6263,
"is_parallel": true,
"self": 89.83286614800363
},
"steps_from_proto": {
"total": 10.040336832001458,
"count": 6263,
"is_parallel": true,
"self": 2.4256375919957804,
"children": {
"_process_rank_one_or_two_observation": {
"total": 7.614699240005677,
"count": 50104,
"is_parallel": true,
"self": 7.614699240005677
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 76.76936527199729,
"count": 6264,
"self": 0.196145302994978,
"children": {
"process_trajectory": {
"total": 16.45395645200216,
"count": 6264,
"self": 16.45395645200216
},
"_update_policy": {
"total": 60.11926351700015,
"count": 30,
"self": 23.195737322997616,
"children": {
"TorchPPOOptimizer.update": {
"total": 36.923526194002534,
"count": 2286,
"self": 36.923526194002534
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.930001058615744e-07,
"count": 1,
"self": 8.930001058615744e-07
},
"TrainerController._save_models": {
"total": 0.10884573900000305,
"count": 1,
"self": 0.0014978359999986424,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10734790300000441,
"count": 1,
"self": 0.10734790300000441
}
}
}
}
}
}
}