{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.34670597314834595,
"min": 0.34615060687065125,
"max": 1.48213529586792,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10306.875,
"min": 10306.875,
"max": 44962.0546875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989922.0,
"min": 29878.0,
"max": 989922.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989922.0,
"min": 29878.0,
"max": 989922.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6424602270126343,
"min": -0.055414315313100815,
"max": 0.6823984384536743,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 183.74362182617188,
"min": -13.133193016052246,
"max": 192.4363555908203,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.016552358865737915,
"min": 0.002733916277065873,
"max": 0.346781849861145,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.733974456787109,
"min": 0.7600287199020386,
"max": 83.92121124267578,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07060184093591358,
"min": 0.06483471585670485,
"max": 0.07587350159037623,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9884257731027902,
"min": 0.5311145111326336,
"max": 1.067106466080683,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01594610102308382,
"min": 0.0013081351961030888,
"max": 0.017152636766848507,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22324541432317346,
"min": 0.018313892745443244,
"max": 0.24081066871511606,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.470461795592857e-06,
"min": 7.470461795592857e-06,
"max": 0.0002952360873022571,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001045864651383,
"min": 0.0001045864651383,
"max": 0.0036359584880138996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249012142857143,
"min": 0.10249012142857143,
"max": 0.19841202857142856,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4348617000000001,
"min": 1.3888842,
"max": 2.6119861,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025876313071428577,
"min": 0.00025876313071428577,
"max": 0.009841361654285714,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036226838300000004,
"min": 0.0036226838300000004,
"max": 0.12121741139,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012069856747984886,
"min": 0.012069856747984886,
"max": 0.49766436219215393,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1689779907464981,
"min": 0.1689779907464981,
"max": 3.4836504459381104,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 296.0566037735849,
"min": 271.0980392156863,
"max": 981.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31382.0,
"min": 16677.0,
"max": 32732.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6473131851767593,
"min": -0.8642941669506186,
"max": 1.7294368817678933,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 174.6151976287365,
"min": -26.595801688730717,
"max": 178.131998822093,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6473131851767593,
"min": -0.8642941669506186,
"max": 1.7294368817678933,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 174.6151976287365,
"min": -26.595801688730717,
"max": 178.131998822093,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03700005493916457,
"min": 0.0339167768113627,
"max": 9.454067622914033,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.9220058235514443,
"min": 3.4934280115703586,
"max": 160.71914958953857,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677156627",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677158991"
},
"total": 2363.7496007439995,
"count": 1,
"self": 0.4908028820000254,
"children": {
"run_training.setup": {
"total": 0.18343311200010248,
"count": 1,
"self": 0.18343311200010248
},
"TrainerController.start_learning": {
"total": 2363.0753647499996,
"count": 1,
"self": 1.4112876969747958,
"children": {
"TrainerController._reset_env": {
"total": 6.556698480000023,
"count": 1,
"self": 6.556698480000023
},
"TrainerController.advance": {
"total": 2355.022312724025,
"count": 64148,
"self": 1.4814635370130418,
"children": {
"env_step": {
"total": 1598.1936867950008,
"count": 64148,
"self": 1482.6865004839499,
"children": {
"SubprocessEnvManager._take_step": {
"total": 114.6488389150743,
"count": 64148,
"self": 4.79480462210563,
"children": {
"TorchPolicy.evaluate": {
"total": 109.85403429296866,
"count": 62562,
"self": 37.36226736789854,
"children": {
"TorchPolicy.sample_actions": {
"total": 72.49176692507012,
"count": 62562,
"self": 72.49176692507012
}
}
}
}
},
"workers": {
"total": 0.8583473959765797,
"count": 64148,
"self": 0.0,
"children": {
"worker_root": {
"total": 2357.8289917699835,
"count": 64148,
"is_parallel": true,
"self": 993.0450782070357,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002853337000033207,
"count": 1,
"is_parallel": true,
"self": 0.0009301219999997556,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019232150000334514,
"count": 8,
"is_parallel": true,
"self": 0.0019232150000334514
}
}
},
"UnityEnvironment.step": {
"total": 0.04617006100011167,
"count": 1,
"is_parallel": true,
"self": 0.00052736000020559,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005271689999517548,
"count": 1,
"is_parallel": true,
"self": 0.0005271689999517548
},
"communicator.exchange": {
"total": 0.043572516000040196,
"count": 1,
"is_parallel": true,
"self": 0.043572516000040196
},
"steps_from_proto": {
"total": 0.0015430159999141324,
"count": 1,
"is_parallel": true,
"self": 0.00038690799988216895,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011561080000319635,
"count": 8,
"is_parallel": true,
"self": 0.0011561080000319635
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1364.7839135629479,
"count": 64147,
"is_parallel": true,
"self": 32.291711077888976,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.384218727021562,
"count": 64147,
"is_parallel": true,
"self": 23.384218727021562
},
"communicator.exchange": {
"total": 1214.6637610580258,
"count": 64147,
"is_parallel": true,
"self": 1214.6637610580258
},
"steps_from_proto": {
"total": 94.44422270001155,
"count": 64147,
"is_parallel": true,
"self": 22.64808148503448,
"children": {
"_process_rank_one_or_two_observation": {
"total": 71.79614121497707,
"count": 513176,
"is_parallel": true,
"self": 71.79614121497707
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 755.3471623920113,
"count": 64148,
"self": 2.744578014981016,
"children": {
"process_trajectory": {
"total": 169.2325484070227,
"count": 64148,
"self": 168.99433193702293,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2382164699997702,
"count": 2,
"self": 0.2382164699997702
}
}
},
"_update_policy": {
"total": 583.3700359700076,
"count": 459,
"self": 223.44349088704894,
"children": {
"TorchPPOOptimizer.update": {
"total": 359.9265450829587,
"count": 22773,
"self": 359.9265450829587
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.249997674487531e-07,
"count": 1,
"self": 9.249997674487531e-07
},
"TrainerController._save_models": {
"total": 0.08506492400010757,
"count": 1,
"self": 0.001449626000066928,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08361529800004064,
"count": 1,
"self": 0.08361529800004064
}
}
}
}
}
}
}