{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.1622338891029358,
"min": 0.14618344604969025,
"max": 1.4289162158966064,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4913.740234375,
"min": 4383.16455078125,
"max": 43347.6015625,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999874.0,
"min": 29952.0,
"max": 2999874.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999874.0,
"min": 29952.0,
"max": 2999874.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8521166443824768,
"min": -0.09871521592140198,
"max": 0.9118103384971619,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 269.26885986328125,
"min": -23.790367126464844,
"max": 283.6611328125,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.022271370515227318,
"min": -0.03286188840866089,
"max": 0.2849113941192627,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.037753105163574,
"min": -9.135604858398438,
"max": 68.91742706298828,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06838717891757067,
"min": 0.06397492079122456,
"max": 0.07394123171300938,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9574205048459894,
"min": 0.4882302018599692,
"max": 1.1091184756951407,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015042851448275283,
"min": 0.00011704448483805993,
"max": 0.016249826336639843,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21059992027585395,
"min": 0.001638622787732839,
"max": 0.22950032894846564,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.436570949747612e-06,
"min": 1.436570949747612e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.0111993296466567e-05,
"min": 2.0111993296466567e-05,
"max": 0.004052766049078,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10047882380952382,
"min": 0.10047882380952382,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4067035333333335,
"min": 1.3962282666666668,
"max": 2.7675462,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.783449857142833e-05,
"min": 5.783449857142833e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008096829799999967,
"min": 0.0008096829799999967,
"max": 0.1350971078,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.006701822392642498,
"min": 0.00638990942388773,
"max": 0.41804128885269165,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.09382551163434982,
"min": 0.08945873379707336,
"max": 2.9262890815734863,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 204.1689189189189,
"min": 198.29655172413794,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30217.0,
"min": 15984.0,
"max": 33525.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7823135075536933,
"min": -1.0000000521540642,
"max": 1.8017034343604383,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 263.7823991179466,
"min": -31.991201624274254,
"max": 263.7823991179466,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7823135075536933,
"min": -1.0000000521540642,
"max": 1.8017034343604383,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 263.7823991179466,
"min": -31.991201624274254,
"max": 263.7823991179466,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.01411674489114572,
"min": 0.01392515986585279,
"max": 7.474438391625881,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.0892782438895665,
"min": 1.9608714027108363,
"max": 119.5910142660141,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681845689",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681852927"
},
"total": 7238.132455441,
"count": 1,
"self": 0.42544537799949467,
"children": {
"run_training.setup": {
"total": 0.10305819700010943,
"count": 1,
"self": 0.10305819700010943
},
"TrainerController.start_learning": {
"total": 7237.603951866001,
"count": 1,
"self": 4.459148576253938,
"children": {
"TrainerController._reset_env": {
"total": 4.598355805999972,
"count": 1,
"self": 4.598355805999972
},
"TrainerController.advance": {
"total": 7228.444594087748,
"count": 195100,
"self": 4.729589556640349,
"children": {
"env_step": {
"total": 5379.953861019,
"count": 195100,
"self": 5052.31178084912,
"children": {
"SubprocessEnvManager._take_step": {
"total": 324.9731730289752,
"count": 195100,
"self": 14.875191451193814,
"children": {
"TorchPolicy.evaluate": {
"total": 310.0979815777814,
"count": 187553,
"self": 310.0979815777814
}
}
},
"workers": {
"total": 2.6689071409050484,
"count": 195100,
"self": 0.0,
"children": {
"worker_root": {
"total": 7221.8169295218095,
"count": 195100,
"is_parallel": true,
"self": 2523.9537835697884,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018918369999028073,
"count": 1,
"is_parallel": true,
"self": 0.0006427329999496578,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012491039999531495,
"count": 8,
"is_parallel": true,
"self": 0.0012491039999531495
}
}
},
"UnityEnvironment.step": {
"total": 0.08767036300014297,
"count": 1,
"is_parallel": true,
"self": 0.000697515000410931,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005459899998641049,
"count": 1,
"is_parallel": true,
"self": 0.0005459899998641049
},
"communicator.exchange": {
"total": 0.08425507699985246,
"count": 1,
"is_parallel": true,
"self": 0.08425507699985246
},
"steps_from_proto": {
"total": 0.0021717810000154714,
"count": 1,
"is_parallel": true,
"self": 0.00046036100025048654,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017114199997649848,
"count": 8,
"is_parallel": true,
"self": 0.0017114199997649848
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4697.863145952021,
"count": 195099,
"is_parallel": true,
"self": 98.45756610395074,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 72.91023982106185,
"count": 195099,
"is_parallel": true,
"self": 72.91023982106185
},
"communicator.exchange": {
"total": 4227.688078198946,
"count": 195099,
"is_parallel": true,
"self": 4227.688078198946
},
"steps_from_proto": {
"total": 298.8072618280628,
"count": 195099,
"is_parallel": true,
"self": 64.83159208341863,
"children": {
"_process_rank_one_or_two_observation": {
"total": 233.97566974464416,
"count": 1560792,
"is_parallel": true,
"self": 233.97566974464416
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1843.761143512107,
"count": 195100,
"self": 8.401244521984381,
"children": {
"process_trajectory": {
"total": 340.66981527112057,
"count": 195100,
"self": 340.07694639512056,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5928688760000114,
"count": 6,
"self": 0.5928688760000114
}
}
},
"_update_policy": {
"total": 1494.6900837190021,
"count": 1403,
"self": 961.5675748199594,
"children": {
"TorchPPOOptimizer.update": {
"total": 533.1225088990427,
"count": 68433,
"self": 533.1225088990427
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1060001270379871e-06,
"count": 1,
"self": 1.1060001270379871e-06
},
"TrainerController._save_models": {
"total": 0.10185228999944229,
"count": 1,
"self": 0.00149304799924721,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10035924200019508,
"count": 1,
"self": 0.10035924200019508
}
}
}
}
}
}
}