{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.45222407579421997,
"min": 0.45031824707984924,
"max": 1.458025336265564,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13545.015625,
"min": 13545.015625,
"max": 44230.65625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989935.0,
"min": 29931.0,
"max": 989935.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989935.0,
"min": 29931.0,
"max": 989935.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.55757075548172,
"min": -0.12418726086616516,
"max": 0.55757075548172,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 153.8895263671875,
"min": -29.92913055419922,
"max": 153.8895263671875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.025798780843615532,
"min": 0.001230901456438005,
"max": 0.47238585352897644,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.1204633712768555,
"min": 0.3348051905632019,
"max": 111.9554443359375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06992846984455052,
"min": 0.06374605744603043,
"max": 0.0724483949298917,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0489270476682577,
"min": 0.5795871594391336,
"max": 1.0608616909251878,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016573755301780894,
"min": 0.00046503534088704275,
"max": 0.016573755301780894,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2486063295267134,
"min": 0.005580424090644513,
"max": 0.2486063295267134,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.434857521746671e-06,
"min": 7.434857521746671e-06,
"max": 0.000294759976746675,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011152286282620006,
"min": 0.00011152286282620006,
"max": 0.0034382088539303994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247825333333334,
"min": 0.10247825333333334,
"max": 0.19825332499999998,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5371738000000001,
"min": 1.4778601,
"max": 2.4013907999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025757750800000013,
"min": 0.00025757750800000013,
"max": 0.0098255071675,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038636626200000018,
"min": 0.0038636626200000018,
"max": 0.11461235303999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008056205697357655,
"min": 0.008000189438462257,
"max": 0.36947691440582275,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1208430826663971,
"min": 0.1120026558637619,
"max": 2.955815315246582,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 345.3837209302326,
"min": 345.3837209302326,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29703.0,
"min": 17369.0,
"max": 32756.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5848302115188089,
"min": -0.9999161809682846,
"max": 1.5848302115188089,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 136.29539819061756,
"min": -30.997401610016823,
"max": 136.29539819061756,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5848302115188089,
"min": -0.9999161809682846,
"max": 1.5848302115188089,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 136.29539819061756,
"min": -30.997401610016823,
"max": 136.29539819061756,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.028647029895922927,
"min": 0.028647029895922927,
"max": 7.631950915687614,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.4636445710493717,
"min": 2.4636445710493717,
"max": 137.37511648237705,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1715259905",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1715262276"
},
"total": 2370.8327652850003,
"count": 1,
"self": 0.6457729829999153,
"children": {
"run_training.setup": {
"total": 0.061939832999996725,
"count": 1,
"self": 0.061939832999996725
},
"TrainerController.start_learning": {
"total": 2370.1250524690004,
"count": 1,
"self": 1.5321281059582361,
"children": {
"TrainerController._reset_env": {
"total": 3.05153940699995,
"count": 1,
"self": 3.05153940699995
},
"TrainerController.advance": {
"total": 2365.4493206580423,
"count": 63758,
"self": 1.5998393751065123,
"children": {
"env_step": {
"total": 1695.7151448649677,
"count": 63758,
"self": 1547.7493479258965,
"children": {
"SubprocessEnvManager._take_step": {
"total": 147.01777367603722,
"count": 63758,
"self": 5.452806942973552,
"children": {
"TorchPolicy.evaluate": {
"total": 141.56496673306367,
"count": 62553,
"self": 141.56496673306367
}
}
},
"workers": {
"total": 0.9480232630340879,
"count": 63758,
"self": 0.0,
"children": {
"worker_root": {
"total": 2364.2101253830356,
"count": 63758,
"is_parallel": true,
"self": 953.2112145269991,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005453439000007165,
"count": 1,
"is_parallel": true,
"self": 0.0038916349999453814,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015618040000617839,
"count": 8,
"is_parallel": true,
"self": 0.0015618040000617839
}
}
},
"UnityEnvironment.step": {
"total": 0.05156149499998719,
"count": 1,
"is_parallel": true,
"self": 0.0006632819997776096,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047409300009348954,
"count": 1,
"is_parallel": true,
"self": 0.00047409300009348954
},
"communicator.exchange": {
"total": 0.048620781000067836,
"count": 1,
"is_parallel": true,
"self": 0.048620781000067836
},
"steps_from_proto": {
"total": 0.001803339000048254,
"count": 1,
"is_parallel": true,
"self": 0.0003875419999985752,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001415797000049679,
"count": 8,
"is_parallel": true,
"self": 0.001415797000049679
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1410.9989108560364,
"count": 63757,
"is_parallel": true,
"self": 36.36750404308236,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.650035150041845,
"count": 63757,
"is_parallel": true,
"self": 25.650035150041845
},
"communicator.exchange": {
"total": 1239.847735687948,
"count": 63757,
"is_parallel": true,
"self": 1239.847735687948
},
"steps_from_proto": {
"total": 109.13363597496448,
"count": 63757,
"is_parallel": true,
"self": 22.52969078999297,
"children": {
"_process_rank_one_or_two_observation": {
"total": 86.60394518497151,
"count": 510056,
"is_parallel": true,
"self": 86.60394518497151
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 668.1343364179681,
"count": 63758,
"self": 2.932238520954911,
"children": {
"process_trajectory": {
"total": 141.7804977120079,
"count": 63758,
"self": 141.50903334600775,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2714643660001457,
"count": 2,
"self": 0.2714643660001457
}
}
},
"_update_policy": {
"total": 523.4216001850053,
"count": 450,
"self": 308.91229263299306,
"children": {
"TorchPPOOptimizer.update": {
"total": 214.50930755201227,
"count": 22761,
"self": 214.50930755201227
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.089999366551638e-07,
"count": 1,
"self": 9.089999366551638e-07
},
"TrainerController._save_models": {
"total": 0.09206338900003175,
"count": 1,
"self": 0.0014889880003465805,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09057440099968517,
"count": 1,
"self": 0.09057440099968517
}
}
}
}
}
}
}