{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 1.1308037042617798,
"min": 1.0398516654968262,
"max": 1.4295711517333984,
"count": 11
},
"Pyramids.Policy.Entropy.sum": {
"value": 33887.92578125,
"min": 31145.63671875,
"max": 43367.46875,
"count": 11
},
"Pyramids.Step.mean": {
"value": 329879.0,
"min": 29952.0,
"max": 329879.0,
"count": 11
},
"Pyramids.Step.sum": {
"value": 329879.0,
"min": 29952.0,
"max": 329879.0,
"count": 11
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.018549079075455666,
"min": -0.11526176333427429,
"max": 0.017504574730992317,
"count": 11
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -4.5074262619018555,
"min": -27.893346786499023,
"max": 4.148584365844727,
"count": 11
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02716442383825779,
"min": 0.02716442383825779,
"max": 0.4139914810657501,
"count": 11
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.600955009460449,
"min": 6.600955009460449,
"max": 98.11598205566406,
"count": 11
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06703679753914314,
"min": 0.06621039790661089,
"max": 0.0717391827910578,
"count": 11
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.938515165548004,
"min": 0.48714895678574394,
"max": 1.0043485590748091,
"count": 11
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.00348817364891378,
"min": 0.001142571199744791,
"max": 0.008168606512899804,
"count": 11
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.04883443108479292,
"min": 0.01521227236604492,
"max": 0.057180245590298634,
"count": 11
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00020564836002198573,
"min": 0.00020564836002198573,
"max": 0.00029515063018788575,
"count": 11
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0028790770403078004,
"min": 0.0020660544113152,
"max": 0.0036331408889531,
"count": 11
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.16854944285714282,
"min": 0.16854944285714282,
"max": 0.19838354285714285,
"count": 11
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.3596921999999996,
"min": 1.3886848,
"max": 2.6110469,
"count": 11
},
"Pyramids.Policy.Beta.mean": {
"value": 0.006858089341428572,
"min": 0.006858089341428572,
"max": 0.00983851593142857,
"count": 11
},
"Pyramids.Policy.Beta.sum": {
"value": 0.09601325078000002,
"min": 0.06886961152,
"max": 0.12112358530999999,
"count": 11
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.024172518402338028,
"min": 0.024172518402338028,
"max": 0.4678037166595459,
"count": 11
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.338415265083313,
"min": 0.338415265083313,
"max": 3.2746260166168213,
"count": 11
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 883.65625,
"min": 883.65625,
"max": 999.0,
"count": 11
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28277.0,
"min": 15984.0,
"max": 33386.0,
"count": 11
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.3215375440195203,
"min": -1.0000000521540642,
"max": -0.3215375440195203,
"count": 11
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -10.289201408624649,
"min": -29.418001741170883,
"max": -10.289201408624649,
"count": 11
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.3215375440195203,
"min": -1.0000000521540642,
"max": -0.3215375440195203,
"count": 11
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -10.289201408624649,
"min": -29.418001741170883,
"max": -10.289201408624649,
"count": 11
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.22664340073242784,
"min": 0.22664340073242784,
"max": 10.145370788872242,
"count": 11
},
"Pyramids.Policy.RndReward.sum": {
"value": 7.252588823437691,
"min": 7.242296002805233,
"max": 162.32593262195587,
"count": 11
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 11
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 11
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1718824099",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1718825220"
},
"total": 1121.0809393199997,
"count": 1,
"self": 0.4026389040009235,
"children": {
"run_training.setup": {
"total": 0.07447329199931119,
"count": 1,
"self": 0.07447329199931119
},
"TrainerController.start_learning": {
"total": 1120.6038271239995,
"count": 1,
"self": 0.7882218019931315,
"children": {
"TrainerController._reset_env": {
"total": 3.156991851999919,
"count": 1,
"self": 3.156991851999919
},
"TrainerController.advance": {
"total": 1116.4269058950067,
"count": 21569,
"self": 0.8995950239705053,
"children": {
"env_step": {
"total": 715.2752966839826,
"count": 21569,
"self": 652.419127857107,
"children": {
"SubprocessEnvManager._take_step": {
"total": 62.36315696294423,
"count": 21569,
"self": 2.7653587229369805,
"children": {
"TorchPolicy.evaluate": {
"total": 59.59779824000725,
"count": 21407,
"self": 59.59779824000725
}
}
},
"workers": {
"total": 0.493011863931315,
"count": 21568,
"self": 0.0,
"children": {
"worker_root": {
"total": 1117.8726802580586,
"count": 21568,
"is_parallel": true,
"self": 533.2390788240909,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0032746819997555576,
"count": 1,
"is_parallel": true,
"self": 0.0010501700007807813,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022245119989747764,
"count": 8,
"is_parallel": true,
"self": 0.0022245119989747764
}
}
},
"UnityEnvironment.step": {
"total": 0.06598024100003386,
"count": 1,
"is_parallel": true,
"self": 0.0007907359995442675,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005836860000272281,
"count": 1,
"is_parallel": true,
"self": 0.0005836860000272281
},
"communicator.exchange": {
"total": 0.06251860199972725,
"count": 1,
"is_parallel": true,
"self": 0.06251860199972725
},
"steps_from_proto": {
"total": 0.0020872170007351087,
"count": 1,
"is_parallel": true,
"self": 0.0004451500008144649,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016420669999206439,
"count": 8,
"is_parallel": true,
"self": 0.0016420669999206439
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 584.6336014339677,
"count": 21567,
"is_parallel": true,
"self": 18.131345316946863,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 11.108870893032872,
"count": 21567,
"is_parallel": true,
"self": 11.108870893032872
},
"communicator.exchange": {
"total": 509.26788400898204,
"count": 21567,
"is_parallel": true,
"self": 509.26788400898204
},
"steps_from_proto": {
"total": 46.1255012150059,
"count": 21567,
"is_parallel": true,
"self": 9.9612487092827,
"children": {
"_process_rank_one_or_two_observation": {
"total": 36.1642525057232,
"count": 172536,
"is_parallel": true,
"self": 36.1642525057232
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 400.25201418705365,
"count": 21568,
"self": 1.4449834341075984,
"children": {
"process_trajectory": {
"total": 61.19179011894357,
"count": 21568,
"self": 61.19179011894357
},
"_update_policy": {
"total": 337.6152406340025,
"count": 143,
"self": 133.15822928901707,
"children": {
"TorchPPOOptimizer.update": {
"total": 204.4570113449854,
"count": 7809,
"self": 204.4570113449854
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5180003174464218e-06,
"count": 1,
"self": 1.5180003174464218e-06
},
"TrainerController._save_models": {
"total": 0.23170605699942826,
"count": 1,
"self": 0.0029494599993995507,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2287565970000287,
"count": 1,
"self": 0.2287565970000287
}
}
}
}
}
}
}