{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2982460558414459,
"min": 0.2788754403591156,
"max": 0.39902108907699585,
"count": 40
},
"Pyramids.Policy.Entropy.sum": {
"value": 1479.3004150390625,
"min": 1320.7540283203125,
"max": 2101.974365234375,
"count": 40
},
"Pyramids.Step.mean": {
"value": 999897.0,
"min": 804909.0,
"max": 999897.0,
"count": 40
},
"Pyramids.Step.sum": {
"value": 999897.0,
"min": 804909.0,
"max": 999897.0,
"count": 40
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3927173614501953,
"min": 0.16222435235977173,
"max": 0.6028797030448914,
"count": 40
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 16.494129180908203,
"min": 6.651198387145996,
"max": 28.215749740600586,
"count": 40
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.010184453800320625,
"min": -0.08904891461133957,
"max": 0.010184453800320625,
"count": 40
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.42774704098701477,
"min": -3.918152093887329,
"max": 0.42774704098701477,
"count": 40
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 323.5,
"min": 233.42857142857142,
"max": 724.5,
"count": 40
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 3235.0,
"min": 1634.0,
"max": 9158.0,
"count": 40
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.476479971408844,
"min": 0.5252249669283628,
"max": 1.7665713897773199,
"count": 40
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 14.76479971408844,
"min": 4.201799735426903,
"max": 26.457399681210518,
"count": 40
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.476479971408844,
"min": 0.5252249669283628,
"max": 1.7665713897773199,
"count": 40
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 14.76479971408844,
"min": 4.201799735426903,
"max": 26.457399681210518,
"count": 40
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.029189279896672814,
"min": 0.02825928066990205,
"max": 0.07618293218547478,
"count": 40
},
"Pyramids.Policy.RndReward.sum": {
"value": 0.29189279896672815,
"min": 0.19781496468931437,
"max": 0.9577268338762224,
"count": 40
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06130017164590147,
"min": 0.05845071774739255,
"max": 0.07593325820441048,
"count": 40
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.12260034329180294,
"min": 0.11894674527381237,
"max": 0.22379596517809353,
"count": 40
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.010486101050143287,
"min": 0.0043403216695878655,
"max": 0.018942632031717543,
"count": 40
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.020972202100286573,
"min": 0.008680643339175731,
"max": 0.05682789609515263,
"count": 40
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.000099750000001e-06,
"min": 1.000099750000001e-06,
"max": 7.873068031735001e-05,
"count": 40
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.000199500000002e-06,
"min": 2.000199500000002e-06,
"max": 0.00022493464376640004,
"count": 40
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10025,
"min": 0.10025,
"max": 0.11968265,
"count": 40
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.2005,
"min": 0.2005,
"max": 0.35623360000000004,
"count": 40
},
"Pyramids.Policy.Beta.mean": {
"value": 3.4975000000000023e-05,
"min": 3.4975000000000023e-05,
"max": 0.0019762967350000007,
"count": 40
},
"Pyramids.Policy.Beta.sum": {
"value": 6.995000000000005e-05,
"min": 6.995000000000005e-05,
"max": 0.005647736640000001,
"count": 40
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008905449882149696,
"min": 0.008485357277095318,
"max": 0.011165566742420197,
"count": 40
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.017810899764299393,
"min": 0.016970714554190636,
"max": 0.03349670022726059,
"count": 40
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690539927",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690540447"
},
"total": 519.894824899,
"count": 1,
"self": 0.9943624620009359,
"children": {
"run_training.setup": {
"total": 0.03455513799963228,
"count": 1,
"self": 0.03455513799963228
},
"TrainerController.start_learning": {
"total": 518.8659072989994,
"count": 1,
"self": 0.4367341879642481,
"children": {
"TrainerController._reset_env": {
"total": 0.9879726039998786,
"count": 1,
"self": 0.9879726039998786
},
"TrainerController.advance": {
"total": 517.3269477450358,
"count": 12898,
"self": 0.39302272900295065,
"children": {
"env_step": {
"total": 343.16505507897364,
"count": 12898,
"self": 319.4674292979689,
"children": {
"SubprocessEnvManager._take_step": {
"total": 23.42836658102169,
"count": 12898,
"self": 1.0010034780698334,
"children": {
"TorchPolicy.evaluate": {
"total": 22.42736310295186,
"count": 12556,
"self": 22.42736310295186
}
}
},
"workers": {
"total": 0.2692591999830256,
"count": 12898,
"self": 0.0,
"children": {
"worker_root": {
"total": 517.6132438219938,
"count": 12898,
"is_parallel": true,
"self": 225.79152497491395,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0013732600000366801,
"count": 1,
"is_parallel": true,
"self": 0.00038755100013077026,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009857089999059099,
"count": 8,
"is_parallel": true,
"self": 0.0009857089999059099
}
}
},
"UnityEnvironment.step": {
"total": 0.05134297599943238,
"count": 1,
"is_parallel": true,
"self": 0.000539040000148816,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002860199992937851,
"count": 1,
"is_parallel": true,
"self": 0.0002860199992937851
},
"communicator.exchange": {
"total": 0.048784696999973676,
"count": 1,
"is_parallel": true,
"self": 0.048784696999973676
},
"steps_from_proto": {
"total": 0.0017332190000161063,
"count": 1,
"is_parallel": true,
"self": 0.00035108899919578107,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013821300008203252,
"count": 8,
"is_parallel": true,
"self": 0.0013821300008203252
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 291.82171884707986,
"count": 12897,
"is_parallel": true,
"self": 6.827149842975814,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.9066212840416483,
"count": 12897,
"is_parallel": true,
"self": 3.9066212840416483
},
"communicator.exchange": {
"total": 261.2293794990219,
"count": 12897,
"is_parallel": true,
"self": 261.2293794990219
},
"steps_from_proto": {
"total": 19.858568221040514,
"count": 12897,
"is_parallel": true,
"self": 4.481742367082916,
"children": {
"_process_rank_one_or_two_observation": {
"total": 15.376825853957598,
"count": 103176,
"is_parallel": true,
"self": 15.376825853957598
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 173.7688699370592,
"count": 12898,
"self": 0.9097520010345761,
"children": {
"process_trajectory": {
"total": 24.18563149502552,
"count": 12898,
"self": 24.060020305026228,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12561118999929022,
"count": 1,
"self": 0.12561118999929022
}
}
},
"_update_policy": {
"total": 148.6734864409991,
"count": 94,
"self": 61.99449725899649,
"children": {
"TorchPPOOptimizer.update": {
"total": 86.67898918200262,
"count": 4515,
"self": 86.67898918200262
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2909995348309167e-06,
"count": 1,
"self": 1.2909995348309167e-06
},
"TrainerController._save_models": {
"total": 0.11425147099998867,
"count": 1,
"self": 0.0019511899999997695,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1123002809999889,
"count": 1,
"self": 0.1123002809999889
}
}
}
}
}
}
}