{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.1471748799085617,
"min": 0.13690005242824554,
"max": 1.4876949787139893,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4415.24658203125,
"min": 4074.1455078125,
"max": 45130.71484375,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999878.0,
"min": 29952.0,
"max": 2999878.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999878.0,
"min": 29952.0,
"max": 2999878.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7705802321434021,
"min": -0.10834674537181854,
"max": 0.8698886632919312,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 232.71522521972656,
"min": -26.003219604492188,
"max": 265.3160400390625,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.001598284812644124,
"min": -0.016859225928783417,
"max": 0.24399752914905548,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.4826820194721222,
"min": -5.007190227508545,
"max": 58.55940628051758,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06900397748090438,
"min": 0.06452420612375502,
"max": 0.07413557439992603,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9660556847326612,
"min": 0.4921033891431731,
"max": 1.0869312021532096,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01781005312001244,
"min": 7.873184127952322e-05,
"max": 0.01781005312001244,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24934074368017414,
"min": 0.0009447820953542786,
"max": 0.25156012953911894,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5724280473190502e-06,
"min": 1.5724280473190502e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.2013992662466702e-05,
"min": 2.2013992662466702e-05,
"max": 0.003717869460710233,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10052410952380952,
"min": 0.10052410952380952,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4073375333333333,
"min": 1.3897045333333333,
"max": 2.707453266666667,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.235854142857152e-05,
"min": 6.235854142857152e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008730195800000013,
"min": 0.0008730195800000013,
"max": 0.12394504768999998,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005207408685237169,
"min": 0.004798105917870998,
"max": 0.3373314440250397,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.07290372252464294,
"min": 0.06717348098754883,
"max": 2.3613200187683105,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 226.98529411764707,
"min": 216.79861111111111,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30870.0,
"min": 15984.0,
"max": 35450.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7288779319976182,
"min": -1.0000000521540642,
"max": 1.7819999878605206,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 235.12739875167608,
"min": -32.000001668930054,
"max": 254.78019939363003,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7288779319976182,
"min": -1.0000000521540642,
"max": 1.7819999878605206,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 235.12739875167608,
"min": -32.000001668930054,
"max": 254.78019939363003,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.012226224798456208,
"min": 0.011491444979355947,
"max": 7.188269507139921,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.6627665725900442,
"min": 1.4320630248403177,
"max": 115.01231211423874,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1749651206",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1749659197"
},
"total": 7990.335616017999,
"count": 1,
"self": 0.48975502000030247,
"children": {
"run_training.setup": {
"total": 0.020078172000012273,
"count": 1,
"self": 0.020078172000012273
},
"TrainerController.start_learning": {
"total": 7989.825782825999,
"count": 1,
"self": 5.199687871937385,
"children": {
"TrainerController._reset_env": {
"total": 2.2379248070001267,
"count": 1,
"self": 2.2379248070001267
},
"TrainerController.advance": {
"total": 7982.291184967064,
"count": 194654,
"self": 5.213146053793025,
"children": {
"env_step": {
"total": 5797.273527327264,
"count": 194654,
"self": 5257.859390627365,
"children": {
"SubprocessEnvManager._take_step": {
"total": 536.3464188870325,
"count": 194654,
"self": 15.796881484900496,
"children": {
"TorchPolicy.evaluate": {
"total": 520.549537402132,
"count": 187553,
"self": 520.549537402132
}
}
},
"workers": {
"total": 3.0677178128653395,
"count": 194654,
"self": 0.0,
"children": {
"worker_root": {
"total": 7971.763136825987,
"count": 194654,
"is_parallel": true,
"self": 3109.0482559648526,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0029706820000683365,
"count": 1,
"is_parallel": true,
"self": 0.0008487519994559989,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021219300006123376,
"count": 8,
"is_parallel": true,
"self": 0.0021219300006123376
}
}
},
"UnityEnvironment.step": {
"total": 0.0498361320001095,
"count": 1,
"is_parallel": true,
"self": 0.0005349279999791179,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005230209999353974,
"count": 1,
"is_parallel": true,
"self": 0.0005230209999353974
},
"communicator.exchange": {
"total": 0.04708934300015244,
"count": 1,
"is_parallel": true,
"self": 0.04708934300015244
},
"steps_from_proto": {
"total": 0.0016888400000425463,
"count": 1,
"is_parallel": true,
"self": 0.00038889899997229804,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012999410000702483,
"count": 8,
"is_parallel": true,
"self": 0.0012999410000702483
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4862.714880861135,
"count": 194653,
"is_parallel": true,
"self": 104.18845278116169,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.87457518586871,
"count": 194653,
"is_parallel": true,
"self": 76.87457518586871
},
"communicator.exchange": {
"total": 4359.0291461948345,
"count": 194653,
"is_parallel": true,
"self": 4359.0291461948345
},
"steps_from_proto": {
"total": 322.6227066992692,
"count": 194653,
"is_parallel": true,
"self": 67.38344741427454,
"children": {
"_process_rank_one_or_two_observation": {
"total": 255.23925928499466,
"count": 1557224,
"is_parallel": true,
"self": 255.23925928499466
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2179.804511586007,
"count": 194654,
"self": 10.065874891726708,
"children": {
"process_trajectory": {
"total": 441.943926444269,
"count": 194654,
"self": 441.2614479252684,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6824785190005969,
"count": 6,
"self": 0.6824785190005969
}
}
},
"_update_policy": {
"total": 1727.7947102500111,
"count": 1382,
"self": 951.2882620168898,
"children": {
"TorchPPOOptimizer.update": {
"total": 776.5064482331213,
"count": 68307,
"self": 776.5064482331213
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.6119993233587593e-06,
"count": 1,
"self": 1.6119993233587593e-06
},
"TrainerController._save_models": {
"total": 0.09698356799890462,
"count": 1,
"self": 0.0014890839975123527,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09549448400139227,
"count": 1,
"self": 0.09549448400139227
}
}
}
}
}
}
}