{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.37915322184562683,
"min": 0.37915322184562683,
"max": 1.4299777746200562,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11344.2646484375,
"min": 11344.2646484375,
"max": 43379.8046875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989971.0,
"min": 29950.0,
"max": 989971.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989971.0,
"min": 29950.0,
"max": 989971.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.46266165375709534,
"min": -0.0938304215669632,
"max": 0.46266165375709534,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 124.91864776611328,
"min": -22.70696258544922,
"max": 124.91864776611328,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02452230453491211,
"min": -0.02144542895257473,
"max": 0.5268363356590271,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.6210222244262695,
"min": -5.575811386108398,
"max": 124.86021423339844,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06932729298057469,
"min": 0.06516925668152648,
"max": 0.07419057507788586,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9705821017280457,
"min": 0.5717791619913981,
"max": 1.079742052094148,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01820543239000599,
"min": 0.00138851060764724,
"max": 0.01820543239000599,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.25487605346008385,
"min": 0.019439148507061358,
"max": 0.25487605346008385,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.447126089085713e-06,
"min": 7.447126089085713e-06,
"max": 0.000295013476662175,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010425976524719999,
"min": 0.00010425976524719999,
"max": 0.0036368650877116994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248234285714286,
"min": 0.10248234285714286,
"max": 0.198337825,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4347528,
"min": 1.4347528,
"max": 2.6122883000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002579860514285714,
"min": 0.0002579860514285714,
"max": 0.009833948717499999,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00361180472,
"min": 0.00361180472,
"max": 0.12124760117000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.016395485028624535,
"min": 0.016395485028624535,
"max": 0.6447288990020752,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.22953680157661438,
"min": 0.22953680157661438,
"max": 5.157831192016602,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 388.88461538461536,
"min": 388.88461538461536,
"max": 994.0967741935484,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30333.0,
"min": 16877.0,
"max": 32718.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5085179288990986,
"min": -0.8760118155794985,
"max": 1.5448540327315394,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 117.6643984541297,
"min": -26.841401614248753,
"max": 117.6643984541297,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5085179288990986,
"min": -0.8760118155794985,
"max": 1.5448540327315394,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 117.6643984541297,
"min": -26.841401614248753,
"max": 117.6643984541297,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06632601412293763,
"min": 0.06632601412293763,
"max": 12.765743225812912,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.173429101589136,
"min": 5.173429101589136,
"max": 217.0176348388195,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1756670739",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1756673017"
},
"total": 2277.624073287,
"count": 1,
"self": 0.6335467220001192,
"children": {
"run_training.setup": {
"total": 0.022016295999947033,
"count": 1,
"self": 0.022016295999947033
},
"TrainerController.start_learning": {
"total": 2276.968510269,
"count": 1,
"self": 1.666848202026813,
"children": {
"TrainerController._reset_env": {
"total": 2.039077645999896,
"count": 1,
"self": 2.039077645999896
},
"TrainerController.advance": {
"total": 2273.1804687629733,
"count": 63654,
"self": 1.7411090220166443,
"children": {
"env_step": {
"total": 1607.3119479029374,
"count": 63654,
"self": 1441.0718877539375,
"children": {
"SubprocessEnvManager._take_step": {
"total": 165.2526353860344,
"count": 63654,
"self": 4.955970529027809,
"children": {
"TorchPolicy.evaluate": {
"total": 160.2966648570066,
"count": 62551,
"self": 160.2966648570066
}
}
},
"workers": {
"total": 0.9874247629654747,
"count": 63654,
"self": 0.0,
"children": {
"worker_root": {
"total": 2270.9054802189626,
"count": 63654,
"is_parallel": true,
"self": 955.1025025929837,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018845340000552824,
"count": 1,
"is_parallel": true,
"self": 0.0006132830001206457,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012712509999346366,
"count": 8,
"is_parallel": true,
"self": 0.0012712509999346366
}
}
},
"UnityEnvironment.step": {
"total": 0.04784972299989931,
"count": 1,
"is_parallel": true,
"self": 0.0005170669999188249,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000482414000089193,
"count": 1,
"is_parallel": true,
"self": 0.000482414000089193
},
"communicator.exchange": {
"total": 0.045157375999906435,
"count": 1,
"is_parallel": true,
"self": 0.045157375999906435
},
"steps_from_proto": {
"total": 0.0016928659999848605,
"count": 1,
"is_parallel": true,
"self": 0.0003634299998793722,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013294360001054883,
"count": 8,
"is_parallel": true,
"self": 0.0013294360001054883
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1315.8029776259789,
"count": 63653,
"is_parallel": true,
"self": 33.01333987995213,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.363504780031462,
"count": 63653,
"is_parallel": true,
"self": 23.363504780031462
},
"communicator.exchange": {
"total": 1158.8700900640092,
"count": 63653,
"is_parallel": true,
"self": 1158.8700900640092
},
"steps_from_proto": {
"total": 100.55604290198607,
"count": 63653,
"is_parallel": true,
"self": 21.078254047820337,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.47778885416574,
"count": 509224,
"is_parallel": true,
"self": 79.47778885416574
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 664.1274118380193,
"count": 63654,
"self": 3.303269883087978,
"children": {
"process_trajectory": {
"total": 128.1322529939314,
"count": 63654,
"self": 127.82176318693087,
"children": {
"RLTrainer._checkpoint": {
"total": 0.310489807000522,
"count": 2,
"self": 0.310489807000522
}
}
},
"_update_policy": {
"total": 532.6918889609999,
"count": 461,
"self": 294.49688787401374,
"children": {
"TorchPPOOptimizer.update": {
"total": 238.19500108698617,
"count": 22812,
"self": 238.19500108698617
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.039999895321671e-07,
"count": 1,
"self": 9.039999895321671e-07
},
"TrainerController._save_models": {
"total": 0.0821147540000311,
"count": 1,
"self": 0.0013446059997477278,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08077014800028337,
"count": 1,
"self": 0.08077014800028337
}
}
}
}
}
}
}