{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.9593636393547058,
"min": 0.9593636393547058,
"max": 1.4812427759170532,
"count": 3
},
"Pyramids.Policy.Entropy.sum": {
"value": 28734.859375,
"min": 28734.859375,
"max": 44934.98046875,
"count": 3
},
"Pyramids.Step.mean": {
"value": 89919.0,
"min": 29952.0,
"max": 89919.0,
"count": 3
},
"Pyramids.Step.sum": {
"value": 89919.0,
"min": 29952.0,
"max": 89919.0,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.07940179854631424,
"min": -0.12243341654539108,
"max": -0.07940179854631424,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -19.135833740234375,
"min": -29.38401985168457,
"max": -19.135833740234375,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.23632477223873138,
"min": 0.23632477223873138,
"max": 0.4205045998096466,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 56.95426940917969,
"min": 56.95426940917969,
"max": 99.65959167480469,
"count": 3
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06602938349133491,
"min": 0.06602938349133491,
"max": 0.0764392531410822,
"count": 3
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.5942644514220142,
"min": 0.4630148509081089,
"max": 0.5942644514220142,
"count": 3
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0019646780074602144,
"min": 0.0009226359610325798,
"max": 0.012670879254501072,
"count": 3
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.01768210206714193,
"min": 0.006458451727228059,
"max": 0.08869615478150751,
"count": 3
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.677107440966665e-05,
"min": 7.677107440966665e-05,
"max": 0.0002515063018788571,
"count": 3
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0006909396696869999,
"min": 0.0006909396696869999,
"max": 0.0017605441131519997,
"count": 3
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.12559033333333336,
"min": 0.12559033333333336,
"max": 0.1838354285714286,
"count": 3
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.1303130000000001,
"min": 1.0911359999999999,
"max": 1.2868480000000002,
"count": 3
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0025664743,
"min": 0.0025664743,
"max": 0.008385159314285713,
"count": 3
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0230982687,
"min": 0.0230982687,
"max": 0.058696115199999996,
"count": 3
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.14726664125919342,
"min": 0.14726664125919342,
"max": 0.6035156846046448,
"count": 3
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 1.3253997564315796,
"min": 1.3253997564315796,
"max": 4.224609851837158,
"count": 3
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 970.6060606060606,
"min": 970.6060606060606,
"max": 999.0,
"count": 3
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32030.0,
"min": 15984.0,
"max": 32030.0,
"count": 3
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.8503333860726068,
"min": -1.0000000521540642,
"max": -0.8503333860726068,
"count": 3
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -28.061001740396023,
"min": -32.000001668930054,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.8503333860726068,
"min": -1.0000000521540642,
"max": -0.8503333860726068,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -28.061001740396023,
"min": -32.000001668930054,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.RndReward.mean": {
"value": 1.7025038073911811,
"min": 1.7025038073911811,
"max": 12.664504209533334,
"count": 3
},
"Pyramids.Policy.RndReward.sum": {
"value": 56.18262564390898,
"min": 56.18262564390898,
"max": 202.63206735253334,
"count": 3
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1749703399",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1749703686"
},
"total": 286.33058713800006,
"count": 1,
"self": 0.7987195210000664,
"children": {
"run_training.setup": {
"total": 0.02655350300005921,
"count": 1,
"self": 0.02655350300005921
},
"TrainerController.start_learning": {
"total": 285.50531411399993,
"count": 1,
"self": 0.19223510001006616,
"children": {
"TrainerController._reset_env": {
"total": 2.531632963999982,
"count": 1,
"self": 2.531632963999982
},
"TrainerController.advance": {
"total": 282.62453498298987,
"count": 6259,
"self": 0.20784723398969618,
"children": {
"env_step": {
"total": 179.99291133500378,
"count": 6259,
"self": 164.58100296001248,
"children": {
"SubprocessEnvManager._take_step": {
"total": 15.300488886000494,
"count": 6259,
"self": 0.628837218983108,
"children": {
"TorchPolicy.evaluate": {
"total": 14.671651667017386,
"count": 6256,
"self": 14.671651667017386
}
}
},
"workers": {
"total": 0.1114194889908049,
"count": 6259,
"self": 0.0,
"children": {
"worker_root": {
"total": 284.23904409100123,
"count": 6259,
"is_parallel": true,
"self": 135.2688686599904,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022052639999401435,
"count": 1,
"is_parallel": true,
"self": 0.0006869999997434206,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001518264000196723,
"count": 8,
"is_parallel": true,
"self": 0.001518264000196723
}
}
},
"UnityEnvironment.step": {
"total": 0.09373088600000301,
"count": 1,
"is_parallel": true,
"self": 0.0006306579998636153,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005277410000417149,
"count": 1,
"is_parallel": true,
"self": 0.0005277410000417149
},
"communicator.exchange": {
"total": 0.09057322800003931,
"count": 1,
"is_parallel": true,
"self": 0.09057322800003931
},
"steps_from_proto": {
"total": 0.001999259000058373,
"count": 1,
"is_parallel": true,
"self": 0.0003969049996612739,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016023540003970993,
"count": 8,
"is_parallel": true,
"self": 0.0016023540003970993
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 148.97017543101083,
"count": 6258,
"is_parallel": true,
"self": 4.152346732032129,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.8690387879964874,
"count": 6258,
"is_parallel": true,
"self": 2.8690387879964874
},
"communicator.exchange": {
"total": 129.85904893598467,
"count": 6258,
"is_parallel": true,
"self": 129.85904893598467
},
"steps_from_proto": {
"total": 12.08974097499754,
"count": 6258,
"is_parallel": true,
"self": 2.5661412020249372,
"children": {
"_process_rank_one_or_two_observation": {
"total": 9.523599772972602,
"count": 50064,
"is_parallel": true,
"self": 9.523599772972602
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 102.42377641399639,
"count": 6259,
"self": 0.2501190079885873,
"children": {
"process_trajectory": {
"total": 14.861986098007264,
"count": 6259,
"self": 14.861986098007264
},
"_update_policy": {
"total": 87.31167130800054,
"count": 27,
"self": 34.52641496200545,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.785256345995094,
"count": 2289,
"self": 52.785256345995094
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2479999895731453e-06,
"count": 1,
"self": 1.2479999895731453e-06
},
"TrainerController._save_models": {
"total": 0.15690981900002043,
"count": 1,
"self": 0.005798025999865786,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15111179300015465,
"count": 1,
"self": 0.15111179300015465
}
}
}
}
}
}
}