{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.08631689846515656,
"min": 0.0843723863363266,
"max": 1.3741669654846191,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 2585.36376953125,
"min": 2527.121826171875,
"max": 41686.73046875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989895.0,
"min": 29910.0,
"max": 989895.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989895.0,
"min": 29910.0,
"max": 989895.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.09077170491218567,
"min": -0.10533519834280014,
"max": -0.00047584527055732906,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -21.78520965576172,
"min": -25.280447006225586,
"max": -0.11277532577514648,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.6536656618118286,
"min": 0.49741488695144653,
"max": 0.6628867983818054,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 156.8797607421875,
"min": 117.8873291015625,
"max": 159.09283447265625,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06704746414256721,
"min": 0.06651772349153243,
"max": 0.0749196238989595,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.938664497995941,
"min": 0.5244373672927165,
"max": 0.9913632133497537,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0013481241059302759,
"min": 0.00025611136050515817,
"max": 0.008759250811447695,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.018873737483023862,
"min": 0.003585559047072214,
"max": 0.061314755680133864,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.76000455622143e-06,
"min": 7.76000455622143e-06,
"max": 0.0002952347158741428,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010864006378710001,
"min": 0.00010864006378710001,
"max": 0.0034931026356324994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10258663571428571,
"min": 0.10258663571428571,
"max": 0.19841157142857144,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4362129,
"min": 1.388881,
"max": 2.4643675000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026840490785714295,
"min": 0.00026840490785714295,
"max": 0.009841315985714286,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003757668710000001,
"min": 0.003757668710000001,
"max": 0.11645031324999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.6511731147766113,
"min": 0.5017501711845398,
"max": 0.656827449798584,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 9.116423606872559,
"min": 3.8151755332946777,
"max": 9.195584297180176,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 969.71875,
"min": 912.9411764705883,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31031.0,
"min": 16581.0,
"max": 32557.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.8453187975101173,
"min": -0.9999125520698726,
"max": -0.6339486181735993,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -27.050201520323753,
"min": -31.997201666235924,
"max": -14.597000896930695,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.8453187975101173,
"min": -0.9999125520698726,
"max": -0.6339486181735993,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -27.050201520323753,
"min": -31.997201666235924,
"max": -14.597000896930695,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 6.335959123447537,
"min": 4.762054389452233,
"max": 8.928904919063344,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 202.7506919503212,
"min": 124.4823949933052,
"max": 207.30974233150482,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1738945684",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1738948242"
},
"total": 2558.2735613310006,
"count": 1,
"self": 0.6443359860004421,
"children": {
"run_training.setup": {
"total": 0.032106014999953914,
"count": 1,
"self": 0.032106014999953914
},
"TrainerController.start_learning": {
"total": 2557.59711933,
"count": 1,
"self": 2.29046529797688,
"children": {
"TrainerController._reset_env": {
"total": 2.8654860930000723,
"count": 1,
"self": 2.8654860930000723
},
"TrainerController.advance": {
"total": 2552.3434062580236,
"count": 62919,
"self": 2.2648396070062518,
"children": {
"env_step": {
"total": 1532.3922718760032,
"count": 62919,
"self": 1376.8568209279429,
"children": {
"SubprocessEnvManager._take_step": {
"total": 154.2586397999985,
"count": 62919,
"self": 6.612095845212934,
"children": {
"TorchPolicy.evaluate": {
"total": 147.64654395478556,
"count": 62558,
"self": 147.64654395478556
}
}
},
"workers": {
"total": 1.2768111480618245,
"count": 62919,
"self": 0.0,
"children": {
"worker_root": {
"total": 2551.149525607083,
"count": 62919,
"is_parallel": true,
"self": 1334.2293891610652,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0033978820001721033,
"count": 1,
"is_parallel": true,
"self": 0.0011836109997602762,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002214271000411827,
"count": 8,
"is_parallel": true,
"self": 0.002214271000411827
}
}
},
"UnityEnvironment.step": {
"total": 0.06404912799916929,
"count": 1,
"is_parallel": true,
"self": 0.0007139229983295081,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005307410001478274,
"count": 1,
"is_parallel": true,
"self": 0.0005307410001478274
},
"communicator.exchange": {
"total": 0.06079819100068562,
"count": 1,
"is_parallel": true,
"self": 0.06079819100068562
},
"steps_from_proto": {
"total": 0.002006273000006331,
"count": 1,
"is_parallel": true,
"self": 0.00041423899983783485,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015920340001684963,
"count": 8,
"is_parallel": true,
"self": 0.0015920340001684963
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1216.9201364460178,
"count": 62918,
"is_parallel": true,
"self": 41.62290813011441,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 29.57761404307348,
"count": 62918,
"is_parallel": true,
"self": 29.57761404307348
},
"communicator.exchange": {
"total": 1026.6266863728133,
"count": 62918,
"is_parallel": true,
"self": 1026.6266863728133
},
"steps_from_proto": {
"total": 119.09292790001655,
"count": 62918,
"is_parallel": true,
"self": 25.59458359830205,
"children": {
"_process_rank_one_or_two_observation": {
"total": 93.4983443017145,
"count": 503344,
"is_parallel": true,
"self": 93.4983443017145
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1017.6862947750142,
"count": 62919,
"self": 4.081787490072202,
"children": {
"process_trajectory": {
"total": 152.19132513693694,
"count": 62919,
"self": 151.91144841093683,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2798767260001114,
"count": 2,
"self": 0.2798767260001114
}
}
},
"_update_policy": {
"total": 861.413182148005,
"count": 444,
"self": 354.2487153989732,
"children": {
"TorchPPOOptimizer.update": {
"total": 507.1644667490318,
"count": 22776,
"self": 507.1644667490318
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1379997886251658e-06,
"count": 1,
"self": 1.1379997886251658e-06
},
"TrainerController._save_models": {
"total": 0.09776054299982206,
"count": 1,
"self": 0.0045322569994823425,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09322828600033972,
"count": 1,
"self": 0.09322828600033972
}
}
}
}
}
}
}