{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.35965588688850403,
"min": 0.35965588688850403,
"max": 1.4686775207519531,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10755.1494140625,
"min": 10755.1494140625,
"max": 44553.80078125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989957.0,
"min": 29952.0,
"max": 989957.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989957.0,
"min": 29952.0,
"max": 989957.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5722624063491821,
"min": -0.10666599124670029,
"max": 0.6460117697715759,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 158.51669311523438,
"min": -25.813169479370117,
"max": 185.40538024902344,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0067070769146084785,
"min": -0.01521294191479683,
"max": 0.2853538990020752,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.8578603267669678,
"min": -4.198771953582764,
"max": 67.62887573242188,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06953370519165349,
"min": 0.06561430558698098,
"max": 0.07420820221748381,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9734718726831488,
"min": 0.4771865699078952,
"max": 1.0637702220313563,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014735080271618376,
"min": 0.0011499833945067168,
"max": 0.017138565461168747,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20629112380265727,
"min": 0.013799800734080601,
"max": 0.24266550426060954,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.390961822092852e-06,
"min": 7.390961822092852e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010347346550929992,
"min": 0.00010347346550929992,
"max": 0.0036340006886664993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10246362142857145,
"min": 0.10246362142857145,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4344907000000002,
"min": 1.3886848,
"max": 2.6113335,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002561157807142856,
"min": 0.0002561157807142856,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035856209299999983,
"min": 0.0035856209299999983,
"max": 0.12115221665,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008464858867228031,
"min": 0.008464858867228031,
"max": 0.3671267628669739,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11850801855325699,
"min": 0.11850801855325699,
"max": 2.569887399673462,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 326.4891304347826,
"min": 292.88,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30037.0,
"min": 15984.0,
"max": 33216.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5647782482530759,
"min": -1.0000000521540642,
"max": 1.6707959860563277,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 143.959598839283,
"min": -29.2480016797781,
"max": 167.07959860563278,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5647782482530759,
"min": -1.0000000521540642,
"max": 1.6707959860563277,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 143.959598839283,
"min": -29.2480016797781,
"max": 167.07959860563278,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.028762343721767968,
"min": 0.028237783641970687,
"max": 8.277214052155614,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.646135622402653,
"min": 2.646135622402653,
"max": 132.43542483448982,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1757927769",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1757930003"
},
"total": 2233.447151843,
"count": 1,
"self": 0.4749741810001069,
"children": {
"run_training.setup": {
"total": 0.02235338600007708,
"count": 1,
"self": 0.02235338600007708
},
"TrainerController.start_learning": {
"total": 2232.949824276,
"count": 1,
"self": 1.2866556579974713,
"children": {
"TrainerController._reset_env": {
"total": 2.0397291340000265,
"count": 1,
"self": 2.0397291340000265
},
"TrainerController.advance": {
"total": 2229.534256068003,
"count": 64029,
"self": 1.3160116301019116,
"children": {
"env_step": {
"total": 1580.7328833799586,
"count": 64029,
"self": 1436.5913425829708,
"children": {
"SubprocessEnvManager._take_step": {
"total": 143.32045903498283,
"count": 64029,
"self": 4.452831958011529,
"children": {
"TorchPolicy.evaluate": {
"total": 138.8676270769713,
"count": 62554,
"self": 138.8676270769713
}
}
},
"workers": {
"total": 0.8210817620049511,
"count": 64029,
"self": 0.0,
"children": {
"worker_root": {
"total": 2228.225531045032,
"count": 64029,
"is_parallel": true,
"self": 901.2289121380134,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017677050000202144,
"count": 1,
"is_parallel": true,
"self": 0.0005913880006573891,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011763169993628253,
"count": 8,
"is_parallel": true,
"self": 0.0011763169993628253
}
}
},
"UnityEnvironment.step": {
"total": 0.048361927000087235,
"count": 1,
"is_parallel": true,
"self": 0.0005170050001197524,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045357799990597414,
"count": 1,
"is_parallel": true,
"self": 0.00045357799990597414
},
"communicator.exchange": {
"total": 0.04583592900007716,
"count": 1,
"is_parallel": true,
"self": 0.04583592900007716
},
"steps_from_proto": {
"total": 0.0015554149999843503,
"count": 1,
"is_parallel": true,
"self": 0.0003555430002961657,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011998719996881846,
"count": 8,
"is_parallel": true,
"self": 0.0011998719996881846
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1326.9966189070185,
"count": 64028,
"is_parallel": true,
"self": 31.299613379061157,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.390515943048968,
"count": 64028,
"is_parallel": true,
"self": 22.390515943048968
},
"communicator.exchange": {
"total": 1178.6922559668978,
"count": 64028,
"is_parallel": true,
"self": 1178.6922559668978
},
"steps_from_proto": {
"total": 94.61423361801053,
"count": 64028,
"is_parallel": true,
"self": 18.920381938037735,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.6938516799728,
"count": 512224,
"is_parallel": true,
"self": 75.6938516799728
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 647.4853610579423,
"count": 64029,
"self": 2.5008711868549653,
"children": {
"process_trajectory": {
"total": 123.69222133708422,
"count": 64029,
"self": 123.48987417308467,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20234716399954777,
"count": 2,
"self": 0.20234716399954777
}
}
},
"_update_policy": {
"total": 521.2922685340031,
"count": 454,
"self": 290.97091004099434,
"children": {
"TorchPPOOptimizer.update": {
"total": 230.3213584930088,
"count": 22794,
"self": 230.3213584930088
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.139998837781604e-07,
"count": 1,
"self": 9.139998837781604e-07
},
"TrainerController._save_models": {
"total": 0.08918250199985778,
"count": 1,
"self": 0.0013128169998708472,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08786968499998693,
"count": 1,
"self": 0.08786968499998693
}
}
}
}
}
}
}