{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.337991327047348,
"min": 0.32362666726112366,
"max": 1.429599642753601,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10069.4375,
"min": 9667.3759765625,
"max": 43368.3359375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989899.0,
"min": 29952.0,
"max": 989899.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989899.0,
"min": 29952.0,
"max": 989899.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5747349858283997,
"min": -0.09383946657180786,
"max": 0.6817117929458618,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 159.20159912109375,
"min": -22.615310668945312,
"max": 199.05984497070312,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.022336522117257118,
"min": -0.023608794435858727,
"max": 0.3244560956954956,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.187216758728027,
"min": -6.516027450561523,
"max": 76.8960952758789,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06406502446671035,
"min": 0.06406502446671035,
"max": 0.07326739686958582,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.896910342533945,
"min": 0.5116860659894381,
"max": 1.038585924857305,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01587313274363873,
"min": 0.0006414991818305822,
"max": 0.017217774842975007,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22222385841094222,
"min": 0.00898098854562815,
"max": 0.25297661428824847,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.598690324278573e-06,
"min": 7.598690324278573e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010638166453990002,
"min": 0.00010638166453990002,
"max": 0.0035071247309584994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10253286428571429,
"min": 0.10253286428571429,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4354601,
"min": 1.3886848,
"max": 2.5690415000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002630331421428572,
"min": 0.0002630331421428572,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003682463990000001,
"min": 0.003682463990000001,
"max": 0.11692724584999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010160798206925392,
"min": 0.010160798206925392,
"max": 0.3477104604244232,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1422511786222458,
"min": 0.1422511786222458,
"max": 2.4339733123779297,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 331.43529411764706,
"min": 284.25892857142856,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28172.0,
"min": 15984.0,
"max": 33695.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5744329219355302,
"min": -1.0000000521540642,
"max": 1.7157410600089602,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 133.82679836452007,
"min": -29.348001822829247,
"max": 192.16299872100353,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5744329219355302,
"min": -1.0000000521540642,
"max": 1.7157410600089602,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 133.82679836452007,
"min": -29.348001822829247,
"max": 192.16299872100353,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03498028784045292,
"min": 0.0301760831732411,
"max": 7.083838961087167,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.9733244664384983,
"min": 2.9733244664384983,
"max": 113.34142337739468,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1745613019",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1745615364"
},
"total": 2345.013278622,
"count": 1,
"self": 0.47705887799975244,
"children": {
"run_training.setup": {
"total": 0.020586244000242004,
"count": 1,
"self": 0.020586244000242004
},
"TrainerController.start_learning": {
"total": 2344.5156335,
"count": 1,
"self": 1.623538544976327,
"children": {
"TrainerController._reset_env": {
"total": 2.299355853000179,
"count": 1,
"self": 2.299355853000179
},
"TrainerController.advance": {
"total": 2340.509320675024,
"count": 63915,
"self": 1.7660081621447716,
"children": {
"env_step": {
"total": 1658.5925450199084,
"count": 63915,
"self": 1489.9788254348564,
"children": {
"SubprocessEnvManager._take_step": {
"total": 167.6618199039708,
"count": 63915,
"self": 5.039487168022333,
"children": {
"TorchPolicy.evaluate": {
"total": 162.62233273594848,
"count": 62556,
"self": 162.62233273594848
}
}
},
"workers": {
"total": 0.9518996810811586,
"count": 63915,
"self": 0.0,
"children": {
"worker_root": {
"total": 2338.7097311419766,
"count": 63915,
"is_parallel": true,
"self": 973.5214288439092,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020808320000469394,
"count": 1,
"is_parallel": true,
"self": 0.0007549079996351793,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00132592400041176,
"count": 8,
"is_parallel": true,
"self": 0.00132592400041176
}
}
},
"UnityEnvironment.step": {
"total": 0.05006581700035895,
"count": 1,
"is_parallel": true,
"self": 0.0005609780000668252,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004984240003977902,
"count": 1,
"is_parallel": true,
"self": 0.0004984240003977902
},
"communicator.exchange": {
"total": 0.04735191999998278,
"count": 1,
"is_parallel": true,
"self": 0.04735191999998278
},
"steps_from_proto": {
"total": 0.0016544949999115488,
"count": 1,
"is_parallel": true,
"self": 0.0003517240002111066,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013027709997004422,
"count": 8,
"is_parallel": true,
"self": 0.0013027709997004422
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1365.1883022980674,
"count": 63914,
"is_parallel": true,
"self": 32.937474656003815,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.268288141044195,
"count": 63914,
"is_parallel": true,
"self": 24.268288141044195
},
"communicator.exchange": {
"total": 1206.3647111661098,
"count": 63914,
"is_parallel": true,
"self": 1206.3647111661098
},
"steps_from_proto": {
"total": 101.61782833490952,
"count": 63914,
"is_parallel": true,
"self": 21.380241917970125,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.2375864169394,
"count": 511312,
"is_parallel": true,
"self": 80.2375864169394
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 680.1507674929708,
"count": 63915,
"self": 2.987410464995264,
"children": {
"process_trajectory": {
"total": 134.04972926796654,
"count": 63915,
"self": 133.84356845096636,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20616081700018185,
"count": 2,
"self": 0.20616081700018185
}
}
},
"_update_policy": {
"total": 543.113627760009,
"count": 450,
"self": 300.22933849297306,
"children": {
"TorchPPOOptimizer.update": {
"total": 242.8842892670359,
"count": 22818,
"self": 242.8842892670359
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.79000105871819e-07,
"count": 1,
"self": 9.79000105871819e-07
},
"TrainerController._save_models": {
"total": 0.08341744799963635,
"count": 1,
"self": 0.0015959559996190364,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08182149200001732,
"count": 1,
"self": 0.08182149200001732
}
}
}
}
}
}
}