{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5365580320358276,
"min": 0.5232399106025696,
"max": 1.3608429431915283,
"count": 10
},
"Pyramids.Policy.Entropy.sum": {
"value": 16148.25,
"min": 15596.734375,
"max": 41282.53125,
"count": 10
},
"Pyramids.Step.mean": {
"value": 299955.0,
"min": 29982.0,
"max": 299955.0,
"count": 10
},
"Pyramids.Step.sum": {
"value": 299955.0,
"min": 29982.0,
"max": 299955.0,
"count": 10
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.0520547553896904,
"min": -0.2568754255771637,
"max": -0.026195399463176727,
"count": 10
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -12.597250938415527,
"min": -61.136348724365234,
"max": -6.339286804199219,
"count": 10
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.09898155927658081,
"min": 0.09898155927658081,
"max": 0.4925316870212555,
"count": 10
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 23.953536987304688,
"min": 23.953536987304688,
"max": 117.22254180908203,
"count": 10
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06732379952619272,
"min": 0.06406305936507858,
"max": 0.07085190431388019,
"count": 10
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.942533193366698,
"min": 0.5668152345110415,
"max": 0.9767627690939097,
"count": 10
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.002732082510524238,
"min": 0.000364879269195584,
"max": 0.011824381959767871,
"count": 10
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.038249155147339335,
"min": 0.004378551230347008,
"max": 0.09459505567814297,
"count": 10
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.3958023918785713e-05,
"min": 1.3958023918785713e-05,
"max": 0.00028283675572108333,
"count": 10
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.000195412334863,
"min": 0.000195412334863,
"max": 0.0027056562981146674,
"count": 10
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10465264285714286,
"min": 0.10465264285714286,
"max": 0.1942789166666667,
"count": 10
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4651370000000001,
"min": 1.4651370000000001,
"max": 2.101885333333333,
"count": 10
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0004747990214285714,
"min": 0.0004747990214285714,
"max": 0.009428463775000001,
"count": 10
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0066471863,
"min": 0.0066471863,
"max": 0.0902183448,
"count": 10
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.09065902233123779,
"min": 0.09065902233123779,
"max": 0.5422310829162598,
"count": 10
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 1.269226312637329,
"min": 1.269226312637329,
"max": 4.337848663330078,
"count": 10
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 936.2857142857143,
"min": 903.7777777777778,
"max": 999.0,
"count": 10
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32770.0,
"min": 16397.0,
"max": 32770.0,
"count": 10
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.6512114773903575,
"min": -0.999962551984936,
"max": -0.45991115547992567,
"count": 10
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -22.79240170866251,
"min": -31.998801663517952,
"max": -12.417601197957993,
"count": 10
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.6512114773903575,
"min": -0.999962551984936,
"max": -0.45991115547992567,
"count": 10
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -22.79240170866251,
"min": -31.998801663517952,
"max": -12.417601197957993,
"count": 10
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.8861063092947006,
"min": 0.8682979034535863,
"max": 11.374454057391953,
"count": 10
},
"Pyramids.Policy.RndReward.sum": {
"value": 31.013720825314522,
"min": 23.44404339324683,
"max": 193.36571897566319,
"count": 10
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1751681903",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1751682811"
},
"total": 908.7471445470001,
"count": 1,
"self": 0.6318890809998265,
"children": {
"run_training.setup": {
"total": 0.02923407799994493,
"count": 1,
"self": 0.02923407799994493
},
"TrainerController.start_learning": {
"total": 908.0860213880003,
"count": 1,
"self": 0.6866895519713125,
"children": {
"TrainerController._reset_env": {
"total": 2.9799924940000437,
"count": 1,
"self": 2.9799924940000437
},
"TrainerController.advance": {
"total": 904.300296584029,
"count": 18901,
"self": 0.8107115460147725,
"children": {
"env_step": {
"total": 591.4039045279828,
"count": 18901,
"self": 539.5200940439711,
"children": {
"SubprocessEnvManager._take_step": {
"total": 51.462134163011115,
"count": 18901,
"self": 2.2162769989879507,
"children": {
"TorchPolicy.evaluate": {
"total": 49.245857164023164,
"count": 18802,
"self": 49.245857164023164
}
}
},
"workers": {
"total": 0.421676321000632,
"count": 18901,
"self": 0.0,
"children": {
"worker_root": {
"total": 905.4020135499843,
"count": 18901,
"is_parallel": true,
"self": 421.0352574849801,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002168212000015046,
"count": 1,
"is_parallel": true,
"self": 0.0006543320000673702,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001513879999947676,
"count": 8,
"is_parallel": true,
"self": 0.001513879999947676
}
}
},
"UnityEnvironment.step": {
"total": 0.13292974000000868,
"count": 1,
"is_parallel": true,
"self": 0.0006602949999887642,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004706429999714601,
"count": 1,
"is_parallel": true,
"self": 0.0004706429999714601
},
"communicator.exchange": {
"total": 0.1300777639999069,
"count": 1,
"is_parallel": true,
"self": 0.1300777639999069
},
"steps_from_proto": {
"total": 0.0017210380001415615,
"count": 1,
"is_parallel": true,
"self": 0.00036029099987899826,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013607470002625632,
"count": 8,
"is_parallel": true,
"self": 0.0013607470002625632
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 484.3667560650042,
"count": 18900,
"is_parallel": true,
"self": 13.659147757990013,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 9.229370942995956,
"count": 18900,
"is_parallel": true,
"self": 9.229370942995956
},
"communicator.exchange": {
"total": 422.84480860299936,
"count": 18900,
"is_parallel": true,
"self": 422.84480860299936
},
"steps_from_proto": {
"total": 38.63342876101888,
"count": 18900,
"is_parallel": true,
"self": 8.273840807959914,
"children": {
"_process_rank_one_or_two_observation": {
"total": 30.359587953058963,
"count": 151200,
"is_parallel": true,
"self": 30.359587953058963
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 312.08568051003135,
"count": 18901,
"self": 1.101801586037709,
"children": {
"process_trajectory": {
"total": 48.80315229599387,
"count": 18901,
"self": 48.80315229599387
},
"_update_policy": {
"total": 262.1807266279998,
"count": 123,
"self": 101.51770031397655,
"children": {
"TorchPPOOptimizer.update": {
"total": 160.66302631402323,
"count": 6897,
"self": 160.66302631402323
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4649999684479553e-06,
"count": 1,
"self": 1.4649999684479553e-06
},
"TrainerController._save_models": {
"total": 0.11904129300000932,
"count": 1,
"self": 0.00196028899972589,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11708100400028343,
"count": 1,
"self": 0.11708100400028343
}
}
}
}
}
}
}