{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.32393208146095276,
"min": 0.3231879770755768,
"max": 1.4191868305206299,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9692.0478515625,
"min": 9692.0478515625,
"max": 43052.453125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989992.0,
"min": 29884.0,
"max": 989992.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989992.0,
"min": 29884.0,
"max": 989992.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5830085873603821,
"min": -0.2062370479106903,
"max": 0.6356749534606934,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 164.40841674804688,
"min": -48.87818145751953,
"max": 181.80303955078125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.011067250743508339,
"min": 0.004757442977279425,
"max": 0.40152809023857117,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.120964765548706,
"min": 1.3606287240982056,
"max": 95.16215515136719,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06927017017692544,
"min": 0.06635203127266973,
"max": 0.07379447185262204,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9697823824769561,
"min": 0.47727777860053794,
"max": 1.0782988368871382,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01638588905468171,
"min": 0.0014951874225200053,
"max": 0.017512501658486494,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22940244676554394,
"min": 0.020723027916212118,
"max": 0.24550682296588394,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.3764118269428575e-06,
"min": 7.3764118269428575e-06,
"max": 0.0002952358301594857,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001032697655772,
"min": 0.0001032697655772,
"max": 0.0035079725306758998,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245877142857143,
"min": 0.10245877142857143,
"max": 0.19841194285714286,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4344228,
"min": 1.3888836,
"max": 2.5693241,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002556312657142857,
"min": 0.0002556312657142857,
"max": 0.00984135309142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00357883772,
"min": 0.00357883772,
"max": 0.11695547759000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01702156849205494,
"min": 0.01702156849205494,
"max": 0.5125555992126465,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.23830196261405945,
"min": 0.23830196261405945,
"max": 3.5878891944885254,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 319.78125,
"min": 285.7980769230769,
"max": 994.9655172413793,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30699.0,
"min": 16683.0,
"max": 32676.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6616474053601629,
"min": -0.9305273228974054,
"max": 1.6644470402423073,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 161.1797983199358,
"min": -30.707401655614376,
"max": 170.6001977622509,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6616474053601629,
"min": -0.9305273228974054,
"max": 1.6644470402423073,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 161.1797983199358,
"min": -30.707401655614376,
"max": 170.6001977622509,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.055694247042693046,
"min": 0.0529501591713023,
"max": 9.787382441408495,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.402341963141225,
"min": 5.267698702169582,
"max": 166.3855015039444,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1751014224",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1751016442"
},
"total": 2218.2068641839996,
"count": 1,
"self": 0.7831243599994195,
"children": {
"run_training.setup": {
"total": 0.020139125000014246,
"count": 1,
"self": 0.020139125000014246
},
"TrainerController.start_learning": {
"total": 2217.403600699,
"count": 1,
"self": 1.3160952080324932,
"children": {
"TrainerController._reset_env": {
"total": 2.766024604999984,
"count": 1,
"self": 2.766024604999984
},
"TrainerController.advance": {
"total": 2213.2385254549677,
"count": 63956,
"self": 1.412310860013804,
"children": {
"env_step": {
"total": 1554.603370003968,
"count": 63956,
"self": 1409.1764782319601,
"children": {
"SubprocessEnvManager._take_step": {
"total": 144.6368924710389,
"count": 63956,
"self": 4.484133248029366,
"children": {
"TorchPolicy.evaluate": {
"total": 140.15275922300953,
"count": 62555,
"self": 140.15275922300953
}
}
},
"workers": {
"total": 0.7899993009689297,
"count": 63956,
"self": 0.0,
"children": {
"worker_root": {
"total": 2212.378362536015,
"count": 63956,
"is_parallel": true,
"self": 913.5965005950386,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001789786000017557,
"count": 1,
"is_parallel": true,
"self": 0.0005904239999381389,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001199362000079418,
"count": 8,
"is_parallel": true,
"self": 0.001199362000079418
}
}
},
"UnityEnvironment.step": {
"total": 0.059341933000041536,
"count": 1,
"is_parallel": true,
"self": 0.000517718000082823,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005030879999594617,
"count": 1,
"is_parallel": true,
"self": 0.0005030879999594617
},
"communicator.exchange": {
"total": 0.05666965000000346,
"count": 1,
"is_parallel": true,
"self": 0.05666965000000346
},
"steps_from_proto": {
"total": 0.0016514769999957934,
"count": 1,
"is_parallel": true,
"self": 0.00036520500020742475,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012862719997883687,
"count": 8,
"is_parallel": true,
"self": 0.0012862719997883687
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1298.7818619409766,
"count": 63955,
"is_parallel": true,
"self": 31.81863384696794,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.963961789075938,
"count": 63955,
"is_parallel": true,
"self": 21.963961789075938
},
"communicator.exchange": {
"total": 1151.4635371699828,
"count": 63955,
"is_parallel": true,
"self": 1151.4635371699828
},
"steps_from_proto": {
"total": 93.53572913494986,
"count": 63955,
"is_parallel": true,
"self": 18.590243955512506,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.94548517943736,
"count": 511640,
"is_parallel": true,
"self": 74.94548517943736
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 657.2228445909861,
"count": 63956,
"self": 2.6083044900274217,
"children": {
"process_trajectory": {
"total": 126.28128296495379,
"count": 63956,
"self": 126.08833177095403,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19295119399976102,
"count": 2,
"self": 0.19295119399976102
}
}
},
"_update_policy": {
"total": 528.3332571360049,
"count": 456,
"self": 292.5590563640244,
"children": {
"TorchPPOOptimizer.update": {
"total": 235.77420077198053,
"count": 22815,
"self": 235.77420077198053
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0439998732181266e-06,
"count": 1,
"self": 1.0439998732181266e-06
},
"TrainerController._save_models": {
"total": 0.08295438699997248,
"count": 1,
"self": 0.0012567100002343068,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08169767699973818,
"count": 1,
"self": 0.08169767699973818
}
}
}
}
}
}
}