{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7286418676376343,
"min": 0.6391339898109436,
"max": 1.484389066696167,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 21870.9140625,
"min": 18989.94921875,
"max": 45030.42578125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989891.0,
"min": 29952.0,
"max": 989891.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989891.0,
"min": 29952.0,
"max": 989891.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.05668890103697777,
"min": -0.17862895131111145,
"max": 0.09046962112188339,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 13.88878059387207,
"min": -42.335060119628906,
"max": 22.436466217041016,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.031163666397333145,
"min": 0.005397243425250053,
"max": 0.37113887071609497,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.635098457336426,
"min": 1.3277218341827393,
"max": 89.44446563720703,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06320995998666498,
"min": 0.06320995998666498,
"max": 0.07316363093226842,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.8849394398133097,
"min": 0.48649473633281304,
"max": 1.0423098736244034,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.005269541817481286,
"min": 0.0002284143478532637,
"max": 0.008819635463935981,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.073773585444738,
"min": 0.002969386522092428,
"max": 0.1130588423209024,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.201361885292856e-06,
"min": 7.201361885292856e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010081906639409997,
"min": 0.00010081906639409997,
"max": 0.0035075207308265,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10240042142857143,
"min": 0.10240042142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4336059,
"min": 1.3886848,
"max": 2.5691735000000007,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002498021007142857,
"min": 0.0002498021007142857,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0034972294099999994,
"min": 0.0034972294099999994,
"max": 0.11694043265000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014499842189252377,
"min": 0.014499842189252377,
"max": 0.6146382689476013,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.20299778878688812,
"min": 0.20299778878688812,
"max": 4.3024678230285645,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 814.1714285714286,
"min": 766.3809523809524,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28496.0,
"min": 15984.0,
"max": 33404.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.0426171002643449,
"min": -1.0000000521540642,
"max": 0.32856662127943265,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 1.4915985092520714,
"min": -29.98920153081417,
"max": 13.799798093736172,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.0426171002643449,
"min": -1.0000000521540642,
"max": 0.32856662127943265,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 1.4915985092520714,
"min": -29.98920153081417,
"max": 13.799798093736172,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.12677596508458788,
"min": 0.11418206793245017,
"max": 12.908542780205607,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.437158777960576,
"min": 4.437158777960576,
"max": 206.53668448328972,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1770809144",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1770811337"
},
"total": 2192.789102951,
"count": 1,
"self": 0.5794443009999668,
"children": {
"run_training.setup": {
"total": 0.0334163620000254,
"count": 1,
"self": 0.0334163620000254
},
"TrainerController.start_learning": {
"total": 2192.176242288,
"count": 1,
"self": 1.4430577250518581,
"children": {
"TrainerController._reset_env": {
"total": 2.111284307999995,
"count": 1,
"self": 2.111284307999995
},
"TrainerController.advance": {
"total": 2188.541236711948,
"count": 63272,
"self": 1.5544894070135342,
"children": {
"env_step": {
"total": 1532.1147258749527,
"count": 63272,
"self": 1376.21295500988,
"children": {
"SubprocessEnvManager._take_step": {
"total": 155.06063339904972,
"count": 63272,
"self": 4.725506347033843,
"children": {
"TorchPolicy.evaluate": {
"total": 150.33512705201588,
"count": 62558,
"self": 150.33512705201588
}
}
},
"workers": {
"total": 0.8411374660229285,
"count": 63272,
"self": 0.0,
"children": {
"worker_root": {
"total": 2185.723474282982,
"count": 63272,
"is_parallel": true,
"self": 928.2884564669446,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001871465000021999,
"count": 1,
"is_parallel": true,
"self": 0.00059826200003954,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001273202999982459,
"count": 8,
"is_parallel": true,
"self": 0.001273202999982459
}
}
},
"UnityEnvironment.step": {
"total": 0.05526827800008505,
"count": 1,
"is_parallel": true,
"self": 0.0005328290001216374,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005626920001304825,
"count": 1,
"is_parallel": true,
"self": 0.0005626920001304825
},
"communicator.exchange": {
"total": 0.052606104999995296,
"count": 1,
"is_parallel": true,
"self": 0.052606104999995296
},
"steps_from_proto": {
"total": 0.0015666519998376316,
"count": 1,
"is_parallel": true,
"self": 0.0003440919995227887,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001222560000314843,
"count": 8,
"is_parallel": true,
"self": 0.001222560000314843
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1257.4350178160373,
"count": 63271,
"is_parallel": true,
"self": 34.893287354094355,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.686142370937205,
"count": 63271,
"is_parallel": true,
"self": 23.686142370937205
},
"communicator.exchange": {
"total": 1088.249003496027,
"count": 63271,
"is_parallel": true,
"self": 1088.249003496027
},
"steps_from_proto": {
"total": 110.60658459497881,
"count": 63271,
"is_parallel": true,
"self": 23.099915793805394,
"children": {
"_process_rank_one_or_two_observation": {
"total": 87.50666880117342,
"count": 506168,
"is_parallel": true,
"self": 87.50666880117342
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 654.8720214299819,
"count": 63272,
"self": 2.6665097129493915,
"children": {
"process_trajectory": {
"total": 122.5564371890348,
"count": 63272,
"self": 122.36616655903458,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19027063000021371,
"count": 2,
"self": 0.19027063000021371
}
}
},
"_update_policy": {
"total": 529.6490745279978,
"count": 448,
"self": 294.13856702098724,
"children": {
"TorchPPOOptimizer.update": {
"total": 235.51050750701052,
"count": 22758,
"self": 235.51050750701052
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.180002962239087e-07,
"count": 1,
"self": 9.180002962239087e-07
},
"TrainerController._save_models": {
"total": 0.08066262500005905,
"count": 1,
"self": 0.001048835000347026,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07961378999971203,
"count": 1,
"self": 0.07961378999971203
}
}
}
}
}
}
}