{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7005102634429932,
"min": 0.7005102634429932,
"max": 1.4899113178253174,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 20914.43359375,
"min": 20914.43359375,
"max": 45197.94921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989889.0,
"min": 29890.0,
"max": 989889.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989889.0,
"min": 29890.0,
"max": 989889.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3922243118286133,
"min": -0.178433358669281,
"max": 0.4292246401309967,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 104.3316650390625,
"min": -42.467140197753906,
"max": 114.17375183105469,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.025996580719947815,
"min": -0.03232503682374954,
"max": 0.26009249687194824,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -6.915090560913086,
"min": -8.275209426879883,
"max": 61.902015686035156,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07249576760417162,
"min": 0.0639663716316411,
"max": 0.07452998307384538,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0149407464584026,
"min": 0.5799586704494593,
"max": 1.055467549809049,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014416007904093078,
"min": 7.864274907139885e-05,
"max": 0.014416007904093078,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2018241106573031,
"min": 0.0011009984869995838,
"max": 0.2018241106573031,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.259261865992856e-06,
"min": 7.259261865992856e-06,
"max": 0.0002947698392433875,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010162966612389998,
"min": 0.00010162966612389998,
"max": 0.0033835835721389004,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024197214285714,
"min": 0.1024197214285714,
"max": 0.19825661249999998,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4338760999999998,
"min": 1.4338760999999998,
"max": 2.5278611,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002517301707142858,
"min": 0.0002517301707142858,
"max": 0.009825835588750002,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035242223900000006,
"min": 0.0035242223900000006,
"max": 0.11281332389000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008060975931584835,
"min": 0.008060975931584835,
"max": 0.3066112995147705,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11285366863012314,
"min": 0.11285366863012314,
"max": 2.452890396118164,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 442.01428571428573,
"min": 422.8787878787879,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30941.0,
"min": 17328.0,
"max": 32931.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4390666437321815,
"min": -0.9999419873760592,
"max": 1.4531044502978894,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 99.29559841752052,
"min": -30.998201608657837,
"max": 99.29559841752052,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4390666437321815,
"min": -0.9999419873760592,
"max": 1.4531044502978894,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 99.29559841752052,
"min": -30.998201608657837,
"max": 99.29559841752052,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03815904078399636,
"min": 0.03643144154790858,
"max": 6.362992232354979,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.632973814095749,
"min": 2.440906583709875,
"max": 114.53386018238962,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1752070726",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1752072826"
},
"total": 2100.336689523,
"count": 1,
"self": 0.47594196600084615,
"children": {
"run_training.setup": {
"total": 0.01938771099958103,
"count": 1,
"self": 0.01938771099958103
},
"TrainerController.start_learning": {
"total": 2099.8413598459997,
"count": 1,
"self": 1.1863590560283228,
"children": {
"TrainerController._reset_env": {
"total": 2.1372539079998205,
"count": 1,
"self": 2.1372539079998205
},
"TrainerController.advance": {
"total": 2096.436267752972,
"count": 63488,
"self": 1.320345641993299,
"children": {
"env_step": {
"total": 1457.8372393780573,
"count": 63488,
"self": 1315.0992466970974,
"children": {
"SubprocessEnvManager._take_step": {
"total": 142.0120474718551,
"count": 63488,
"self": 4.3782055877995845,
"children": {
"TorchPolicy.evaluate": {
"total": 137.63384188405553,
"count": 62553,
"self": 137.63384188405553
}
}
},
"workers": {
"total": 0.7259452091047933,
"count": 63488,
"self": 0.0,
"children": {
"worker_root": {
"total": 2094.975901201101,
"count": 63488,
"is_parallel": true,
"self": 887.5006538440944,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017048559998329438,
"count": 1,
"is_parallel": true,
"self": 0.000562718998935452,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011421370008974918,
"count": 8,
"is_parallel": true,
"self": 0.0011421370008974918
}
}
},
"UnityEnvironment.step": {
"total": 0.05692181500035076,
"count": 1,
"is_parallel": true,
"self": 0.000541477000751911,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004398149999360612,
"count": 1,
"is_parallel": true,
"self": 0.0004398149999360612
},
"communicator.exchange": {
"total": 0.054387365999900794,
"count": 1,
"is_parallel": true,
"self": 0.054387365999900794
},
"steps_from_proto": {
"total": 0.0015531569997619954,
"count": 1,
"is_parallel": true,
"self": 0.00033723999968060525,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012159170000813901,
"count": 8,
"is_parallel": true,
"self": 0.0012159170000813901
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1207.4752473570065,
"count": 63487,
"is_parallel": true,
"self": 31.117190372985988,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.34756389995073,
"count": 63487,
"is_parallel": true,
"self": 22.34756389995073
},
"communicator.exchange": {
"total": 1060.7410423419697,
"count": 63487,
"is_parallel": true,
"self": 1060.7410423419697
},
"steps_from_proto": {
"total": 93.26945074210016,
"count": 63487,
"is_parallel": true,
"self": 18.68292743207303,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.58652331002713,
"count": 507896,
"is_parallel": true,
"self": 74.58652331002713
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 637.2786827329214,
"count": 63488,
"self": 2.3541906209611625,
"children": {
"process_trajectory": {
"total": 120.67445461795796,
"count": 63488,
"self": 120.47950041195872,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19495420599923818,
"count": 2,
"self": 0.19495420599923818
}
}
},
"_update_policy": {
"total": 514.2500374940023,
"count": 452,
"self": 287.00432728701117,
"children": {
"TorchPPOOptimizer.update": {
"total": 227.24571020699113,
"count": 22746,
"self": 227.24571020699113
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.269997462979518e-07,
"count": 1,
"self": 9.269997462979518e-07
},
"TrainerController._save_models": {
"total": 0.08147820199974376,
"count": 1,
"self": 0.001236320000316482,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08024188199942728,
"count": 1,
"self": 0.08024188199942728
}
}
}
}
}
}
}