{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.30164167284965515,
"min": 0.30164167284965515,
"max": 1.3872199058532715,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9034.771484375,
"min": 9034.771484375,
"max": 42082.703125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989956.0,
"min": 29952.0,
"max": 989956.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989956.0,
"min": 29952.0,
"max": 989956.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.003368256613612175,
"min": -0.09132780134677887,
"max": 0.3404161036014557,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.8252228498458862,
"min": -22.010000228881836,
"max": 89.52943420410156,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -1.6514716148376465,
"min": -4.42167854309082,
"max": 0.45722198486328125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -404.61053466796875,
"min": -1136.371337890625,
"max": 120.24938201904297,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07037074958151648,
"min": 0.06462560997247378,
"max": 0.07250211167792302,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9851904941412308,
"min": 0.48051184919681506,
"max": 1.0170366373594115,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 1.7136813140221865,
"min": 0.0008351562572567773,
"max": 3.4419179982623134,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 23.99153839631061,
"min": 0.005846093800797441,
"max": 48.18685197567239,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.310818991664285e-06,
"min": 7.310818991664285e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010235146588329999,
"min": 0.00010235146588329999,
"max": 0.003506198331267299,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243690714285714,
"min": 0.10243690714285714,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4341167,
"min": 1.3691136000000002,
"max": 2.6176721,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002534470235714286,
"min": 0.0002534470235714286,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035482583300000007,
"min": 0.0035482583300000007,
"max": 0.11689639673,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.004406671971082687,
"min": 0.004406671971082687,
"max": 0.5200653672218323,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.06169340759515762,
"min": 0.06169340759515762,
"max": 3.6404573917388916,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 850.7142857142857,
"min": 488.3898305084746,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29775.0,
"min": 15984.0,
"max": 32585.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.27958860439913613,
"min": -1.0000000521540642,
"max": 1.1927374708466232,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -9.785601153969765,
"min": -32.000001668930054,
"max": 76.33519813418388,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.27958860439913613,
"min": -1.0000000521540642,
"max": 1.1927374708466232,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -9.785601153969765,
"min": -32.000001668930054,
"max": 76.33519813418388,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.042571697476419756,
"min": 0.03502741596571468,
"max": 10.428025946952403,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.4900094116746914,
"min": 1.4900094116746914,
"max": 166.84841515123844,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677676672",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677678972"
},
"total": 2299.8091954189995,
"count": 1,
"self": 0.4431325139994442,
"children": {
"run_training.setup": {
"total": 0.1959459740000966,
"count": 1,
"self": 0.1959459740000966
},
"TrainerController.start_learning": {
"total": 2299.170116931,
"count": 1,
"self": 1.406129512994994,
"children": {
"TrainerController._reset_env": {
"total": 6.304376191999836,
"count": 1,
"self": 6.304376191999836
},
"TrainerController.advance": {
"total": 2291.374878508005,
"count": 63519,
"self": 1.448010674116631,
"children": {
"env_step": {
"total": 1567.6856491798317,
"count": 63519,
"self": 1452.8211917665376,
"children": {
"SubprocessEnvManager._take_step": {
"total": 114.04192062918082,
"count": 63519,
"self": 4.71423035420321,
"children": {
"TorchPolicy.evaluate": {
"total": 109.32769027497761,
"count": 62563,
"self": 37.091357798996796,
"children": {
"TorchPolicy.sample_actions": {
"total": 72.23633247598082,
"count": 62563,
"self": 72.23633247598082
}
}
}
}
},
"workers": {
"total": 0.8225367841132538,
"count": 63519,
"self": 0.0,
"children": {
"worker_root": {
"total": 2293.966024097939,
"count": 63519,
"is_parallel": true,
"self": 959.1783317589261,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020415419994606054,
"count": 1,
"is_parallel": true,
"self": 0.0007484560010198038,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012930859984408016,
"count": 8,
"is_parallel": true,
"self": 0.0012930859984408016
}
}
},
"UnityEnvironment.step": {
"total": 0.04755457499959448,
"count": 1,
"is_parallel": true,
"self": 0.0005325649999576854,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047472099959122716,
"count": 1,
"is_parallel": true,
"self": 0.00047472099959122716
},
"communicator.exchange": {
"total": 0.044931307999831915,
"count": 1,
"is_parallel": true,
"self": 0.044931307999831915
},
"steps_from_proto": {
"total": 0.001615981000213651,
"count": 1,
"is_parallel": true,
"self": 0.0004088219993718667,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012071590008417843,
"count": 8,
"is_parallel": true,
"self": 0.0012071590008417843
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1334.7876923390131,
"count": 63518,
"is_parallel": true,
"self": 31.388689326024178,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.890762202056976,
"count": 63518,
"is_parallel": true,
"self": 23.890762202056976
},
"communicator.exchange": {
"total": 1185.0089890899271,
"count": 63518,
"is_parallel": true,
"self": 1185.0089890899271
},
"steps_from_proto": {
"total": 94.49925172100484,
"count": 63518,
"is_parallel": true,
"self": 23.022190963196408,
"children": {
"_process_rank_one_or_two_observation": {
"total": 71.47706075780843,
"count": 508144,
"is_parallel": true,
"self": 71.47706075780843
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 722.2412186540569,
"count": 63519,
"self": 2.648497806011619,
"children": {
"process_trajectory": {
"total": 162.54423878405396,
"count": 63519,
"self": 162.30378199605366,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24045678800030146,
"count": 2,
"self": 0.24045678800030146
}
}
},
"_update_policy": {
"total": 557.0484820639913,
"count": 446,
"self": 214.37177121203877,
"children": {
"TorchPPOOptimizer.update": {
"total": 342.6767108519525,
"count": 22791,
"self": 342.6767108519525
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1680003808578476e-06,
"count": 1,
"self": 1.1680003808578476e-06
},
"TrainerController._save_models": {
"total": 0.08473154999956023,
"count": 1,
"self": 0.0013658129992109025,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08336573700034933,
"count": 1,
"self": 0.08336573700034933
}
}
}
}
}
}
}