{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2740054726600647,
"min": 0.2730562090873718,
"max": 1.4977306127548218,
"count": 62
},
"Pyramids.Policy.Entropy.sum": {
"value": 8307.845703125,
"min": 8140.9052734375,
"max": 45435.15625,
"count": 62
},
"Pyramids.Step.mean": {
"value": 1859999.0,
"min": 29952.0,
"max": 1859999.0,
"count": 62
},
"Pyramids.Step.sum": {
"value": 1859999.0,
"min": 29952.0,
"max": 1859999.0,
"count": 62
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7399856448173523,
"min": -0.1160324364900589,
"max": 0.7706695795059204,
"count": 62
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 214.59584045410156,
"min": -27.8477840423584,
"max": 230.43020629882812,
"count": 62
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0003954443382099271,
"min": -0.475079745054245,
"max": 0.23874641954898834,
"count": 62
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.11467885971069336,
"min": -123.045654296875,
"max": 62.79030990600586,
"count": 62
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06819519865999205,
"min": 0.06443849667610912,
"max": 0.07189481942831273,
"count": 62
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0229279798998807,
"min": 0.46213526131274796,
"max": 1.0468417306101936,
"count": 62
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.006462579289250749,
"min": 0.0003755952990652087,
"max": 0.03470437241395259,
"count": 62
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.09693868933876124,
"min": 0.0030047623925216695,
"max": 0.48586121379533626,
"count": 62
},
"Pyramids.Policy.LearningRate.mean": {
"value": 3.8497319280520006e-05,
"min": 3.8497319280520006e-05,
"max": 9.946118149120001e-05,
"count": 62
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0005774597892078001,
"min": 0.0005532830800511001,
"max": 0.0013125037874964,
"count": 62
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1192486288888889,
"min": 0.1192486288888889,
"max": 0.14973059047619047,
"count": 62
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.7887294333333335,
"min": 1.0448522666666666,
"max": 2.1562518,
"count": 62
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0038558760519999996,
"min": 0.0038558760519999996,
"max": 0.009946171977142856,
"count": 62
},
"Pyramids.Policy.Beta.sum": {
"value": 0.05783814077999999,
"min": 0.05541289511,
"max": 0.13126910964,
"count": 62
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01072565745562315,
"min": 0.01072565745562315,
"max": 0.4515376091003418,
"count": 62
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16088485717773438,
"min": 0.1523076295852661,
"max": 3.1607632637023926,
"count": 62
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 255.63392857142858,
"min": 255.63392857142858,
"max": 999.0,
"count": 62
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28631.0,
"min": 15984.0,
"max": 32878.0,
"count": 62
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7443660604102271,
"min": -1.0000000521540642,
"max": 1.7443660604102271,
"count": 62
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 195.36899876594543,
"min": -32.000001668930054,
"max": 201.3149983137846,
"count": 62
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7443660604102271,
"min": -1.0000000521540642,
"max": 1.7443660604102271,
"count": 62
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 195.36899876594543,
"min": -32.000001668930054,
"max": 201.3149983137846,
"count": 62
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.028677355602277594,
"min": 0.028677355602277594,
"max": 8.665299324318767,
"count": 62
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2118638274550904,
"min": 3.1885658371757017,
"max": 138.64478918910027,
"count": 62
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 62
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 62
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675002787",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675006665"
},
"total": 3877.922660093,
"count": 1,
"self": 0.29484137700001156,
"children": {
"run_training.setup": {
"total": 0.10168285199998195,
"count": 1,
"self": 0.10168285199998195
},
"TrainerController.start_learning": {
"total": 3877.526135864,
"count": 1,
"self": 2.3328759859796264,
"children": {
"TrainerController._reset_env": {
"total": 9.395860048999992,
"count": 1,
"self": 9.395860048999992
},
"TrainerController.advance": {
"total": 3865.6627526650213,
"count": 120638,
"self": 2.3115398041468325,
"children": {
"env_step": {
"total": 2664.176755328975,
"count": 120638,
"self": 2472.235515589014,
"children": {
"SubprocessEnvManager._take_step": {
"total": 190.5436710629487,
"count": 120638,
"self": 8.094468863004863,
"children": {
"TorchPolicy.evaluate": {
"total": 182.44920219994384,
"count": 117213,
"self": 61.677348376016994,
"children": {
"TorchPolicy.sample_actions": {
"total": 120.77185382392685,
"count": 117213,
"self": 120.77185382392685
}
}
}
}
},
"workers": {
"total": 1.397568677012373,
"count": 120637,
"self": 0.0,
"children": {
"worker_root": {
"total": 3870.6442872089706,
"count": 120637,
"is_parallel": true,
"self": 1579.0851697909552,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005811796000017466,
"count": 1,
"is_parallel": true,
"self": 0.0034955159999867647,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023162800000307016,
"count": 8,
"is_parallel": true,
"self": 0.0023162800000307016
}
}
},
"UnityEnvironment.step": {
"total": 0.05087751400003526,
"count": 1,
"is_parallel": true,
"self": 0.00045667199998433716,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000562762000015482,
"count": 1,
"is_parallel": true,
"self": 0.000562762000015482
},
"communicator.exchange": {
"total": 0.048384400000031746,
"count": 1,
"is_parallel": true,
"self": 0.048384400000031746
},
"steps_from_proto": {
"total": 0.0014736800000036965,
"count": 1,
"is_parallel": true,
"self": 0.0003849100000365979,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010887699999670986,
"count": 8,
"is_parallel": true,
"self": 0.0010887699999670986
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2291.5591174180154,
"count": 120636,
"is_parallel": true,
"self": 50.5236126431364,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 42.63799899598746,
"count": 120636,
"is_parallel": true,
"self": 42.63799899598746
},
"communicator.exchange": {
"total": 2026.4770038329646,
"count": 120636,
"is_parallel": true,
"self": 2026.4770038329646
},
"steps_from_proto": {
"total": 171.92050194592684,
"count": 120636,
"is_parallel": true,
"self": 40.72687829882801,
"children": {
"_process_rank_one_or_two_observation": {
"total": 131.19362364709883,
"count": 965088,
"is_parallel": true,
"self": 131.19362364709883
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1199.1744575318992,
"count": 120637,
"self": 4.3156157899272785,
"children": {
"process_trajectory": {
"total": 269.8897455539669,
"count": 120637,
"self": 269.60489855996695,
"children": {
"RLTrainer._checkpoint": {
"total": 0.28484699399996316,
"count": 3,
"self": 0.28484699399996316
}
}
},
"_update_policy": {
"total": 924.9690961880049,
"count": 853,
"self": 358.3395247110418,
"children": {
"TorchPPOOptimizer.update": {
"total": 566.6295714769631,
"count": 42696,
"self": 566.6295714769631
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4959996406105347e-06,
"count": 1,
"self": 1.4959996406105347e-06
},
"TrainerController._save_models": {
"total": 0.13464566799939348,
"count": 1,
"self": 0.001843478999035142,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13280218900035834,
"count": 1,
"self": 0.13280218900035834
}
}
}
}
}
}
}