Pyramids/run_logs/timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4531543254852295,
"min": 0.4531543254852295,
"max": 1.0132265090942383,
"count": 17
},
"Pyramids.Policy.Entropy.sum": {
"value": 13427.869140625,
"min": 10796.94140625,
"max": 30436.46875,
"count": 17
},
"Pyramids.Step.mean": {
"value": 989961.0,
"min": 509993.0,
"max": 989961.0,
"count": 17
},
"Pyramids.Step.sum": {
"value": 989961.0,
"min": 509993.0,
"max": 989961.0,
"count": 17
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.314156711101532,
"min": -0.020626777783036232,
"max": 0.3798234164714813,
"count": 17
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 82.3090591430664,
"min": -1.6707689762115479,
"max": 98.75408935546875,
"count": 17
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.033712223172187805,
"min": -0.033712223172187805,
"max": 0.04274001717567444,
"count": 17
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -8.832602500915527,
"min": -8.832602500915527,
"max": 11.069664001464844,
"count": 17
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 486.42622950819674,
"min": 309.25,
"max": 834.025641025641,
"count": 17
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29672.0,
"min": 1237.0,
"max": 32696.0,
"count": 17
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.1856098056572382,
"min": -0.11661030103762944,
"max": 1.690750002861023,
"count": 17
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 72.32219814509153,
"min": -4.547801740467548,
"max": 83.06279814988375,
"count": 17
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.1856098056572382,
"min": -0.11661030103762944,
"max": 1.690750002861023,
"count": 17
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 72.32219814509153,
"min": -4.547801740467548,
"max": 83.06279814988375,
"count": 17
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06454847306852825,
"min": 0.051007845118874684,
"max": 0.12605790901579894,
"count": 17
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.937456857180223,
"min": 0.20403138047549874,
"max": 5.042316360631958,
"count": 17
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06812104680284393,
"min": 0.06629444303351203,
"max": 0.07307606635534865,
"count": 17
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.953694655239815,
"min": 0.2923042654213946,
"max": 1.0957097937838018,
"count": 17
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013116745470878889,
"min": 0.0026308059390371535,
"max": 0.014184579108890222,
"count": 17
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18363443659230444,
"min": 0.010523223756148614,
"max": 0.19858410752446312,
"count": 17
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.550047483350001e-06,
"min": 7.550047483350001e-06,
"max": 0.00014832387555872498,
"count": 17
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010570066476690001,
"min": 0.00010570066476690001,
"max": 0.0018691090769639,
"count": 17
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251664999999999,
"min": 0.10251664999999999,
"max": 0.14944127500000004,
"count": 17
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4352330999999998,
"min": 0.5977651000000002,
"max": 2.0230361000000006,
"count": 17
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026141333500000007,
"min": 0.00026141333500000007,
"max": 0.0049491833725,
"count": 17
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003659786690000001,
"min": 0.003659786690000001,
"max": 0.06238130639,
"count": 17
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012849760241806507,
"min": 0.011811994947493076,
"max": 0.015293809585273266,
"count": 17
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17989663779735565,
"min": 0.05437934026122093,
"max": 0.21411333978176117,
"count": 17
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 17
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 17
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1694541431",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\bdonn\\miniconda3\\envs\\DRL\\Scripts\\mlagents-learn ./config/ppo/PyramidsRND.yaml --env=../Pyramids/UnityEnvironment.exe --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1",
"numpy_version": "1.25.2",
"end_time_seconds": "1694542605"
},
"total": 1173.3677240000106,
"count": 1,
"self": 0.3613532001618296,
"children": {
"run_training.setup": {
"total": 0.17205539997667074,
"count": 1,
"self": 0.17205539997667074
},
"TrainerController.start_learning": {
"total": 1172.8343153998721,
"count": 1,
"self": 1.0854164024349302,
"children": {
"TrainerController._reset_env": {
"total": 6.197830699849874,
"count": 1,
"self": 6.197830699849874
},
"TrainerController.advance": {
"total": 1165.4142691974994,
"count": 31986,
"self": 0.9317487992811948,
"children": {
"env_step": {
"total": 654.2052320900839,
"count": 31986,
"self": 524.4149543903768,
"children": {
"SubprocessEnvManager._take_step": {
"total": 129.08072021254338,
"count": 31986,
"self": 2.8359906140249223,
"children": {
"TorchPolicy.evaluate": {
"total": 126.24472959851846,
"count": 31312,
"self": 126.24472959851846
}
}
},
"workers": {
"total": 0.7095574871636927,
"count": 31986,
"self": 0.0,
"children": {
"worker_root": {
"total": 1166.8321079106536,
"count": 31986,
"is_parallel": true,
"self": 713.9999165155459,
"children": {
"steps_from_proto": {
"total": 0.0012974001001566648,
"count": 1,
"is_parallel": true,
"self": 0.0003544003702700138,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000942999729886651,
"count": 8,
"is_parallel": true,
"self": 0.000942999729886651
}
}
},
"UnityEnvironment.step": {
"total": 452.8308939950075,
"count": 31986,
"is_parallel": true,
"self": 11.983218401670456,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 9.725233814446256,
"count": 31986,
"is_parallel": true,
"self": 9.725233814446256
},
"communicator.exchange": {
"total": 396.8172612024937,
"count": 31986,
"is_parallel": true,
"self": 396.8172612024937
},
"steps_from_proto": {
"total": 34.305180576397106,
"count": 31986,
"is_parallel": true,
"self": 7.4639062327332795,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.841274343663827,
"count": 255888,
"is_parallel": true,
"self": 26.841274343663827
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 510.2772883081343,
"count": 31986,
"self": 2.3398887978401035,
"children": {
"process_trajectory": {
"total": 98.11965311155654,
"count": 31986,
"self": 97.80131631135009,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3183368002064526,
"count": 2,
"self": 0.3183368002064526
}
}
},
"_update_policy": {
"total": 409.81774639873765,
"count": 234,
"self": 222.75601530075073,
"children": {
"TorchPPOOptimizer.update": {
"total": 187.06173109798692,
"count": 11346,
"self": 187.06173109798692
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.00000761449337e-06,
"count": 1,
"self": 1.00000761449337e-06
},
"TrainerController._save_models": {
"total": 0.1367981000803411,
"count": 1,
"self": 0.016386400209739804,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1204116998706013,
"count": 1,
"self": 0.1204116998706013
}
}
}
}
}
}
}