{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.39278444647789,
"min": 0.3806266188621521,
"max": 1.446491003036499,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11802.38671875,
"min": 11339.6279296875,
"max": 43880.75,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989943.0,
"min": 29952.0,
"max": 989943.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989943.0,
"min": 29952.0,
"max": 989943.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5195940136909485,
"min": -0.14950436353683472,
"max": 0.6101991534233093,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 144.4471435546875,
"min": -36.03055191040039,
"max": 171.46595764160156,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.011042957194149494,
"min": -0.02288067899644375,
"max": 0.3780382573604584,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.069941997528076,
"min": -6.223544597625732,
"max": 91.10722351074219,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06673937001670267,
"min": 0.06492917673445016,
"max": 0.07400380942683012,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9343511802338375,
"min": 0.5180266659878108,
"max": 1.070502714963867,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015364833844484694,
"min": 0.0016270437209681616,
"max": 0.016112067339625873,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2151076738227857,
"min": 0.02180253677689986,
"max": 0.22556894275476225,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.559497480199998e-06,
"min": 7.559497480199998e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010583296472279998,
"min": 0.00010583296472279998,
"max": 0.0035078189307270993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251979999999998,
"min": 0.10251979999999998,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4352771999999998,
"min": 1.3886848,
"max": 2.5692729000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002617280199999999,
"min": 0.0002617280199999999,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036641922799999986,
"min": 0.0036641922799999986,
"max": 0.11695036271,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01091736275702715,
"min": 0.01091736275702715,
"max": 0.5549317002296448,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15284307301044464,
"min": 0.15284307301044464,
"max": 3.884521961212158,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 361.13953488372096,
"min": 335.07865168539325,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31058.0,
"min": 15984.0,
"max": 33133.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6388604490216387,
"min": -1.0000000521540642,
"max": 1.6424381834235084,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 140.94199861586094,
"min": -29.16500174999237,
"max": 146.17699832469225,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6388604490216387,
"min": -1.0000000521540642,
"max": 1.6424381834235084,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 140.94199861586094,
"min": -29.16500174999237,
"max": 146.17699832469225,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.040977215545480734,
"min": 0.040977215545480734,
"max": 11.740895867347717,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.5240405369113432,
"min": 3.5240405369113432,
"max": 187.85433387756348,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1698055130",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1698057382"
},
"total": 2251.6610180609996,
"count": 1,
"self": 0.4755675909996171,
"children": {
"run_training.setup": {
"total": 0.04264913899987732,
"count": 1,
"self": 0.04264913899987732
},
"TrainerController.start_learning": {
"total": 2251.142801331,
"count": 1,
"self": 1.2884930579580214,
"children": {
"TrainerController._reset_env": {
"total": 8.601933666999912,
"count": 1,
"self": 8.601933666999912
},
"TrainerController.advance": {
"total": 2241.1807903570425,
"count": 63973,
"self": 1.367073860075834,
"children": {
"env_step": {
"total": 1616.534791287982,
"count": 63973,
"self": 1485.1269635530532,
"children": {
"SubprocessEnvManager._take_step": {
"total": 130.57796421193098,
"count": 63973,
"self": 4.52324459084798,
"children": {
"TorchPolicy.evaluate": {
"total": 126.054719621083,
"count": 62566,
"self": 126.054719621083
}
}
},
"workers": {
"total": 0.8298635229978117,
"count": 63973,
"self": 0.0,
"children": {
"worker_root": {
"total": 2246.5180854650757,
"count": 63973,
"is_parallel": true,
"self": 873.4525179910534,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005133394999802476,
"count": 1,
"is_parallel": true,
"self": 0.003602686000022004,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015307089997804724,
"count": 8,
"is_parallel": true,
"self": 0.0015307089997804724
}
}
},
"UnityEnvironment.step": {
"total": 0.04780542899993634,
"count": 1,
"is_parallel": true,
"self": 0.0006108409997978015,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048078700001497054,
"count": 1,
"is_parallel": true,
"self": 0.00048078700001497054
},
"communicator.exchange": {
"total": 0.04512562300010359,
"count": 1,
"is_parallel": true,
"self": 0.04512562300010359
},
"steps_from_proto": {
"total": 0.0015881780000199797,
"count": 1,
"is_parallel": true,
"self": 0.0003285549998963688,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012596230001236108,
"count": 8,
"is_parallel": true,
"self": 0.0012596230001236108
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1373.0655674740224,
"count": 63972,
"is_parallel": true,
"self": 34.28212932815336,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.112690912883636,
"count": 63972,
"is_parallel": true,
"self": 24.112690912883636
},
"communicator.exchange": {
"total": 1217.9407966379792,
"count": 63972,
"is_parallel": true,
"self": 1217.9407966379792
},
"steps_from_proto": {
"total": 96.72995059500613,
"count": 63972,
"is_parallel": true,
"self": 19.142107524229232,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.5878430707769,
"count": 511776,
"is_parallel": true,
"self": 77.5878430707769
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 623.2789252089844,
"count": 63973,
"self": 2.6293287830096688,
"children": {
"process_trajectory": {
"total": 123.36865580296922,
"count": 63973,
"self": 123.15904870296913,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20960710000008476,
"count": 2,
"self": 0.20960710000008476
}
}
},
"_update_policy": {
"total": 497.28094062300556,
"count": 452,
"self": 295.8223338260657,
"children": {
"TorchPPOOptimizer.update": {
"total": 201.45860679693988,
"count": 22812,
"self": 201.45860679693988
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.929999578162096e-07,
"count": 1,
"self": 9.929999578162096e-07
},
"TrainerController._save_models": {
"total": 0.07158325599993987,
"count": 1,
"self": 0.0013586709992523538,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07022458500068751,
"count": 1,
"self": 0.07022458500068751
}
}
}
}
}
}
}