{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5051900744438171,
"min": 0.5051900744438171,
"max": 1.4581128358840942,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15179.951171875,
"min": 15179.951171875,
"max": 44233.3125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989897.0,
"min": 29894.0,
"max": 989897.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989897.0,
"min": 29894.0,
"max": 989897.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2487221360206604,
"min": -0.10975060611963272,
"max": 0.25868022441864014,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 64.66775512695312,
"min": -26.559646606445312,
"max": 65.96345520019531,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.17655110359191895,
"min": -0.31895631551742554,
"max": 0.22103823721408844,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -45.90328598022461,
"min": -81.3338623046875,
"max": 53.27021408081055,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06898486472099515,
"min": 0.06515650913934222,
"max": 0.0729250141645882,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.965788106093932,
"min": 0.5809412502324751,
"max": 1.0904194857381906,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015495144149084077,
"min": 0.0005266084795960589,
"max": 0.023456230520322505,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21693201808717708,
"min": 0.006845910234748765,
"max": 0.35184345780483756,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.273661861192861e-06,
"min": 7.273661861192861e-06,
"max": 0.0002947701017433,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010183126605670005,
"min": 0.00010183126605670005,
"max": 0.0035082698305767997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10242452142857143,
"min": 0.10242452142857143,
"max": 0.1982567,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4339433000000001,
"min": 1.4339433000000001,
"max": 2.5694231999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025220969071428593,
"min": 0.00025220969071428593,
"max": 0.00982584433,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035309356700000028,
"min": 0.0035309356700000028,
"max": 0.11696537767999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009560888633131981,
"min": 0.009329685010015965,
"max": 0.3751799762248993,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13385243713855743,
"min": 0.13070960342884064,
"max": 3.0014398097991943,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 561.8888888888889,
"min": 561.8888888888889,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30342.0,
"min": 17332.0,
"max": 33739.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.0675999667081568,
"min": -0.9999000513926148,
"max": 1.0675999667081568,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 57.65039820224047,
"min": -31.996801644563675,
"max": 57.65039820224047,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.0675999667081568,
"min": -0.9999000513926148,
"max": 1.0675999667081568,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 57.65039820224047,
"min": -31.996801644563675,
"max": 57.65039820224047,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05554253103521963,
"min": 0.05554253103521963,
"max": 7.033807673439798,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.99929667590186,
"min": 2.8350150443620805,
"max": 126.60853812191635,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704015333",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1704017582"
},
"total": 2248.932787756,
"count": 1,
"self": 0.4933229630000824,
"children": {
"run_training.setup": {
"total": 0.058000097999979516,
"count": 1,
"self": 0.058000097999979516
},
"TrainerController.start_learning": {
"total": 2248.381464695,
"count": 1,
"self": 1.5899396879872256,
"children": {
"TrainerController._reset_env": {
"total": 3.9022496499999306,
"count": 1,
"self": 3.9022496499999306
},
"TrainerController.advance": {
"total": 2242.800112424013,
"count": 63452,
"self": 1.7124911399419034,
"children": {
"env_step": {
"total": 1594.7246473900468,
"count": 63452,
"self": 1455.2875197899486,
"children": {
"SubprocessEnvManager._take_step": {
"total": 138.51344353506022,
"count": 63452,
"self": 4.944456437999747,
"children": {
"TorchPolicy.evaluate": {
"total": 133.56898709706047,
"count": 62568,
"self": 133.56898709706047
}
}
},
"workers": {
"total": 0.9236840650379463,
"count": 63452,
"self": 0.0,
"children": {
"worker_root": {
"total": 2243.0319351120465,
"count": 63452,
"is_parallel": true,
"self": 913.1511439750966,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007315004000020053,
"count": 1,
"is_parallel": true,
"self": 0.0037803610000537446,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0035346429999663087,
"count": 8,
"is_parallel": true,
"self": 0.0035346429999663087
}
}
},
"UnityEnvironment.step": {
"total": 0.04736910100007208,
"count": 1,
"is_parallel": true,
"self": 0.0006474710000929917,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004914889999554362,
"count": 1,
"is_parallel": true,
"self": 0.0004914889999554362
},
"communicator.exchange": {
"total": 0.044555703000014546,
"count": 1,
"is_parallel": true,
"self": 0.044555703000014546
},
"steps_from_proto": {
"total": 0.0016744380000091041,
"count": 1,
"is_parallel": true,
"self": 0.0004093840002497018,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012650539997594024,
"count": 8,
"is_parallel": true,
"self": 0.0012650539997594024
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1329.88079113695,
"count": 63451,
"is_parallel": true,
"self": 36.94967154988035,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.728953440050304,
"count": 63451,
"is_parallel": true,
"self": 24.728953440050304
},
"communicator.exchange": {
"total": 1167.0420609079702,
"count": 63451,
"is_parallel": true,
"self": 1167.0420609079702
},
"steps_from_proto": {
"total": 101.16010523904924,
"count": 63451,
"is_parallel": true,
"self": 20.426797689050318,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.73330754999893,
"count": 507608,
"is_parallel": true,
"self": 80.73330754999893
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 646.3629738940244,
"count": 63452,
"self": 3.0134323490100314,
"children": {
"process_trajectory": {
"total": 129.01817224601086,
"count": 63452,
"self": 128.8183325930106,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19983965300025375,
"count": 2,
"self": 0.19983965300025375
}
}
},
"_update_policy": {
"total": 514.3313692990035,
"count": 451,
"self": 303.6601458060187,
"children": {
"TorchPPOOptimizer.update": {
"total": 210.67122349298484,
"count": 22767,
"self": 210.67122349298484
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0769999789772555e-06,
"count": 1,
"self": 1.0769999789772555e-06
},
"TrainerController._save_models": {
"total": 0.08916185599991877,
"count": 1,
"self": 0.001354629000161367,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0878072269997574,
"count": 1,
"self": 0.0878072269997574
}
}
}
}
}
}
}