{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4777142107486725,
"min": 0.43756237626075745,
"max": 1.5902884006500244,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14308.49609375,
"min": 13112.869140625,
"max": 48242.98828125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989894.0,
"min": 29925.0,
"max": 989894.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989894.0,
"min": 29925.0,
"max": 989894.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.015151633881032467,
"min": -0.09408952295780182,
"max": 0.27493995428085327,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 3.7121503353118896,
"min": -22.769664764404297,
"max": 65.1607666015625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02167777344584465,
"min": 0.012780423276126385,
"max": 0.4510452449321747,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.311054706573486,
"min": 3.0289602279663086,
"max": 109.15294647216797,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.03531383852209223,
"min": 0.031063595587814536,
"max": 0.04054398371767231,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.10594151556627669,
"min": 0.09319078676344361,
"max": 0.15932182816807958,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.006047606308129616,
"min": 0.0008803721258724788,
"max": 0.009413041003328907,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.018142818924388848,
"min": 0.0030442110555311814,
"max": 0.02823912300998672,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.5833974722333265e-06,
"min": 7.5833974722333265e-06,
"max": 0.0002941302019565999,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.275019241669998e-05,
"min": 2.275019241669998e-05,
"max": 0.0010726746424417999,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252776666666667,
"min": 0.10252776666666667,
"max": 0.19804339999999998,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.3075833,
"min": 0.3075833,
"max": 0.7575582000000001,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00013613555666666655,
"min": 0.00013613555666666655,
"max": 0.0049023656600000005,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00040840666999999965,
"min": 0.00040840666999999965,
"max": 0.017882154180000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.021888481453061104,
"min": 0.021888481453061104,
"max": 0.8063771724700928,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.06566544622182846,
"min": 0.06566544622182846,
"max": 2.4191315174102783,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 791.1111111111111,
"min": 791.1111111111111,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28480.0,
"min": 15957.0,
"max": 32712.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.04191106495757898,
"min": -0.9999750521965325,
"max": 0.09174701001714258,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 1.5087983384728432,
"min": -31.99920167028904,
"max": 3.1193983405828476,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.04191106495757898,
"min": -0.9999750521965325,
"max": 0.09174701001714258,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 1.5087983384728432,
"min": -31.99920167028904,
"max": 3.1193983405828476,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.17758784054603893,
"min": 0.17758784054603893,
"max": 13.169016491621733,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 6.393162259657402,
"min": 6.393162259657402,
"max": 239.65150392055511,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1768905307",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1768908449"
},
"total": 3142.081056051,
"count": 1,
"self": 0.5841038759995172,
"children": {
"run_training.setup": {
"total": 0.058920113000112906,
"count": 1,
"self": 0.058920113000112906
},
"TrainerController.start_learning": {
"total": 3141.4380320620003,
"count": 1,
"self": 2.58295364492551,
"children": {
"TrainerController._reset_env": {
"total": 3.759598251999705,
"count": 1,
"self": 3.759598251999705
},
"TrainerController.advance": {
"total": 3135.025784268075,
"count": 63243,
"self": 2.7039765840668224,
"children": {
"env_step": {
"total": 2237.1938376459684,
"count": 63243,
"self": 2057.3115365279923,
"children": {
"SubprocessEnvManager._take_step": {
"total": 178.26988093994123,
"count": 63243,
"self": 7.5526500289961405,
"children": {
"TorchPolicy.evaluate": {
"total": 170.7172309109451,
"count": 62580,
"self": 170.7172309109451
}
}
},
"workers": {
"total": 1.6124201780348812,
"count": 63243,
"self": 0.0,
"children": {
"worker_root": {
"total": 3133.06389361394,
"count": 63243,
"is_parallel": true,
"self": 1268.404417387862,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00475230099982582,
"count": 1,
"is_parallel": true,
"self": 0.0016317720001097769,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0031205289997160435,
"count": 8,
"is_parallel": true,
"self": 0.0031205289997160435
}
}
},
"UnityEnvironment.step": {
"total": 0.07869636400027957,
"count": 1,
"is_parallel": true,
"self": 0.0006771770003979327,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005675140000676038,
"count": 1,
"is_parallel": true,
"self": 0.0005675140000676038
},
"communicator.exchange": {
"total": 0.07445971199967971,
"count": 1,
"is_parallel": true,
"self": 0.07445971199967971
},
"steps_from_proto": {
"total": 0.0029919610001343244,
"count": 1,
"is_parallel": true,
"self": 0.0005647010011671227,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0024272599989672017,
"count": 8,
"is_parallel": true,
"self": 0.0024272599989672017
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1864.659476226078,
"count": 63242,
"is_parallel": true,
"self": 46.734430080159655,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 32.238916343945675,
"count": 63242,
"is_parallel": true,
"self": 32.238916343945675
},
"communicator.exchange": {
"total": 1639.4446291290305,
"count": 63242,
"is_parallel": true,
"self": 1639.4446291290305
},
"steps_from_proto": {
"total": 146.24150067294204,
"count": 63242,
"is_parallel": true,
"self": 30.330576059859595,
"children": {
"_process_rank_one_or_two_observation": {
"total": 115.91092461308244,
"count": 505936,
"is_parallel": true,
"self": 115.91092461308244
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 895.1279700380396,
"count": 63243,
"self": 4.847024772991517,
"children": {
"process_trajectory": {
"total": 171.88590044104603,
"count": 63243,
"self": 171.61383980204528,
"children": {
"RLTrainer._checkpoint": {
"total": 0.27206063900075605,
"count": 2,
"self": 0.27206063900075605
}
}
},
"_update_policy": {
"total": 718.3950448240021,
"count": 119,
"self": 330.21039703300585,
"children": {
"TorchPPOOptimizer.update": {
"total": 388.1846477909962,
"count": 5799,
"self": 388.1846477909962
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.059999704011716e-06,
"count": 1,
"self": 1.059999704011716e-06
},
"TrainerController._save_models": {
"total": 0.0696948370004975,
"count": 1,
"self": 0.0015388510009870515,
"children": {
"RLTrainer._checkpoint": {
"total": 0.06815598599951045,
"count": 1,
"self": 0.06815598599951045
}
}
}
}
}
}
}