{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.90290367603302,
"min": 0.90290367603302,
"max": 2.857667922973633,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8581.1962890625,
"min": 8581.1962890625,
"max": 29171.07421875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.922199249267578,
"min": 0.547727644443512,
"max": 12.922199249267578,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2519.828857421875,
"min": 106.25916290283203,
"max": 2615.2138671875,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07383112957491084,
"min": 0.06289154015710878,
"max": 0.07383112957491084,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.29532451829964335,
"min": 0.2566959803818962,
"max": 0.3559083703630855,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18360737736756896,
"min": 0.12361180260503554,
"max": 0.28361692697394125,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7344295094702759,
"min": 0.49444721042014217,
"max": 1.4180846348697063,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.59090909090909,
"min": 3.75,
"max": 25.59090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1126.0,
"min": 165.0,
"max": 1403.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.59090909090909,
"min": 3.75,
"max": 25.59090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1126.0,
"min": 165.0,
"max": 1403.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1775640671",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1775641186"
},
"total": 515.7371517879999,
"count": 1,
"self": 0.5355481149997559,
"children": {
"run_training.setup": {
"total": 0.029894897000076526,
"count": 1,
"self": 0.029894897000076526
},
"TrainerController.start_learning": {
"total": 515.1717087760001,
"count": 1,
"self": 0.44976438900891935,
"children": {
"TrainerController._reset_env": {
"total": 3.613090886000009,
"count": 1,
"self": 3.613090886000009
},
"TrainerController.advance": {
"total": 511.0178313079913,
"count": 18192,
"self": 0.46766963300547104,
"children": {
"env_step": {
"total": 378.3834207429894,
"count": 18192,
"self": 296.059643676016,
"children": {
"SubprocessEnvManager._take_step": {
"total": 82.05908661199226,
"count": 18192,
"self": 1.467702041984353,
"children": {
"TorchPolicy.evaluate": {
"total": 80.5913845700079,
"count": 18192,
"self": 80.5913845700079
}
}
},
"workers": {
"total": 0.26469045498117794,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 512.8380234650117,
"count": 18192,
"is_parallel": true,
"self": 252.93570228799922,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005434188999970502,
"count": 1,
"is_parallel": true,
"self": 0.00383702500016625,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001597163999804252,
"count": 10,
"is_parallel": true,
"self": 0.001597163999804252
}
}
},
"UnityEnvironment.step": {
"total": 0.07691397600001437,
"count": 1,
"is_parallel": true,
"self": 0.0007252190000599512,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000451492999900438,
"count": 1,
"is_parallel": true,
"self": 0.000451492999900438
},
"communicator.exchange": {
"total": 0.07147464199999831,
"count": 1,
"is_parallel": true,
"self": 0.07147464199999831
},
"steps_from_proto": {
"total": 0.004262622000055671,
"count": 1,
"is_parallel": true,
"self": 0.00040748800006440433,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003855133999991267,
"count": 10,
"is_parallel": true,
"self": 0.003855133999991267
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 259.9023211770125,
"count": 18191,
"is_parallel": true,
"self": 11.954734494993772,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.377827879995721,
"count": 18191,
"is_parallel": true,
"self": 6.377827879995721
},
"communicator.exchange": {
"total": 197.68831062700212,
"count": 18191,
"is_parallel": true,
"self": 197.68831062700212
},
"steps_from_proto": {
"total": 43.881448175020864,
"count": 18191,
"is_parallel": true,
"self": 7.537716884013776,
"children": {
"_process_rank_one_or_two_observation": {
"total": 36.34373129100709,
"count": 181910,
"is_parallel": true,
"self": 36.34373129100709
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 132.1667409319964,
"count": 18192,
"self": 0.535443234009449,
"children": {
"process_trajectory": {
"total": 30.300311558988028,
"count": 18192,
"self": 29.81309932898796,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4872122300000683,
"count": 4,
"self": 0.4872122300000683
}
}
},
"_update_policy": {
"total": 101.33098613899892,
"count": 90,
"self": 41.323106336001615,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.00787980299731,
"count": 4587,
"self": 60.00787980299731
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4509998891298892e-06,
"count": 1,
"self": 1.4509998891298892e-06
},
"TrainerController._save_models": {
"total": 0.09102074199995513,
"count": 1,
"self": 0.0010112990000834543,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09000944299987168,
"count": 1,
"self": 0.09000944299987168
}
}
}
}
}
}
}