{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.3244812488555908,
"min": 1.3138551712036133,
"max": 2.8903205394744873,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 12704.423828125,
"min": 12704.423828125,
"max": 29712.94140625,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.213321685791016,
"min": 0.06984744966030121,
"max": 12.213321685791016,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2503.73095703125,
"min": 13.550405502319336,
"max": 2503.73095703125,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.163636363636364,
"min": 2.727272727272727,
"max": 25.704545454545453,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1384.0,
"min": 120.0,
"max": 1402.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.163636363636364,
"min": 2.727272727272727,
"max": 25.704545454545453,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1384.0,
"min": 120.0,
"max": 1402.0,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.04497441239363979,
"min": 0.04381474067748058,
"max": 0.054947014540666715,
"count": 47
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.04497441239363979,
"min": 0.04381474067748058,
"max": 0.054947014540666715,
"count": 47
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19722176723182203,
"min": 0.08929812352926958,
"max": 0.27886126403297695,
"count": 47
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.19722176723182203,
"min": 0.08929812352926958,
"max": 0.27886126403297695,
"count": 47
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.21769826079999e-06,
"min": 5.21769826079999e-06,
"max": 0.00029345280218239997,
"count": 47
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.21769826079999e-06,
"min": 5.21769826079999e-06,
"max": 0.00029345280218239997,
"count": 47
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.1,
"min": 0.1,
"max": 0.1,
"count": 47
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.1,
"min": 0.1,
"max": 0.1,
"count": 47
},
"SnowballTarget.Policy.Beta.mean": {
"value": 9.678607999999985e-05,
"min": 9.678607999999985e-05,
"max": 0.004891098240000002,
"count": 47
},
"SnowballTarget.Policy.Beta.sum": {
"value": 9.678607999999985e-05,
"min": 9.678607999999985e-05,
"max": 0.004891098240000002,
"count": 47
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739871488",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739872671"
},
"total": 1182.3685628489998,
"count": 1,
"self": 0.44264259499959735,
"children": {
"run_training.setup": {
"total": 0.026170147999891924,
"count": 1,
"self": 0.026170147999891924
},
"TrainerController.start_learning": {
"total": 1181.8997501060003,
"count": 1,
"self": 1.098616297001854,
"children": {
"TrainerController._reset_env": {
"total": 3.2039414559999386,
"count": 1,
"self": 3.2039414559999386
},
"TrainerController.advance": {
"total": 1177.495547802998,
"count": 45464,
"self": 1.0874667340115138,
"children": {
"env_step": {
"total": 800.7548569929843,
"count": 45464,
"self": 610.0053450599755,
"children": {
"SubprocessEnvManager._take_step": {
"total": 190.08538621703565,
"count": 45464,
"self": 3.4260745750455044,
"children": {
"TorchPolicy.evaluate": {
"total": 186.65931164199014,
"count": 45464,
"self": 186.65931164199014
}
}
},
"workers": {
"total": 0.664125715973114,
"count": 45464,
"self": 0.0,
"children": {
"worker_root": {
"total": 1178.3269263969971,
"count": 45464,
"is_parallel": true,
"self": 645.3714331400286,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006439217000206554,
"count": 1,
"is_parallel": true,
"self": 0.00456033499995101,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001878882000255544,
"count": 10,
"is_parallel": true,
"self": 0.001878882000255544
}
}
},
"UnityEnvironment.step": {
"total": 0.034223975000031714,
"count": 1,
"is_parallel": true,
"self": 0.0006162360002690548,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003777039999022236,
"count": 1,
"is_parallel": true,
"self": 0.0003777039999022236
},
"communicator.exchange": {
"total": 0.031380054000010205,
"count": 1,
"is_parallel": true,
"self": 0.031380054000010205
},
"steps_from_proto": {
"total": 0.0018499809998502315,
"count": 1,
"is_parallel": true,
"self": 0.0003563709994978126,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014936100003524189,
"count": 10,
"is_parallel": true,
"self": 0.0014936100003524189
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 532.9554932569686,
"count": 45463,
"is_parallel": true,
"self": 25.34702453798627,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.98602906699466,
"count": 45463,
"is_parallel": true,
"self": 13.98602906699466
},
"communicator.exchange": {
"total": 412.7122139389537,
"count": 45463,
"is_parallel": true,
"self": 412.7122139389537
},
"steps_from_proto": {
"total": 80.91022571303392,
"count": 45463,
"is_parallel": true,
"self": 14.811916336046352,
"children": {
"_process_rank_one_or_two_observation": {
"total": 66.09830937698757,
"count": 454630,
"is_parallel": true,
"self": 66.09830937698757
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 375.6532240760023,
"count": 45464,
"self": 1.3037740640115771,
"children": {
"process_trajectory": {
"total": 70.22354409699074,
"count": 45464,
"self": 69.13826416299116,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0852799339995727,
"count": 10,
"self": 1.0852799339995727
}
}
},
"_update_policy": {
"total": 304.125905915,
"count": 47,
"self": 171.08491809998827,
"children": {
"TorchPPOOptimizer.update": {
"total": 133.0409878150117,
"count": 9520,
"self": 133.0409878150117
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.650002539274283e-07,
"count": 1,
"self": 9.650002539274283e-07
},
"TrainerController._save_models": {
"total": 0.10164358500014714,
"count": 1,
"self": 0.0009567960000822495,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10068678900006489,
"count": 1,
"self": 0.10068678900006489
}
}
}
}
}
}
}