{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.1762984991073608,
"min": 1.1756806373596191,
"max": 2.621238946914673,
"count": 43
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 11283.0546875,
"min": 11283.0546875,
"max": 25987.68359375,
"count": 43
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 79968.0,
"max": 499976.0,
"count": 43
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 79968.0,
"max": 499976.0,
"count": 43
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.787611961364746,
"min": 2.283193588256836,
"max": 12.787611961364746,
"count": 43
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2621.46044921875,
"min": 228.31935119628906,
"max": 2621.46044921875,
"count": 43
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 43
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 4378.0,
"max": 10945.0,
"count": 43
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.90909090909091,
"min": 10.090909090909092,
"max": 26.266666666666666,
"count": 43
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1425.0,
"min": 222.0,
"max": 1425.0,
"count": 43
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.90909090909091,
"min": 10.090909090909092,
"max": 26.266666666666666,
"count": 43
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1425.0,
"min": 222.0,
"max": 1425.0,
"count": 43
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 43
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 43
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06489407757253503,
"min": 0.06036975931660417,
"max": 0.08248073132896631,
"count": 40
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.06489407757253503,
"min": 0.06036975931660417,
"max": 0.08248073132896631,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18900001337751746,
"min": 0.18232523016631602,
"max": 0.29615254122763873,
"count": 40
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.18900001337751746,
"min": 0.18232523016631602,
"max": 0.29615254122763873,
"count": 40
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 4.320098559999991e-06,
"min": 4.320098559999991e-06,
"max": 0.0002485728171424,
"count": 40
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 4.320098559999991e-06,
"min": 4.320098559999991e-06,
"max": 0.0002485728171424,
"count": 40
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10143999999999999,
"min": 0.10143999999999999,
"max": 0.18285759999999998,
"count": 40
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.10143999999999999,
"min": 0.10143999999999999,
"max": 0.18285759999999998,
"count": 40
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001538559999999997,
"min": 0.0001538559999999997,
"max": 0.008287474239999997,
"count": 40
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0001538559999999997,
"min": 0.0001538559999999997,
"max": 0.008287474239999997,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1753665409",
"python_version": "3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0]",
"command_line_arguments": "/content/myenv/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1753666228"
},
"total": 818.3160977670002,
"count": 1,
"self": 0.3216678219998812,
"children": {
"run_training.setup": {
"total": 0.018376912000121592,
"count": 1,
"self": 0.018376912000121592
},
"TrainerController.start_learning": {
"total": 817.9760530330002,
"count": 1,
"self": 0.8359626409924203,
"children": {
"TrainerController._reset_env": {
"total": 2.10856075300012,
"count": 1,
"self": 2.10856075300012
},
"TrainerController.advance": {
"total": 814.9496127620077,
"count": 38664,
"self": 0.7878320360491671,
"children": {
"env_step": {
"total": 565.1716780359611,
"count": 38664,
"self": 422.3052517529202,
"children": {
"SubprocessEnvManager._take_step": {
"total": 142.35733210701142,
"count": 38664,
"self": 2.5657066400149233,
"children": {
"TorchPolicy.evaluate": {
"total": 139.7916254669965,
"count": 38664,
"self": 139.7916254669965
}
}
},
"workers": {
"total": 0.5090941760295209,
"count": 38664,
"self": 0.0,
"children": {
"worker_root": {
"total": 816.4277501870142,
"count": 38664,
"is_parallel": true,
"self": 444.1115703140108,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002225774999942587,
"count": 1,
"is_parallel": true,
"self": 0.0008115320001707005,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014142429997718864,
"count": 10,
"is_parallel": true,
"self": 0.0014142429997718864
}
}
},
"UnityEnvironment.step": {
"total": 0.02700871299998653,
"count": 1,
"is_parallel": true,
"self": 0.0003939330001685448,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002823669999543199,
"count": 1,
"is_parallel": true,
"self": 0.0002823669999543199
},
"communicator.exchange": {
"total": 0.02495286699991084,
"count": 1,
"is_parallel": true,
"self": 0.02495286699991084
},
"steps_from_proto": {
"total": 0.0013795459999528248,
"count": 1,
"is_parallel": true,
"self": 0.00030719899973519205,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010723470002176327,
"count": 10,
"is_parallel": true,
"self": 0.0010723470002176327
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 372.3161798730034,
"count": 38663,
"is_parallel": true,
"self": 12.848969081934456,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.157196803053239,
"count": 38663,
"is_parallel": true,
"self": 7.157196803053239
},
"communicator.exchange": {
"total": 310.17692784402584,
"count": 38663,
"is_parallel": true,
"self": 310.17692784402584
},
"steps_from_proto": {
"total": 42.133086143989885,
"count": 38663,
"is_parallel": true,
"self": 8.218537388873983,
"children": {
"_process_rank_one_or_two_observation": {
"total": 33.9145487551159,
"count": 386630,
"is_parallel": true,
"self": 33.9145487551159
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 248.99010268999746,
"count": 38664,
"self": 0.998618152046447,
"children": {
"process_trajectory": {
"total": 54.729835017951245,
"count": 38664,
"self": 53.9497587029507,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7800763150005423,
"count": 9,
"self": 0.7800763150005423
}
}
},
"_update_policy": {
"total": 193.26164951999976,
"count": 40,
"self": 78.79377621699314,
"children": {
"TorchPPOOptimizer.update": {
"total": 114.46787330300663,
"count": 9750,
"self": 114.46787330300663
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0369999472459313e-06,
"count": 1,
"self": 1.0369999472459313e-06
},
"TrainerController._save_models": {
"total": 0.0819158399999651,
"count": 1,
"self": 0.0010463180001352157,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08086952199982989,
"count": 1,
"self": 0.08086952199982989
}
}
}
}
}
}
}