{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7694647908210754,
"min": 0.7694647908210754,
"max": 2.853088140487671,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7312.9931640625,
"min": 7312.9931640625,
"max": 29124.32421875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.947829246520996,
"min": 0.4790917634963989,
"max": 12.947829246520996,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2524.82666015625,
"min": 92.94380187988281,
"max": 2636.457763671875,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06821512413929062,
"min": 0.06408494403630731,
"max": 0.07848781842938692,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2728604965571625,
"min": 0.25633977614522924,
"max": 0.3718002547119635,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2082201208846242,
"min": 0.13781711574433847,
"max": 0.2896621895771401,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8328804835384968,
"min": 0.5512684629773539,
"max": 1.4483109478857004,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.045454545454547,
"min": 4.25,
"max": 26.045454545454547,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1146.0,
"min": 187.0,
"max": 1397.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.045454545454547,
"min": 4.25,
"max": 26.045454545454547,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1146.0,
"min": 187.0,
"max": 1397.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1745050336",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1745050768"
},
"total": 432.055839913,
"count": 1,
"self": 0.5504441329999281,
"children": {
"run_training.setup": {
"total": 0.03245625299996391,
"count": 1,
"self": 0.03245625299996391
},
"TrainerController.start_learning": {
"total": 431.4729395270001,
"count": 1,
"self": 0.33504929599848765,
"children": {
"TrainerController._reset_env": {
"total": 3.3751126100000874,
"count": 1,
"self": 3.3751126100000874
},
"TrainerController.advance": {
"total": 427.67682926100133,
"count": 18192,
"self": 0.35742212198101697,
"children": {
"env_step": {
"total": 303.53289770999663,
"count": 18192,
"self": 230.19323865802744,
"children": {
"SubprocessEnvManager._take_step": {
"total": 73.13666145699017,
"count": 18192,
"self": 1.2760460549561685,
"children": {
"TorchPolicy.evaluate": {
"total": 71.860615402034,
"count": 18192,
"self": 71.860615402034
}
}
},
"workers": {
"total": 0.20299759497902414,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 430.0900566359601,
"count": 18192,
"is_parallel": true,
"self": 227.9387275189597,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006129881000106252,
"count": 1,
"is_parallel": true,
"self": 0.0041792989995883545,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019505820005178975,
"count": 10,
"is_parallel": true,
"self": 0.0019505820005178975
}
}
},
"UnityEnvironment.step": {
"total": 0.03666919300030713,
"count": 1,
"is_parallel": true,
"self": 0.0006110850004006352,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00040331399986826,
"count": 1,
"is_parallel": true,
"self": 0.00040331399986826
},
"communicator.exchange": {
"total": 0.033780173000195646,
"count": 1,
"is_parallel": true,
"self": 0.033780173000195646
},
"steps_from_proto": {
"total": 0.0018746209998425911,
"count": 1,
"is_parallel": true,
"self": 0.0003779879993999202,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014966330004426709,
"count": 10,
"is_parallel": true,
"self": 0.0014966330004426709
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 202.1513291170004,
"count": 18191,
"is_parallel": true,
"self": 9.776712402954217,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.4082795120316405,
"count": 18191,
"is_parallel": true,
"self": 5.4082795120316405
},
"communicator.exchange": {
"total": 154.86586740601297,
"count": 18191,
"is_parallel": true,
"self": 154.86586740601297
},
"steps_from_proto": {
"total": 32.10046979600156,
"count": 18191,
"is_parallel": true,
"self": 5.7488871170394304,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.35158267896213,
"count": 181910,
"is_parallel": true,
"self": 26.35158267896213
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 123.78650942902368,
"count": 18192,
"self": 0.4212557260284484,
"children": {
"process_trajectory": {
"total": 27.730596646996673,
"count": 18192,
"self": 27.300991365997106,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4296052809995672,
"count": 4,
"self": 0.4296052809995672
}
}
},
"_update_policy": {
"total": 95.63465705599856,
"count": 90,
"self": 37.96626129000924,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.66839576598932,
"count": 4587,
"self": 57.66839576598932
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0150001799047459e-06,
"count": 1,
"self": 1.0150001799047459e-06
},
"TrainerController._save_models": {
"total": 0.08594734500002232,
"count": 1,
"self": 0.000945894999858865,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08500145000016346,
"count": 1,
"self": 0.08500145000016346
}
}
}
}
}
}
}