{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6866129040718079,
"min": 0.6866129040718079,
"max": 2.853877544403076,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6525.56884765625,
"min": 6525.56884765625,
"max": 29132.3828125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.990679740905762,
"min": 0.45783981680870056,
"max": 12.990679740905762,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2533.1826171875,
"min": 88.8209228515625,
"max": 2618.18359375,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06691247674048188,
"min": 0.06000601548847614,
"max": 0.07770210306854079,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26764990696192753,
"min": 0.2575401519470866,
"max": 0.37759528810948173,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2146731256416031,
"min": 0.12147532986915288,
"max": 0.29513194321417335,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8586925025664124,
"min": 0.4859013194766115,
"max": 1.4756597160708669,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.068181818181817,
"min": 3.772727272727273,
"max": 26.068181818181817,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1147.0,
"min": 166.0,
"max": 1397.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.068181818181817,
"min": 3.772727272727273,
"max": 26.068181818181817,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1147.0,
"min": 166.0,
"max": 1397.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1754317021",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1754317441"
},
"total": 420.0381634790001,
"count": 1,
"self": 0.6130719580000914,
"children": {
"run_training.setup": {
"total": 0.02762943599987011,
"count": 1,
"self": 0.02762943599987011
},
"TrainerController.start_learning": {
"total": 419.39746208500014,
"count": 1,
"self": 0.3455291270101952,
"children": {
"TrainerController._reset_env": {
"total": 3.275865850999935,
"count": 1,
"self": 3.275865850999935
},
"TrainerController.advance": {
"total": 415.69602424998993,
"count": 18192,
"self": 0.3706213749503604,
"children": {
"env_step": {
"total": 298.37872758000253,
"count": 18192,
"self": 229.96664159703482,
"children": {
"SubprocessEnvManager._take_step": {
"total": 68.21523186696027,
"count": 18192,
"self": 1.2070050829729553,
"children": {
"TorchPolicy.evaluate": {
"total": 67.00822678398731,
"count": 18192,
"self": 67.00822678398731
}
}
},
"workers": {
"total": 0.19685411600744374,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 418.0379529290112,
"count": 18192,
"is_parallel": true,
"self": 215.6557529030115,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005650754000043889,
"count": 1,
"is_parallel": true,
"self": 0.0036516600000595645,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001999093999984325,
"count": 10,
"is_parallel": true,
"self": 0.001999093999984325
}
}
},
"UnityEnvironment.step": {
"total": 0.03480870099997446,
"count": 1,
"is_parallel": true,
"self": 0.000566767999998774,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003896989999248035,
"count": 1,
"is_parallel": true,
"self": 0.0003896989999248035
},
"communicator.exchange": {
"total": 0.0317558189999545,
"count": 1,
"is_parallel": true,
"self": 0.0317558189999545
},
"steps_from_proto": {
"total": 0.0020964150000963855,
"count": 1,
"is_parallel": true,
"self": 0.00040056600005300425,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016958490000433812,
"count": 10,
"is_parallel": true,
"self": 0.0016958490000433812
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 202.38220002599974,
"count": 18191,
"is_parallel": true,
"self": 9.712377696067279,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.4473396909688745,
"count": 18191,
"is_parallel": true,
"self": 5.4473396909688745
},
"communicator.exchange": {
"total": 155.65307796797333,
"count": 18191,
"is_parallel": true,
"self": 155.65307796797333
},
"steps_from_proto": {
"total": 31.569404670990252,
"count": 18191,
"is_parallel": true,
"self": 5.523973820992296,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.045430849997956,
"count": 181910,
"is_parallel": true,
"self": 26.045430849997956
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 116.94667529503704,
"count": 18192,
"self": 0.4045793480172506,
"children": {
"process_trajectory": {
"total": 26.011600184020608,
"count": 18192,
"self": 25.604205475020308,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4073947090003003,
"count": 4,
"self": 0.4073947090003003
}
}
},
"_update_policy": {
"total": 90.53049576299918,
"count": 90,
"self": 38.09753823799383,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.43295752500535,
"count": 4587,
"self": 52.43295752500535
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0160001693293452e-06,
"count": 1,
"self": 1.0160001693293452e-06
},
"TrainerController._save_models": {
"total": 0.08004184099991107,
"count": 1,
"self": 0.0008365480002794357,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07920529299963164,
"count": 1,
"self": 0.07920529299963164
}
}
}
}
}
}
}