{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9423902034759521,
"min": 0.9423902034759521,
"max": 2.859898090362549,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8956.4765625,
"min": 8956.4765625,
"max": 29193.83984375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.816398620605469,
"min": 0.2950589954853058,
"max": 12.816398620605469,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2499.19775390625,
"min": 57.24144744873047,
"max": 2573.84912109375,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.061741000360366435,
"min": 0.061741000360366435,
"max": 0.0772476778538012,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.24696400144146574,
"min": 0.24696400144146574,
"max": 0.386238389269006,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20111988828170535,
"min": 0.09970296314214447,
"max": 0.30335797427916056,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8044795531268214,
"min": 0.3988118525685779,
"max": 1.5167898713958028,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.204545454545453,
"min": 2.9318181818181817,
"max": 25.563636363636363,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1109.0,
"min": 129.0,
"max": 1406.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.204545454545453,
"min": 2.9318181818181817,
"max": 25.563636363636363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1109.0,
"min": 129.0,
"max": 1406.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1755832772",
"python_version": "3.10.18 (main, Aug 8 2025, 17:07:22) [Clang 20.1.4 ]",
"command_line_arguments": "/home/huinker/Coding/python/.venv/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1755832975"
},
"total": 202.694405083,
"count": 1,
"self": 0.26835372699997606,
"children": {
"run_training.setup": {
"total": 0.015214607999951113,
"count": 1,
"self": 0.015214607999951113
},
"TrainerController.start_learning": {
"total": 202.41083674800007,
"count": 1,
"self": 0.26841272400406524,
"children": {
"TrainerController._reset_env": {
"total": 1.6697769190000145,
"count": 1,
"self": 1.6697769190000145
},
"TrainerController.advance": {
"total": 200.4313081239959,
"count": 18192,
"self": 0.28215874797297147,
"children": {
"env_step": {
"total": 143.76388440901064,
"count": 18192,
"self": 112.34426943699668,
"children": {
"SubprocessEnvManager._take_step": {
"total": 31.23544131200447,
"count": 18192,
"self": 0.6575951830111535,
"children": {
"TorchPolicy.evaluate": {
"total": 30.577846128993315,
"count": 18192,
"self": 30.577846128993315
}
}
},
"workers": {
"total": 0.18417366000949187,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 201.96543777899296,
"count": 18192,
"is_parallel": true,
"self": 104.47833280298391,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0011029550000785093,
"count": 1,
"is_parallel": true,
"self": 0.0003146220001326583,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000788332999945851,
"count": 10,
"is_parallel": true,
"self": 0.000788332999945851
}
}
},
"UnityEnvironment.step": {
"total": 0.021499911000091743,
"count": 1,
"is_parallel": true,
"self": 0.00024338599996553967,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00028019100000165054,
"count": 1,
"is_parallel": true,
"self": 0.00028019100000165054
},
"communicator.exchange": {
"total": 0.020375585000010688,
"count": 1,
"is_parallel": true,
"self": 0.020375585000010688
},
"steps_from_proto": {
"total": 0.0006007490001138649,
"count": 1,
"is_parallel": true,
"self": 0.00014840600010757043,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00045234300000629446,
"count": 10,
"is_parallel": true,
"self": 0.00045234300000629446
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 97.48710497600905,
"count": 18191,
"is_parallel": true,
"self": 3.782894856017265,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.501437766995423,
"count": 18191,
"is_parallel": true,
"self": 2.501437766995423
},
"communicator.exchange": {
"total": 79.73773491400175,
"count": 18191,
"is_parallel": true,
"self": 79.73773491400175
},
"steps_from_proto": {
"total": 11.465037438994614,
"count": 18191,
"is_parallel": true,
"self": 2.4337478130612453,
"children": {
"_process_rank_one_or_two_observation": {
"total": 9.031289625933368,
"count": 181910,
"is_parallel": true,
"self": 9.031289625933368
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 56.38526496701229,
"count": 18192,
"self": 0.42034302100955756,
"children": {
"process_trajectory": {
"total": 12.868588097003112,
"count": 18192,
"self": 12.626173756003254,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2424143409998578,
"count": 4,
"self": 0.2424143409998578
}
}
},
"_update_policy": {
"total": 43.09633384899962,
"count": 90,
"self": 18.242637863009804,
"children": {
"TorchPPOOptimizer.update": {
"total": 24.853695985989816,
"count": 4587,
"self": 24.853695985989816
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1869999525515595e-06,
"count": 1,
"self": 1.1869999525515595e-06
},
"TrainerController._save_models": {
"total": 0.04133779400012827,
"count": 1,
"self": 0.0006601140000839223,
"children": {
"RLTrainer._checkpoint": {
"total": 0.04067768000004435,
"count": 1,
"self": 0.04067768000004435
}
}
}
}
}
}
}