{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8975753784179688,
"min": 0.8975753784179688,
"max": 2.8597354888916016,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8530.556640625,
"min": 8530.556640625,
"max": 29192.1796875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.776655197143555,
"min": 0.46246621012687683,
"max": 12.885666847229004,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2491.44775390625,
"min": 89.71844482421875,
"max": 2636.5,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07192382876340109,
"min": 0.061896672252766495,
"max": 0.07233163446068705,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28769531505360435,
"min": 0.2539309578389614,
"max": 0.34788263919900225,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.22757543382399226,
"min": 0.12895768764314663,
"max": 0.30576228961056356,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.910301735295969,
"min": 0.5158307505725865,
"max": 1.5288114480528179,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.136363636363637,
"min": 3.5,
"max": 25.568181818181817,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1106.0,
"min": 154.0,
"max": 1400.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.136363636363637,
"min": 3.5,
"max": 25.568181818181817,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1106.0,
"min": 154.0,
"max": 1400.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1742418441",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1742418883"
},
"total": 442.240541192,
"count": 1,
"self": 0.43804675900003076,
"children": {
"run_training.setup": {
"total": 0.023282254000037028,
"count": 1,
"self": 0.023282254000037028
},
"TrainerController.start_learning": {
"total": 441.77921217899996,
"count": 1,
"self": 0.3571841920040697,
"children": {
"TrainerController._reset_env": {
"total": 3.062101415000029,
"count": 1,
"self": 3.062101415000029
},
"TrainerController.advance": {
"total": 438.26972194099585,
"count": 18192,
"self": 0.37662847498836527,
"children": {
"env_step": {
"total": 312.29938751100747,
"count": 18192,
"self": 236.34124002902308,
"children": {
"SubprocessEnvManager._take_step": {
"total": 75.74165262999998,
"count": 18192,
"self": 1.3496447489977754,
"children": {
"TorchPolicy.evaluate": {
"total": 74.3920078810022,
"count": 18192,
"self": 74.3920078810022
}
}
},
"workers": {
"total": 0.21649485198440743,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 440.247598570999,
"count": 18192,
"is_parallel": true,
"self": 232.61559110898577,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006181594999929985,
"count": 1,
"is_parallel": true,
"self": 0.004596318000039901,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001585276999890084,
"count": 10,
"is_parallel": true,
"self": 0.001585276999890084
}
}
},
"UnityEnvironment.step": {
"total": 0.034448657999973875,
"count": 1,
"is_parallel": true,
"self": 0.0005470569999488362,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00040865500000109023,
"count": 1,
"is_parallel": true,
"self": 0.00040865500000109023
},
"communicator.exchange": {
"total": 0.03158952199999021,
"count": 1,
"is_parallel": true,
"self": 0.03158952199999021
},
"steps_from_proto": {
"total": 0.0019034240000337377,
"count": 1,
"is_parallel": true,
"self": 0.0003773279998995349,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015260960001342028,
"count": 10,
"is_parallel": true,
"self": 0.0015260960001342028
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 207.63200746201323,
"count": 18191,
"is_parallel": true,
"self": 9.977580674981368,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.616386188005777,
"count": 18191,
"is_parallel": true,
"self": 5.616386188005777
},
"communicator.exchange": {
"total": 158.68438843200784,
"count": 18191,
"is_parallel": true,
"self": 158.68438843200784
},
"steps_from_proto": {
"total": 33.353652167018254,
"count": 18191,
"is_parallel": true,
"self": 6.013333369951283,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.34031879706697,
"count": 181910,
"is_parallel": true,
"self": 27.34031879706697
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 125.59370595500002,
"count": 18192,
"self": 0.4356591189958863,
"children": {
"process_trajectory": {
"total": 28.031761015004236,
"count": 18192,
"self": 27.422790016004114,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6089709990001211,
"count": 4,
"self": 0.6089709990001211
}
}
},
"_update_policy": {
"total": 97.1262858209999,
"count": 90,
"self": 38.76279023499933,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.36349558600057,
"count": 4587,
"self": 58.36349558600057
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.43000031838892e-07,
"count": 1,
"self": 9.43000031838892e-07
},
"TrainerController._save_models": {
"total": 0.09020368799997414,
"count": 1,
"self": 0.0009285319999889907,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08927515599998515,
"count": 1,
"self": 0.08927515599998515
}
}
}
}
}
}
}