{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0238357782363892,
"min": 0.9990935325622559,
"max": 2.8526418209075928,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9798.1083984375,
"min": 9798.1083984375,
"max": 29276.6640625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.005125999450684,
"min": 0.3493381440639496,
"max": 13.005125999450684,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2535.99951171875,
"min": 67.77159881591797,
"max": 2622.282470703125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06395740239531733,
"min": 0.06395740239531733,
"max": 0.07707957233266344,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2558296095812693,
"min": 0.2558296095812693,
"max": 0.3696486450481804,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2122765133778254,
"min": 0.11082634554751326,
"max": 0.2861759309821269,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8491060535113016,
"min": 0.44330538219005305,
"max": 1.39886769360187,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.636363636363637,
"min": 3.522727272727273,
"max": 25.75,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1128.0,
"min": 155.0,
"max": 1403.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.636363636363637,
"min": 3.522727272727273,
"max": 25.75,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1128.0,
"min": 155.0,
"max": 1403.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685624296",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685624911"
},
"total": 615.5173450059999,
"count": 1,
"self": 0.5465786059999118,
"children": {
"run_training.setup": {
"total": 0.08128531800002747,
"count": 1,
"self": 0.08128531800002747
},
"TrainerController.start_learning": {
"total": 614.889481082,
"count": 1,
"self": 0.8962865190121647,
"children": {
"TrainerController._reset_env": {
"total": 1.3602169389998835,
"count": 1,
"self": 1.3602169389998835
},
"TrainerController.advance": {
"total": 612.4653537139877,
"count": 18205,
"self": 0.4303196180287614,
"children": {
"env_step": {
"total": 612.035034095959,
"count": 18205,
"self": 496.4123892699736,
"children": {
"SubprocessEnvManager._take_step": {
"total": 115.19989036200946,
"count": 18205,
"self": 2.6846800750092825,
"children": {
"TorchPolicy.evaluate": {
"total": 112.51521028700017,
"count": 18205,
"self": 112.51521028700017
}
}
},
"workers": {
"total": 0.4227544639759344,
"count": 18205,
"self": 0.0,
"children": {
"worker_root": {
"total": 612.5113162580062,
"count": 18205,
"is_parallel": true,
"self": 265.25628729997607,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007688541999868903,
"count": 1,
"is_parallel": true,
"self": 0.005091637000077753,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0025969049997911497,
"count": 10,
"is_parallel": true,
"self": 0.0025969049997911497
}
}
},
"UnityEnvironment.step": {
"total": 0.1180754080000952,
"count": 1,
"is_parallel": true,
"self": 0.000787865000347665,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000498247999985324,
"count": 1,
"is_parallel": true,
"self": 0.000498247999985324
},
"communicator.exchange": {
"total": 0.11174718499978553,
"count": 1,
"is_parallel": true,
"self": 0.11174718499978553
},
"steps_from_proto": {
"total": 0.005042109999976674,
"count": 1,
"is_parallel": true,
"self": 0.0015661079999063077,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003476002000070366,
"count": 10,
"is_parallel": true,
"self": 0.003476002000070366
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 347.25502895803015,
"count": 18204,
"is_parallel": true,
"self": 14.34028698798079,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.850361105010734,
"count": 18204,
"is_parallel": true,
"self": 7.850361105010734
},
"communicator.exchange": {
"total": 278.5309019390086,
"count": 18204,
"is_parallel": true,
"self": 278.5309019390086
},
"steps_from_proto": {
"total": 46.53347892603006,
"count": 18204,
"is_parallel": true,
"self": 9.343052555955865,
"children": {
"_process_rank_one_or_two_observation": {
"total": 37.19042637007419,
"count": 182040,
"is_parallel": true,
"self": 37.19042637007419
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0002055630002359976,
"count": 1,
"self": 0.0002055630002359976,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 606.9817199309755,
"count": 617585,
"is_parallel": true,
"self": 15.043643329979659,
"children": {
"process_trajectory": {
"total": 328.39409023899543,
"count": 617585,
"is_parallel": true,
"self": 326.74501049799596,
"children": {
"RLTrainer._checkpoint": {
"total": 1.6490797409994684,
"count": 4,
"is_parallel": true,
"self": 1.6490797409994684
}
}
},
"_update_policy": {
"total": 263.5439863620004,
"count": 90,
"is_parallel": true,
"self": 90.16529423599991,
"children": {
"TorchPPOOptimizer.update": {
"total": 173.3786921260005,
"count": 4587,
"is_parallel": true,
"self": 173.3786921260005
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.16741834699996616,
"count": 1,
"self": 0.0014154460000099789,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16600290099995618,
"count": 1,
"self": 0.16600290099995618
}
}
}
}
}
}
}