{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9199580550193787,
"min": 0.9199580550193787,
"max": 2.859647274017334,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8884.955078125,
"min": 8884.955078125,
"max": 29285.6484375,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.313934326171875,
"min": 0.3746393322944641,
"max": 14.313934326171875,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2934.3564453125,
"min": 72.6800308227539,
"max": 2934.3564453125,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07031824886399395,
"min": 0.06148489501827177,
"max": 0.07564425400938204,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.35159124431996974,
"min": 0.24593958007308708,
"max": 0.3782212700469102,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.14345932173378326,
"min": 0.125219828272284,
"max": 0.2772978394054899,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7172966086689163,
"min": 0.500879313089136,
"max": 1.3864891970274495,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.0176098982400003e-05,
"min": 1.0176098982400003e-05,
"max": 0.0009891760010824,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.088049491200002e-05,
"min": 5.088049491200002e-05,
"max": 0.0048468800153119995,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10101760000000001,
"min": 0.10101760000000001,
"max": 0.19891759999999997,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5050880000000001,
"min": 0.41199040000000003,
"max": 0.984688,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.0778240000000044e-05,
"min": 6.0778240000000044e-05,
"max": 0.00494598824,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003038912000000002,
"min": 0.0003038912000000002,
"max": 0.0242359312,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 28.163636363636364,
"min": 3.7954545454545454,
"max": 28.163636363636364,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1549.0,
"min": 167.0,
"max": 1549.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 28.163636363636364,
"min": 3.7954545454545454,
"max": 28.163636363636364,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1549.0,
"min": 167.0,
"max": 1549.0,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679254940",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679256141"
},
"total": 1201.385482559,
"count": 1,
"self": 0.4781768689999808,
"children": {
"run_training.setup": {
"total": 0.11293612300005407,
"count": 1,
"self": 0.11293612300005407
},
"TrainerController.start_learning": {
"total": 1200.7943695669999,
"count": 1,
"self": 1.3772264579829425,
"children": {
"TrainerController._reset_env": {
"total": 5.695831886000178,
"count": 1,
"self": 5.695831886000178
},
"TrainerController.advance": {
"total": 1193.5770941590165,
"count": 45477,
"self": 0.7323325730444594,
"children": {
"env_step": {
"total": 1192.844761585972,
"count": 45477,
"self": 853.5567951529365,
"children": {
"SubprocessEnvManager._take_step": {
"total": 338.45618794800794,
"count": 45477,
"self": 6.51865096495203,
"children": {
"TorchPolicy.evaluate": {
"total": 331.9375369830559,
"count": 45477,
"self": 331.9375369830559
}
}
},
"workers": {
"total": 0.8317784850275984,
"count": 45477,
"self": 0.0,
"children": {
"worker_root": {
"total": 1197.0013607001042,
"count": 45477,
"is_parallel": true,
"self": 575.8549901870911,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004766266000160613,
"count": 1,
"is_parallel": true,
"self": 0.0009646200003317063,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0038016459998289065,
"count": 10,
"is_parallel": true,
"self": 0.0038016459998289065
}
}
},
"UnityEnvironment.step": {
"total": 0.03443235100007769,
"count": 1,
"is_parallel": true,
"self": 0.0005762319999575993,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00032327000008081086,
"count": 1,
"is_parallel": true,
"self": 0.00032327000008081086
},
"communicator.exchange": {
"total": 0.031719237000061185,
"count": 1,
"is_parallel": true,
"self": 0.031719237000061185
},
"steps_from_proto": {
"total": 0.001813611999978093,
"count": 1,
"is_parallel": true,
"self": 0.0003828879998764023,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014307240001016908,
"count": 10,
"is_parallel": true,
"self": 0.0014307240001016908
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 621.1463705130132,
"count": 45476,
"is_parallel": true,
"self": 24.27404915500665,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.478119645036713,
"count": 45476,
"is_parallel": true,
"self": 13.478119645036713
},
"communicator.exchange": {
"total": 500.70108741897366,
"count": 45476,
"is_parallel": true,
"self": 500.70108741897366
},
"steps_from_proto": {
"total": 82.69311429399613,
"count": 45476,
"is_parallel": true,
"self": 16.71420779207847,
"children": {
"_process_rank_one_or_two_observation": {
"total": 65.97890650191766,
"count": 454760,
"is_parallel": true,
"self": 65.97890650191766
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00012102300024707802,
"count": 1,
"self": 0.00012102300024707802,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1184.289002753099,
"count": 1069761,
"is_parallel": true,
"self": 26.995271159279127,
"children": {
"process_trajectory": {
"total": 655.510740815818,
"count": 1069761,
"is_parallel": true,
"self": 652.5665117768178,
"children": {
"RLTrainer._checkpoint": {
"total": 2.94422903900022,
"count": 10,
"is_parallel": true,
"self": 2.94422903900022
}
}
},
"_update_policy": {
"total": 501.78299077800193,
"count": 227,
"is_parallel": true,
"self": 178.76111768896976,
"children": {
"TorchPPOOptimizer.update": {
"total": 323.02187308903217,
"count": 11574,
"is_parallel": true,
"self": 323.02187308903217
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1440960410000116,
"count": 1,
"self": 0.00107322900021245,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14302281199979916,
"count": 1,
"self": 0.14302281199979916
}
}
}
}
}
}
}