{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9143757820129395,
"min": 0.9143757820129395,
"max": 2.8693699836730957,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8730.4599609375,
"min": 8730.4599609375,
"max": 29448.34375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.98841381072998,
"min": 0.3694496154785156,
"max": 12.98841381072998,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2532.74072265625,
"min": 71.67322540283203,
"max": 2633.899169921875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06586993849666961,
"min": 0.05817899737831241,
"max": 0.07770887466276512,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26347975398667844,
"min": 0.23271598951324965,
"max": 0.3713956201756054,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19295382770874048,
"min": 0.11442176423574268,
"max": 0.2706481791319516,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7718153108349619,
"min": 0.45768705694297074,
"max": 1.349098935759827,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.130097290000004e-06,
"min": 8.130097290000004e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.2520389160000015e-05,
"min": 3.2520389160000015e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10271,
"min": 0.10271,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41084,
"min": 0.41084,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00014522900000000008,
"min": 0.00014522900000000008,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005809160000000003,
"min": 0.0005809160000000003,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.545454545454547,
"min": 2.9318181818181817,
"max": 25.854545454545455,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1124.0,
"min": 129.0,
"max": 1422.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.545454545454547,
"min": 2.9318181818181817,
"max": 25.854545454545455,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1124.0,
"min": 129.0,
"max": 1422.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679566886",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679567384"
},
"total": 497.525311655,
"count": 1,
"self": 0.3863322890000518,
"children": {
"run_training.setup": {
"total": 0.11009324099995865,
"count": 1,
"self": 0.11009324099995865
},
"TrainerController.start_learning": {
"total": 497.028886125,
"count": 1,
"self": 0.5928713349992449,
"children": {
"TrainerController._reset_env": {
"total": 8.150715314000081,
"count": 1,
"self": 8.150715314000081
},
"TrainerController.advance": {
"total": 488.14521712200064,
"count": 18202,
"self": 0.31401544901621037,
"children": {
"env_step": {
"total": 487.83120167298443,
"count": 18202,
"self": 352.55129507498793,
"children": {
"SubprocessEnvManager._take_step": {
"total": 134.9833390270013,
"count": 18202,
"self": 2.596970070010343,
"children": {
"TorchPolicy.evaluate": {
"total": 132.38636895699096,
"count": 18202,
"self": 132.38636895699096
}
}
},
"workers": {
"total": 0.2965675709951938,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 495.40788562301464,
"count": 18202,
"is_parallel": true,
"self": 235.31455283201842,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005438023000010617,
"count": 1,
"is_parallel": true,
"self": 0.003718365000167978,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017196579998426387,
"count": 10,
"is_parallel": true,
"self": 0.0017196579998426387
}
}
},
"UnityEnvironment.step": {
"total": 0.10862353400000302,
"count": 1,
"is_parallel": true,
"self": 0.0005739869999388247,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005056449999756296,
"count": 1,
"is_parallel": true,
"self": 0.0005056449999756296
},
"communicator.exchange": {
"total": 0.10557656700007101,
"count": 1,
"is_parallel": true,
"self": 0.10557656700007101
},
"steps_from_proto": {
"total": 0.001967335000017556,
"count": 1,
"is_parallel": true,
"self": 0.00042640199990273686,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015409330001148192,
"count": 10,
"is_parallel": true,
"self": 0.0015409330001148192
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 260.0933327909962,
"count": 18201,
"is_parallel": true,
"self": 10.08613634698213,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.679710370002567,
"count": 18201,
"is_parallel": true,
"self": 5.679710370002567
},
"communicator.exchange": {
"total": 209.68581800101128,
"count": 18201,
"is_parallel": true,
"self": 209.68581800101128
},
"steps_from_proto": {
"total": 34.64166807300023,
"count": 18201,
"is_parallel": true,
"self": 6.9988211020476,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.642846970952633,
"count": 182010,
"is_parallel": true,
"self": 27.642846970952633
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00017690100003164844,
"count": 1,
"self": 0.00017690100003164844,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 484.32686593100357,
"count": 436095,
"is_parallel": true,
"self": 10.967339592013786,
"children": {
"process_trajectory": {
"total": 270.8266935819903,
"count": 436095,
"is_parallel": true,
"self": 268.5583658539904,
"children": {
"RLTrainer._checkpoint": {
"total": 2.2683277279999174,
"count": 4,
"is_parallel": true,
"self": 2.2683277279999174
}
}
},
"_update_policy": {
"total": 202.53283275699948,
"count": 90,
"is_parallel": true,
"self": 72.51721605899763,
"children": {
"TorchPPOOptimizer.update": {
"total": 130.01561669800185,
"count": 4584,
"is_parallel": true,
"self": 130.01561669800185
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13990545299998303,
"count": 1,
"self": 0.0008520489998318226,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1390534040001512,
"count": 1,
"self": 0.1390534040001512
}
}
}
}
}
}
}