{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9872178435325623,
"min": 0.9872178435325623,
"max": 2.864316463470459,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9447.6748046875,
"min": 9447.6748046875,
"max": 29427.98828125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.12951946258545,
"min": 0.49277621507644653,
"max": 13.12951946258545,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2560.25634765625,
"min": 95.59858703613281,
"max": 2669.49609375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07206193343211222,
"min": 0.062253106212652065,
"max": 0.07718792081081224,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28824773372844886,
"min": 0.24901242485060826,
"max": 0.38593960405406125,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19436689385888622,
"min": 0.14922309038914083,
"max": 0.2859529435050254,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7774675754355449,
"min": 0.5968923615565633,
"max": 1.4297647175251271,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 9.429097306000008e-06,
"min": 9.429097306000008e-06,
"max": 0.0003405290027059999,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.771638922400003e-05,
"min": 3.771638922400003e-05,
"max": 0.00161602003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.068181818181817,
"min": 4.0227272727272725,
"max": 26.068181818181817,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1147.0,
"min": 177.0,
"max": 1424.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.068181818181817,
"min": 4.0227272727272725,
"max": 26.068181818181817,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1147.0,
"min": 177.0,
"max": 1424.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691853028",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691853521"
},
"total": 492.39752079699997,
"count": 1,
"self": 0.8058973659999538,
"children": {
"run_training.setup": {
"total": 0.04267904699997871,
"count": 1,
"self": 0.04267904699997871
},
"TrainerController.start_learning": {
"total": 491.54894438400004,
"count": 1,
"self": 0.557145191010818,
"children": {
"TrainerController._reset_env": {
"total": 4.26308811399997,
"count": 1,
"self": 4.26308811399997
},
"TrainerController.advance": {
"total": 486.4905491589892,
"count": 18214,
"self": 0.2833127629767205,
"children": {
"env_step": {
"total": 486.2072363960125,
"count": 18214,
"self": 354.1492947799767,
"children": {
"SubprocessEnvManager._take_step": {
"total": 131.76670779102471,
"count": 18214,
"self": 1.8915502300277467,
"children": {
"TorchPolicy.evaluate": {
"total": 129.87515756099697,
"count": 18214,
"self": 129.87515756099697
}
}
},
"workers": {
"total": 0.29123382501109063,
"count": 18214,
"self": 0.0,
"children": {
"worker_root": {
"total": 489.80162410300886,
"count": 18214,
"is_parallel": true,
"self": 232.03202809399534,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006038450000005469,
"count": 1,
"is_parallel": true,
"self": 0.0045601340000303026,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001478315999975166,
"count": 10,
"is_parallel": true,
"self": 0.001478315999975166
}
}
},
"UnityEnvironment.step": {
"total": 0.039450494999982766,
"count": 1,
"is_parallel": true,
"self": 0.0007195629999614539,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00034687499999108695,
"count": 1,
"is_parallel": true,
"self": 0.00034687499999108695
},
"communicator.exchange": {
"total": 0.03617259899999681,
"count": 1,
"is_parallel": true,
"self": 0.03617259899999681
},
"steps_from_proto": {
"total": 0.002211458000033417,
"count": 1,
"is_parallel": true,
"self": 0.00039111000000957574,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018203480000238415,
"count": 10,
"is_parallel": true,
"self": 0.0018203480000238415
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 257.7695960090135,
"count": 18213,
"is_parallel": true,
"self": 11.04661722101207,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.6768374609990815,
"count": 18213,
"is_parallel": true,
"self": 5.6768374609990815
},
"communicator.exchange": {
"total": 203.44631532900485,
"count": 18213,
"is_parallel": true,
"self": 203.44631532900485
},
"steps_from_proto": {
"total": 37.59982599799753,
"count": 18213,
"is_parallel": true,
"self": 6.967339698023579,
"children": {
"_process_rank_one_or_two_observation": {
"total": 30.632486299973948,
"count": 182130,
"is_parallel": true,
"self": 30.632486299973948
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001729949999571545,
"count": 1,
"self": 0.0001729949999571545,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 482.8354018140391,
"count": 447608,
"is_parallel": true,
"self": 10.027455348114472,
"children": {
"process_trajectory": {
"total": 264.4766707779247,
"count": 447608,
"is_parallel": true,
"self": 263.3299029989248,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1467677789999016,
"count": 4,
"is_parallel": true,
"self": 1.1467677789999016
}
}
},
"_update_policy": {
"total": 208.3312756879999,
"count": 90,
"is_parallel": true,
"self": 82.15911936700093,
"children": {
"TorchPPOOptimizer.update": {
"total": 126.17215632099897,
"count": 4587,
"is_parallel": true,
"self": 126.17215632099897
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.2379889250000815,
"count": 1,
"self": 0.001198662000206241,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23679026299987527,
"count": 1,
"self": 0.23679026299987527
}
}
}
}
}
}
}