{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9896473288536072,
"min": 0.9896473288536072,
"max": 2.854799747467041,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9460.0390625,
"min": 9460.0390625,
"max": 29267.408203125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.686723709106445,
"min": 0.3202313780784607,
"max": 12.686723709106445,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2473.9111328125,
"min": 62.1248893737793,
"max": 2565.1884765625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07606518119114208,
"min": 0.0613236937233345,
"max": 0.07606518119114208,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.30426072476456834,
"min": 0.245294774893338,
"max": 0.3610806495074511,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1784119382865873,
"min": 0.09954127031675193,
"max": 0.2910697422775568,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7136477531463492,
"min": 0.3981650812670077,
"max": 1.4553487113877839,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.136363636363637,
"min": 3.1363636363636362,
"max": 25.136363636363637,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1106.0,
"min": 138.0,
"max": 1373.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.136363636363637,
"min": 3.1363636363636362,
"max": 25.136363636363637,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1106.0,
"min": 138.0,
"max": 1373.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689013154",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689013645"
},
"total": 491.2820943890001,
"count": 1,
"self": 0.6887421110000105,
"children": {
"run_training.setup": {
"total": 0.04139447400029894,
"count": 1,
"self": 0.04139447400029894
},
"TrainerController.start_learning": {
"total": 490.5519578039998,
"count": 1,
"self": 0.5452652330027377,
"children": {
"TrainerController._reset_env": {
"total": 5.508222954999837,
"count": 1,
"self": 5.508222954999837
},
"TrainerController.advance": {
"total": 484.34945344199696,
"count": 18202,
"self": 0.2886183079735929,
"children": {
"env_step": {
"total": 484.06083513402336,
"count": 18202,
"self": 349.7825952180251,
"children": {
"SubprocessEnvManager._take_step": {
"total": 134.00253727401514,
"count": 18202,
"self": 1.7563144210780592,
"children": {
"TorchPolicy.evaluate": {
"total": 132.24622285293708,
"count": 18202,
"self": 132.24622285293708
}
}
},
"workers": {
"total": 0.27570264198311634,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 488.9156762140324,
"count": 18202,
"is_parallel": true,
"self": 232.49370278101833,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006802768999932596,
"count": 1,
"is_parallel": true,
"self": 0.0044453929999690445,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023573759999635513,
"count": 10,
"is_parallel": true,
"self": 0.0023573759999635513
}
}
},
"UnityEnvironment.step": {
"total": 0.04990630699967369,
"count": 1,
"is_parallel": true,
"self": 0.0006358280002132233,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004637150000235124,
"count": 1,
"is_parallel": true,
"self": 0.0004637150000235124
},
"communicator.exchange": {
"total": 0.04655520099959176,
"count": 1,
"is_parallel": true,
"self": 0.04655520099959176
},
"steps_from_proto": {
"total": 0.0022515629998451914,
"count": 1,
"is_parallel": true,
"self": 0.0005172410001250682,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017343219997201231,
"count": 10,
"is_parallel": true,
"self": 0.0017343219997201231
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 256.42197343301405,
"count": 18201,
"is_parallel": true,
"self": 10.684839514971372,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.623451021005621,
"count": 18201,
"is_parallel": true,
"self": 5.623451021005621
},
"communicator.exchange": {
"total": 201.78136774003724,
"count": 18201,
"is_parallel": true,
"self": 201.78136774003724
},
"steps_from_proto": {
"total": 38.33231515699981,
"count": 18201,
"is_parallel": true,
"self": 6.827348501931283,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.50496665506853,
"count": 182010,
"is_parallel": true,
"self": 31.50496665506853
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001275060003536055,
"count": 1,
"self": 0.0001275060003536055,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 480.330417981972,
"count": 466661,
"is_parallel": true,
"self": 10.65843641609581,
"children": {
"process_trajectory": {
"total": 265.9746790508775,
"count": 466661,
"is_parallel": true,
"self": 265.0928378798776,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8818411709999054,
"count": 4,
"is_parallel": true,
"self": 0.8818411709999054
}
}
},
"_update_policy": {
"total": 203.69730251499868,
"count": 90,
"is_parallel": true,
"self": 76.28363190001346,
"children": {
"TorchPPOOptimizer.update": {
"total": 127.41367061498522,
"count": 4587,
"is_parallel": true,
"self": 127.41367061498522
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14888866799992684,
"count": 1,
"self": 0.0008206869997593458,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1480679810001675,
"count": 1,
"self": 0.1480679810001675
}
}
}
}
}
}
}