{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8730677962303162,
"min": 0.8730677962303162,
"max": 2.8626091480255127,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8345.6552734375,
"min": 8345.6552734375,
"max": 29378.95703125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.744796752929688,
"min": 0.3266405463218689,
"max": 12.763436317443848,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2485.2353515625,
"min": 63.368263244628906,
"max": 2603.740966796875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07132283502940874,
"min": 0.06165378722310861,
"max": 0.07359434268037861,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28529134011763496,
"min": 0.24661514889243444,
"max": 0.36196895972306287,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21005372023757765,
"min": 0.10464611242744415,
"max": 0.3053814822552251,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8402148809503106,
"min": 0.4185844497097766,
"max": 1.5269074112761254,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.863636363636363,
"min": 2.8636363636363638,
"max": 25.163636363636364,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1094.0,
"min": 126.0,
"max": 1384.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.863636363636363,
"min": 2.8636363636363638,
"max": 25.163636363636364,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1094.0,
"min": 126.0,
"max": 1384.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681919262",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681919705"
},
"total": 442.997079777,
"count": 1,
"self": 0.4276413359999651,
"children": {
"run_training.setup": {
"total": 0.10982111800001348,
"count": 1,
"self": 0.10982111800001348
},
"TrainerController.start_learning": {
"total": 442.45961732300003,
"count": 1,
"self": 0.5338234190050457,
"children": {
"TrainerController._reset_env": {
"total": 5.117930221999984,
"count": 1,
"self": 5.117930221999984
},
"TrainerController.advance": {
"total": 436.66989579299496,
"count": 18203,
"self": 0.2544147030031354,
"children": {
"env_step": {
"total": 436.41548108999183,
"count": 18203,
"self": 319.8025202369887,
"children": {
"SubprocessEnvManager._take_step": {
"total": 116.3498791260078,
"count": 18203,
"self": 1.6803505470086861,
"children": {
"TorchPolicy.evaluate": {
"total": 114.66952857899912,
"count": 18203,
"self": 114.66952857899912
}
}
},
"workers": {
"total": 0.26308172699532406,
"count": 18203,
"self": 0.0,
"children": {
"worker_root": {
"total": 441.05908244899706,
"count": 18203,
"is_parallel": true,
"self": 204.80662942699706,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006753436000053625,
"count": 1,
"is_parallel": true,
"self": 0.004795261000026585,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019581750000270404,
"count": 10,
"is_parallel": true,
"self": 0.0019581750000270404
}
}
},
"UnityEnvironment.step": {
"total": 0.03512440299999753,
"count": 1,
"is_parallel": true,
"self": 0.0005346289999579312,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00036977199999910226,
"count": 1,
"is_parallel": true,
"self": 0.00036977199999910226
},
"communicator.exchange": {
"total": 0.03243919900000947,
"count": 1,
"is_parallel": true,
"self": 0.03243919900000947
},
"steps_from_proto": {
"total": 0.0017808030000310282,
"count": 1,
"is_parallel": true,
"self": 0.00038587699998515745,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013949260000458708,
"count": 10,
"is_parallel": true,
"self": 0.0013949260000458708
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 236.252453022,
"count": 18202,
"is_parallel": true,
"self": 9.526775144982253,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.303414444004034,
"count": 18202,
"is_parallel": true,
"self": 5.303414444004034
},
"communicator.exchange": {
"total": 191.20009454900156,
"count": 18202,
"is_parallel": true,
"self": 191.20009454900156
},
"steps_from_proto": {
"total": 30.22216888401215,
"count": 18202,
"is_parallel": true,
"self": 5.842779451024683,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.379389432987466,
"count": 182020,
"is_parallel": true,
"self": 24.379389432987466
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011901499999567022,
"count": 1,
"self": 0.00011901499999567022,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 433.26735278798407,
"count": 393433,
"is_parallel": true,
"self": 9.724779921951779,
"children": {
"process_trajectory": {
"total": 239.2759214750322,
"count": 393433,
"is_parallel": true,
"self": 238.52423942703228,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7516820479999069,
"count": 4,
"is_parallel": true,
"self": 0.7516820479999069
}
}
},
"_update_policy": {
"total": 184.2666513910001,
"count": 90,
"is_parallel": true,
"self": 67.5633291180007,
"children": {
"TorchPPOOptimizer.update": {
"total": 116.7033222729994,
"count": 4587,
"is_parallel": true,
"self": 116.7033222729994
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1378488740000421,
"count": 1,
"self": 0.0009162019999848781,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1369326720000572,
"count": 1,
"self": 0.1369326720000572
}
}
}
}
}
}
}