{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0239914655685425,
"min": 1.0239914655685425,
"max": 2.864828109741211,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9788.333984375,
"min": 9788.333984375,
"max": 29338.705078125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.90246868133545,
"min": 0.40310999751091003,
"max": 12.90246868133545,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2515.9814453125,
"min": 78.20333862304688,
"max": 2612.596923828125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06671248832728952,
"min": 0.06422642952480494,
"max": 0.07551683201089356,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26684995330915806,
"min": 0.25690571809921975,
"max": 0.36442226998042315,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19994673110982952,
"min": 0.12157122489503201,
"max": 0.272834480100987,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7997869244393181,
"min": 0.48628489958012805,
"max": 1.3461834630545448,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.795454545454547,
"min": 3.4318181818181817,
"max": 25.795454545454547,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1135.0,
"min": 151.0,
"max": 1390.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.795454545454547,
"min": 3.4318181818181817,
"max": 25.795454545454547,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1135.0,
"min": 151.0,
"max": 1390.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678749735",
"python_version": "3.8.16 (default, Jan 17 2023, 23:13:24) \n[GCC 11.2.0]",
"command_line_arguments": "/anaconda/bin//mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.20.0",
"end_time_seconds": "1678750128"
},
"total": 392.96794632599995,
"count": 1,
"self": 0.32255737699961173,
"children": {
"run_training.setup": {
"total": 0.027895264000108,
"count": 1,
"self": 0.027895264000108
},
"TrainerController.start_learning": {
"total": 392.6174936850002,
"count": 1,
"self": 0.49473631594810286,
"children": {
"TrainerController._reset_env": {
"total": 3.052494094001304,
"count": 1,
"self": 3.052494094001304
},
"TrainerController.advance": {
"total": 388.9592534960466,
"count": 18206,
"self": 0.23496339013581746,
"children": {
"env_step": {
"total": 388.72429010591077,
"count": 18206,
"self": 279.3013365187835,
"children": {
"SubprocessEnvManager._take_step": {
"total": 109.17209053985061,
"count": 18206,
"self": 1.1741430691436108,
"children": {
"TorchPolicy.evaluate": {
"total": 107.997947470707,
"count": 18206,
"self": 20.604595213986613,
"children": {
"TorchPolicy.sample_actions": {
"total": 87.39335225672039,
"count": 18206,
"self": 87.39335225672039
}
}
}
}
},
"workers": {
"total": 0.25086304727665265,
"count": 18206,
"self": 0.0,
"children": {
"worker_root": {
"total": 391.9004243369236,
"count": 18206,
"is_parallel": true,
"self": 175.23012058401582,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019788380013778806,
"count": 1,
"is_parallel": true,
"self": 0.00047347099462058395,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015053670067572966,
"count": 10,
"is_parallel": true,
"self": 0.0015053670067572966
}
}
},
"UnityEnvironment.step": {
"total": 0.040273700000398094,
"count": 1,
"is_parallel": true,
"self": 0.0003983370006608311,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000276102997304406,
"count": 1,
"is_parallel": true,
"self": 0.000276102997304406
},
"communicator.exchange": {
"total": 0.0383349920011824,
"count": 1,
"is_parallel": true,
"self": 0.0383349920011824
},
"steps_from_proto": {
"total": 0.001264268001250457,
"count": 1,
"is_parallel": true,
"self": 0.000224508999963291,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001039759001287166,
"count": 10,
"is_parallel": true,
"self": 0.001039759001287166
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 216.67030375290778,
"count": 18205,
"is_parallel": true,
"self": 10.816170322559628,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.964858332219592,
"count": 18205,
"is_parallel": true,
"self": 4.964858332219592
},
"communicator.exchange": {
"total": 170.6012590451246,
"count": 18205,
"is_parallel": true,
"self": 170.6012590451246
},
"steps_from_proto": {
"total": 30.288016053003957,
"count": 18205,
"is_parallel": true,
"self": 5.42294947708433,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.865066575919627,
"count": 182050,
"is_parallel": true,
"self": 24.865066575919627
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.917600305518135e-05,
"count": 1,
"self": 7.917600305518135e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 387.70098901121673,
"count": 243651,
"is_parallel": true,
"self": 4.291233580304834,
"children": {
"process_trajectory": {
"total": 223.71606679089746,
"count": 243651,
"is_parallel": true,
"self": 222.87514878989532,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8409180010021373,
"count": 4,
"is_parallel": true,
"self": 0.8409180010021373
}
}
},
"_update_policy": {
"total": 159.69368864001444,
"count": 90,
"is_parallel": true,
"self": 36.720835636027914,
"children": {
"TorchPPOOptimizer.update": {
"total": 122.97285300398653,
"count": 4587,
"is_parallel": true,
"self": 122.97285300398653
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1109306030011794,
"count": 1,
"self": 0.001396884003042942,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10953371899813646,
"count": 1,
"self": 0.10953371899813646
}
}
}
}
}
}
}