{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9641430974006653,
"min": 0.9641430974006653,
"max": 2.6294727325439453,
"count": 18
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9216.244140625,
"min": 9216.244140625,
"max": 25651.970703125,
"count": 18
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 29944.0,
"max": 199984.0,
"count": 18
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 29944.0,
"max": 199984.0,
"count": 18
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.865510940551758,
"min": 2.5459892749786377,
"max": 12.865510940551758,
"count": 18
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2508.774658203125,
"min": 249.5069580078125,
"max": 2617.5068359375,
"count": 18
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 18
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 4378.0,
"max": 10945.0,
"count": 18
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06393811743848844,
"min": 0.06308957613360047,
"max": 0.07660549775167715,
"count": 18
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.25575246975395377,
"min": 0.14248750751265143,
"max": 0.3830274887583857,
"count": 18
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2105106646742891,
"min": 0.20008689612150193,
"max": 0.283398783017023,
"count": 18
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8420426586971564,
"min": 0.4916150629447371,
"max": 1.3745426965110443,
"count": 18
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.0260976579999984e-06,
"min": 7.0260976579999984e-06,
"max": 0.00025782601405799994,
"count": 18
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.8104390631999994e-05,
"min": 2.8104390631999994e-05,
"max": 0.00116538011154,
"count": 18
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10234199999999999,
"min": 0.10234199999999999,
"max": 0.185942,
"count": 18
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40936799999999995,
"min": 0.371884,
"max": 0.88846,
"count": 18
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001268658,
"min": 0.0001268658,
"max": 0.0042985058,
"count": 18
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005074632,
"min": 0.0005074632,
"max": 0.019434154,
"count": 18
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.272727272727273,
"min": 8.136363636363637,
"max": 25.4,
"count": 18
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1112.0,
"min": 179.0,
"max": 1397.0,
"count": 18
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.272727272727273,
"min": 8.136363636363637,
"max": 25.4,
"count": 18
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1112.0,
"min": 179.0,
"max": 1397.0,
"count": 18
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 18
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 18
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1699856013",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --resume --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1699856561"
},
"total": 547.9894619360002,
"count": 1,
"self": 0.5269451160020253,
"children": {
"run_training.setup": {
"total": 0.05518852999921364,
"count": 1,
"self": 0.05518852999921364
},
"TrainerController.start_learning": {
"total": 547.407328289999,
"count": 1,
"self": 0.807718579972061,
"children": {
"TrainerController._reset_env": {
"total": 1.2686758830004692,
"count": 1,
"self": 1.2686758830004692
},
"TrainerController.advance": {
"total": 545.2262462700273,
"count": 15940,
"self": 0.3994687339509255,
"children": {
"env_step": {
"total": 544.8267775360764,
"count": 15940,
"self": 425.22428863147616,
"children": {
"SubprocessEnvManager._take_step": {
"total": 119.22216316672348,
"count": 15940,
"self": 2.4133158197819284,
"children": {
"TorchPolicy.evaluate": {
"total": 116.80884734694155,
"count": 15940,
"self": 116.80884734694155
}
}
},
"workers": {
"total": 0.38032573787677393,
"count": 15940,
"self": 0.0,
"children": {
"worker_root": {
"total": 545.2851653669641,
"count": 15940,
"is_parallel": true,
"self": 252.70668793811456,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022862000005261507,
"count": 1,
"is_parallel": true,
"self": 0.0006671110022580251,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016190889982681256,
"count": 10,
"is_parallel": true,
"self": 0.0016190889982681256
}
}
},
"UnityEnvironment.step": {
"total": 0.058827279000979615,
"count": 1,
"is_parallel": true,
"self": 0.0007621490021847421,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005764500001532724,
"count": 1,
"is_parallel": true,
"self": 0.0005764500001532724
},
"communicator.exchange": {
"total": 0.055170623998492374,
"count": 1,
"is_parallel": true,
"self": 0.055170623998492374
},
"steps_from_proto": {
"total": 0.0023180560001492267,
"count": 1,
"is_parallel": true,
"self": 0.00048224999591184314,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018358060042373836,
"count": 10,
"is_parallel": true,
"self": 0.0018358060042373836
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 292.5784774288495,
"count": 15939,
"is_parallel": true,
"self": 12.722130422656846,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.863152905067182,
"count": 15939,
"is_parallel": true,
"self": 6.863152905067182
},
"communicator.exchange": {
"total": 233.159760291961,
"count": 15939,
"is_parallel": true,
"self": 233.159760291961
},
"steps_from_proto": {
"total": 39.83343380916449,
"count": 15939,
"is_parallel": true,
"self": 8.114159701128301,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.71927410803619,
"count": 159390,
"is_parallel": true,
"self": 31.71927410803619
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00022118599918030668,
"count": 1,
"self": 0.00022118599918030668,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 540.0100323867064,
"count": 561566,
"is_parallel": true,
"self": 13.906041291804286,
"children": {
"process_trajectory": {
"total": 296.16663390090434,
"count": 561566,
"is_parallel": true,
"self": 295.2144950159036,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9521388850007497,
"count": 4,
"is_parallel": true,
"self": 0.9521388850007497
}
}
},
"_update_policy": {
"total": 229.93735719399774,
"count": 79,
"is_parallel": true,
"self": 67.69718843906958,
"children": {
"TorchPPOOptimizer.update": {
"total": 162.24016875492816,
"count": 4026,
"is_parallel": true,
"self": 162.24016875492816
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.10446637099994405,
"count": 1,
"self": 0.0018950730009237304,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10257129799902032,
"count": 1,
"self": 0.10257129799902032
}
}
}
}
}
}
}