{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.091011881828308,
"min": 1.091011881828308,
"max": 2.865849733352661,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10416.9814453125,
"min": 10416.9814453125,
"max": 29380.69140625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.406991004943848,
"min": 0.3362593948841095,
"max": 11.406991004943848,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2224.36328125,
"min": 65.23432159423828,
"max": 2255.86083984375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06851821068024341,
"min": 0.06274124394518617,
"max": 0.0749855676680026,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27407284272097365,
"min": 0.25096497578074467,
"max": 0.3547584234922896,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20784831258888337,
"min": 0.1294005221787694,
"max": 0.3040717119095372,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8313932503555335,
"min": 0.5176020887150776,
"max": 1.520358559547686,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.454545454545453,
"min": 3.3181818181818183,
"max": 23.454545454545453,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1032.0,
"min": 146.0,
"max": 1224.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.454545454545453,
"min": 3.3181818181818183,
"max": 23.454545454545453,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1032.0,
"min": 146.0,
"max": 1224.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679502678",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679503158"
},
"total": 479.67949910699997,
"count": 1,
"self": 0.3866836700000249,
"children": {
"run_training.setup": {
"total": 0.1904147230000035,
"count": 1,
"self": 0.1904147230000035
},
"TrainerController.start_learning": {
"total": 479.10240071399994,
"count": 1,
"self": 0.5749870710077403,
"children": {
"TrainerController._reset_env": {
"total": 8.796961902000021,
"count": 1,
"self": 8.796961902000021
},
"TrainerController.advance": {
"total": 469.5917491839922,
"count": 18201,
"self": 0.2973743219922653,
"children": {
"env_step": {
"total": 469.2943748619999,
"count": 18201,
"self": 337.2280902380104,
"children": {
"SubprocessEnvManager._take_step": {
"total": 131.78057907898312,
"count": 18201,
"self": 2.5204209459881213,
"children": {
"TorchPolicy.evaluate": {
"total": 129.260158132995,
"count": 18201,
"self": 129.260158132995
}
}
},
"workers": {
"total": 0.2857055450064081,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 477.7759951049991,
"count": 18201,
"is_parallel": true,
"self": 229.12994313899958,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005591233000018292,
"count": 1,
"is_parallel": true,
"self": 0.004000598999823524,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015906340001947683,
"count": 10,
"is_parallel": true,
"self": 0.0015906340001947683
}
}
},
"UnityEnvironment.step": {
"total": 0.04456803300001866,
"count": 1,
"is_parallel": true,
"self": 0.0005143180001141445,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00037044299995159236,
"count": 1,
"is_parallel": true,
"self": 0.00037044299995159236
},
"communicator.exchange": {
"total": 0.04158195499996964,
"count": 1,
"is_parallel": true,
"self": 0.04158195499996964
},
"steps_from_proto": {
"total": 0.0021013169999832826,
"count": 1,
"is_parallel": true,
"self": 0.00045245799992699176,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016488590000562908,
"count": 10,
"is_parallel": true,
"self": 0.0016488590000562908
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 248.6460519659995,
"count": 18200,
"is_parallel": true,
"self": 9.95745698199255,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.5260340420041985,
"count": 18200,
"is_parallel": true,
"self": 5.5260340420041985
},
"communicator.exchange": {
"total": 199.709675235002,
"count": 18200,
"is_parallel": true,
"self": 199.709675235002
},
"steps_from_proto": {
"total": 33.452885707000746,
"count": 18200,
"is_parallel": true,
"self": 6.629584892016169,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.823300814984577,
"count": 182000,
"is_parallel": true,
"self": 26.823300814984577
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00014221900005395582,
"count": 1,
"self": 0.00014221900005395582,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 465.7829458849085,
"count": 421882,
"is_parallel": true,
"self": 11.145181364848781,
"children": {
"process_trajectory": {
"total": 260.9350587350591,
"count": 421882,
"is_parallel": true,
"self": 260.193583031059,
"children": {
"RLTrainer._checkpoint": {
"total": 0.741475704000095,
"count": 4,
"is_parallel": true,
"self": 0.741475704000095
}
}
},
"_update_policy": {
"total": 193.7027057850006,
"count": 90,
"is_parallel": true,
"self": 69.80255954699521,
"children": {
"TorchPPOOptimizer.update": {
"total": 123.90014623800539,
"count": 4587,
"is_parallel": true,
"self": 123.90014623800539
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13856033799993384,
"count": 1,
"self": 0.0008673190000081377,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1376930189999257,
"count": 1,
"self": 0.1376930189999257
}
}
}
}
}
}
}