{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9872238636016846,
"min": 0.9560187458992004,
"max": 2.8778398036956787,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9458.591796875,
"min": 9458.591796875,
"max": 29535.26953125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.848539352416992,
"min": 0.3894674479961395,
"max": 12.848539352416992,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2505.465087890625,
"min": 75.55668640136719,
"max": 2608.240234375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0658234222386918,
"min": 0.06410840705154673,
"max": 0.07483835568029248,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2632936889547672,
"min": 0.2564336282061869,
"max": 0.36804040105483843,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18158820161924644,
"min": 0.08535747436975474,
"max": 0.28644621153088173,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7263528064769857,
"min": 0.341429897479019,
"max": 1.4032163976454268,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.25,
"min": 2.409090909090909,
"max": 25.6,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1111.0,
"min": 106.0,
"max": 1408.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.25,
"min": 2.409090909090909,
"max": 25.6,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1111.0,
"min": 106.0,
"max": 1408.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687283216",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687283782"
},
"total": 566.8323021299999,
"count": 1,
"self": 0.9857247919998144,
"children": {
"run_training.setup": {
"total": 0.07776007600000412,
"count": 1,
"self": 0.07776007600000412
},
"TrainerController.start_learning": {
"total": 565.7688172620001,
"count": 1,
"self": 0.8323848099886391,
"children": {
"TrainerController._reset_env": {
"total": 1.876196400999902,
"count": 1,
"self": 1.876196400999902
},
"TrainerController.advance": {
"total": 562.8125787970114,
"count": 18229,
"self": 0.38118027601103677,
"children": {
"env_step": {
"total": 562.4313985210003,
"count": 18229,
"self": 455.14114348301575,
"children": {
"SubprocessEnvManager._take_step": {
"total": 106.90682311400394,
"count": 18229,
"self": 2.390534069004161,
"children": {
"TorchPolicy.evaluate": {
"total": 104.51628904499978,
"count": 18229,
"self": 104.51628904499978
}
}
},
"workers": {
"total": 0.38343192398065185,
"count": 18229,
"self": 0.0,
"children": {
"worker_root": {
"total": 563.4959176320211,
"count": 18229,
"is_parallel": true,
"self": 248.01812399201685,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008198760000027505,
"count": 1,
"is_parallel": true,
"self": 0.0056289919998562254,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0025697680001712797,
"count": 10,
"is_parallel": true,
"self": 0.0025697680001712797
}
}
},
"UnityEnvironment.step": {
"total": 0.0795048569999608,
"count": 1,
"is_parallel": true,
"self": 0.0007351539999262968,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004965830000855931,
"count": 1,
"is_parallel": true,
"self": 0.0004965830000855931
},
"communicator.exchange": {
"total": 0.07145924700000705,
"count": 1,
"is_parallel": true,
"self": 0.07145924700000705
},
"steps_from_proto": {
"total": 0.00681387299994185,
"count": 1,
"is_parallel": true,
"self": 0.00046336200011865003,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0063505109998232,
"count": 10,
"is_parallel": true,
"self": 0.0063505109998232
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 315.4777936400043,
"count": 18228,
"is_parallel": true,
"self": 13.066838885011407,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.8861699049995195,
"count": 18228,
"is_parallel": true,
"self": 6.8861699049995195
},
"communicator.exchange": {
"total": 250.81551151999508,
"count": 18228,
"is_parallel": true,
"self": 250.81551151999508
},
"steps_from_proto": {
"total": 44.70927332999827,
"count": 18228,
"is_parallel": true,
"self": 8.76677687295603,
"children": {
"_process_rank_one_or_two_observation": {
"total": 35.94249645704224,
"count": 182280,
"is_parallel": true,
"self": 35.94249645704224
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00020572000016727543,
"count": 1,
"self": 0.00020572000016727543,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 558.1449772630156,
"count": 564714,
"is_parallel": true,
"self": 12.976396341969235,
"children": {
"process_trajectory": {
"total": 298.4114162920465,
"count": 564714,
"is_parallel": true,
"self": 296.78552623704627,
"children": {
"RLTrainer._checkpoint": {
"total": 1.6258900550002409,
"count": 4,
"is_parallel": true,
"self": 1.6258900550002409
}
}
},
"_update_policy": {
"total": 246.75716462899982,
"count": 90,
"is_parallel": true,
"self": 86.25190096499182,
"children": {
"TorchPPOOptimizer.update": {
"total": 160.505263664008,
"count": 4587,
"is_parallel": true,
"self": 160.505263664008
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.24745153399999253,
"count": 1,
"self": 0.0013321760000053473,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2461193579999872,
"count": 1,
"self": 0.2461193579999872
}
}
}
}
}
}
}