{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9224082827568054,
"min": 0.9022365212440491,
"max": 2.8675522804260254,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8807.154296875,
"min": 8703.8759765625,
"max": 29366.603515625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.546395301818848,
"min": 0.41490688920021057,
"max": 12.546395301818848,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2446.547119140625,
"min": 80.49193572998047,
"max": 2541.0498046875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06753871375386714,
"min": 0.062171391156595084,
"max": 0.0762532351868953,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27015485501546854,
"min": 0.24868556462638033,
"max": 0.3812661759344765,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21368467690897924,
"min": 0.14262103590447747,
"max": 0.2739600560244392,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.854738707635917,
"min": 0.5704841436179099,
"max": 1.3698002801221958,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.75,
"min": 3.5,
"max": 25.09090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1089.0,
"min": 154.0,
"max": 1344.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.75,
"min": 3.5,
"max": 25.09090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1089.0,
"min": 154.0,
"max": 1344.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674460627",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674461090"
},
"total": 463.00380336499995,
"count": 1,
"self": 0.38949457799992615,
"children": {
"run_training.setup": {
"total": 0.12308765099999164,
"count": 1,
"self": 0.12308765099999164
},
"TrainerController.start_learning": {
"total": 462.49122113600004,
"count": 1,
"self": 0.6179039540000986,
"children": {
"TrainerController._reset_env": {
"total": 9.255469196999911,
"count": 1,
"self": 9.255469196999911
},
"TrainerController.advance": {
"total": 452.49385829100015,
"count": 18200,
"self": 0.32328319100338376,
"children": {
"env_step": {
"total": 452.17057509999677,
"count": 18200,
"self": 296.94254165098766,
"children": {
"SubprocessEnvManager._take_step": {
"total": 154.91538492499126,
"count": 18200,
"self": 1.5613031249743017,
"children": {
"TorchPolicy.evaluate": {
"total": 153.35408180001696,
"count": 18200,
"self": 35.54428338201501,
"children": {
"TorchPolicy.sample_actions": {
"total": 117.80979841800195,
"count": 18200,
"self": 117.80979841800195
}
}
}
}
},
"workers": {
"total": 0.3126485240178454,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 461.07689176700796,
"count": 18200,
"is_parallel": true,
"self": 218.46266281401245,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006892103000041061,
"count": 1,
"is_parallel": true,
"self": 0.003276183000139099,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003615919999901962,
"count": 10,
"is_parallel": true,
"self": 0.003615919999901962
}
}
},
"UnityEnvironment.step": {
"total": 0.03558136799995282,
"count": 1,
"is_parallel": true,
"self": 0.0005214549998981965,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003976670000156446,
"count": 1,
"is_parallel": true,
"self": 0.0003976670000156446
},
"communicator.exchange": {
"total": 0.03270595399999365,
"count": 1,
"is_parallel": true,
"self": 0.03270595399999365
},
"steps_from_proto": {
"total": 0.0019562920000453232,
"count": 1,
"is_parallel": true,
"self": 0.0004479919999766935,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015083000000686297,
"count": 10,
"is_parallel": true,
"self": 0.0015083000000686297
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 242.61422895299552,
"count": 18199,
"is_parallel": true,
"self": 8.932599310990327,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.455656493007837,
"count": 18199,
"is_parallel": true,
"self": 5.455656493007837
},
"communicator.exchange": {
"total": 192.18624906000662,
"count": 18199,
"is_parallel": true,
"self": 192.18624906000662
},
"steps_from_proto": {
"total": 36.03972408899074,
"count": 18199,
"is_parallel": true,
"self": 7.641112393025537,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.3986116959652,
"count": 181990,
"is_parallel": true,
"self": 28.3986116959652
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.820599997401587e-05,
"count": 1,
"self": 4.820599997401587e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 449.23918538398607,
"count": 376957,
"is_parallel": true,
"self": 10.328383478987462,
"children": {
"process_trajectory": {
"total": 258.3809782909992,
"count": 376957,
"is_parallel": true,
"self": 257.6940318619993,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6869464289999314,
"count": 4,
"is_parallel": true,
"self": 0.6869464289999314
}
}
},
"_update_policy": {
"total": 180.52982361399938,
"count": 90,
"is_parallel": true,
"self": 42.35432140699629,
"children": {
"TorchPPOOptimizer.update": {
"total": 138.1755022070031,
"count": 4587,
"is_parallel": true,
"self": 138.1755022070031
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12394148799990035,
"count": 1,
"self": 0.0010592809999252495,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1228822069999751,
"count": 1,
"self": 0.1228822069999751
}
}
}
}
}
}
}