{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0497850179672241,
"min": 1.0497850179672241,
"max": 2.7303478717803955,
"count": 18
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10658.4677734375,
"min": 10085.4130859375,
"max": 27871.390625,
"count": 18
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 29944.0,
"max": 199984.0,
"count": 18
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 29944.0,
"max": 199984.0,
"count": 18
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.152831077575684,
"min": 2.370511770248413,
"max": 13.152831077575684,
"count": 18
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2564.802001953125,
"min": 445.6562194824219,
"max": 2660.14111328125,
"count": 18
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 18
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 18
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0717393630794843,
"min": 0.05853487922457348,
"max": 0.07470912224608564,
"count": 18
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2869574523179372,
"min": 0.23413951689829393,
"max": 0.3722563746471446,
"count": 18
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18109591465954689,
"min": 0.18109591465954689,
"max": 0.3214423927606321,
"count": 18
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7243836586381875,
"min": 0.7243836586381875,
"max": 1.6072119638031603,
"count": 18
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.4060097594000024e-05,
"min": 2.4060097594000024e-05,
"max": 0.000871060012894,
"count": 18
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 9.62403903760001e-05,
"min": 9.62403903760001e-05,
"max": 0.0038878001112199996,
"count": 18
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10240600000000001,
"min": 0.10240600000000001,
"max": 0.18710600000000002,
"count": 18
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40962400000000004,
"min": 0.40962400000000004,
"max": 0.8887800000000001,
"count": 18
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00013005940000000016,
"min": 0.00013005940000000016,
"max": 0.004356589400000001,
"count": 18
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005202376000000006,
"min": 0.0005202376000000006,
"max": 0.019450122000000004,
"count": 18
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.59090909090909,
"min": 7.7727272727272725,
"max": 26.59090909090909,
"count": 18
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1170.0,
"min": 342.0,
"max": 1420.0,
"count": 18
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.59090909090909,
"min": 7.7727272727272725,
"max": 26.59090909090909,
"count": 18
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1170.0,
"min": 342.0,
"max": 1420.0,
"count": 18
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 18
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 18
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686165051",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1686165452"
},
"total": 401.00628065399997,
"count": 1,
"self": 0.44073737399992297,
"children": {
"run_training.setup": {
"total": 0.05446250200009217,
"count": 1,
"self": 0.05446250200009217
},
"TrainerController.start_learning": {
"total": 400.51108077799995,
"count": 1,
"self": 0.4975294040019662,
"children": {
"TrainerController._reset_env": {
"total": 4.552501032000009,
"count": 1,
"self": 4.552501032000009
},
"TrainerController.advance": {
"total": 395.329616187998,
"count": 16396,
"self": 0.2178763969989177,
"children": {
"env_step": {
"total": 395.1117397909991,
"count": 16396,
"self": 288.95184155298523,
"children": {
"SubprocessEnvManager._take_step": {
"total": 105.93629264700564,
"count": 16396,
"self": 1.5434962390024793,
"children": {
"TorchPolicy.evaluate": {
"total": 104.39279640800316,
"count": 16396,
"self": 104.39279640800316
}
}
},
"workers": {
"total": 0.22360559100820865,
"count": 16396,
"self": 0.0,
"children": {
"worker_root": {
"total": 399.1767121200021,
"count": 16396,
"is_parallel": true,
"self": 188.9573629980041,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025297219999629306,
"count": 1,
"is_parallel": true,
"self": 0.0006352729998297946,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001894449000133136,
"count": 10,
"is_parallel": true,
"self": 0.001894449000133136
}
}
},
"UnityEnvironment.step": {
"total": 0.035092385999973885,
"count": 1,
"is_parallel": true,
"self": 0.0005859020000116288,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005024499999990439,
"count": 1,
"is_parallel": true,
"self": 0.0005024499999990439
},
"communicator.exchange": {
"total": 0.03200594500003717,
"count": 1,
"is_parallel": true,
"self": 0.03200594500003717
},
"steps_from_proto": {
"total": 0.00199808899992604,
"count": 1,
"is_parallel": true,
"self": 0.00036533499996949104,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016327539999565488,
"count": 10,
"is_parallel": true,
"self": 0.0016327539999565488
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 210.219349121998,
"count": 16395,
"is_parallel": true,
"self": 8.36910340099837,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.505482750000397,
"count": 16395,
"is_parallel": true,
"self": 4.505482750000397
},
"communicator.exchange": {
"total": 168.99662558000466,
"count": 16395,
"is_parallel": true,
"self": 168.99662558000466
},
"steps_from_proto": {
"total": 28.348137390994566,
"count": 16395,
"is_parallel": true,
"self": 5.2998149489742445,
"children": {
"_process_rank_one_or_two_observation": {
"total": 23.04832244202032,
"count": 163950,
"is_parallel": true,
"self": 23.04832244202032
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00042104099998141464,
"count": 1,
"self": 0.00042104099998141464,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 392.3486330929983,
"count": 374270,
"is_parallel": true,
"self": 8.186072720951529,
"children": {
"process_trajectory": {
"total": 214.88170560304673,
"count": 374270,
"is_parallel": true,
"self": 213.67281981004646,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2088857930002632,
"count": 4,
"is_parallel": true,
"self": 1.2088857930002632
}
}
},
"_update_policy": {
"total": 169.28085476900003,
"count": 81,
"is_parallel": true,
"self": 64.1753867340052,
"children": {
"TorchPPOOptimizer.update": {
"total": 105.10546803499483,
"count": 4128,
"is_parallel": true,
"self": 105.10546803499483
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13101311299999452,
"count": 1,
"self": 0.0011919919999172635,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12982112100007726,
"count": 1,
"self": 0.12982112100007726
}
}
}
}
}
}
}