{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.05338716506958,
"min": 1.05338716506958,
"max": 2.8590540885925293,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10080.9150390625,
"min": 10080.9150390625,
"max": 29342.47265625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.936591148376465,
"min": 0.38258469104766846,
"max": 11.936591148376465,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2327.63525390625,
"min": 74.22142791748047,
"max": 2402.373046875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06913626900224663,
"min": 0.06178730253230634,
"max": 0.0759000577238005,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27654507600898653,
"min": 0.2567769905130548,
"max": 0.3795002886190025,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20873651423436754,
"min": 0.1387146952557469,
"max": 0.2734910408217533,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8349460569374701,
"min": 0.5548587810229876,
"max": 1.2654076312102522,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.977272727272727,
"min": 3.8863636363636362,
"max": 23.977272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1055.0,
"min": 171.0,
"max": 1311.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.977272727272727,
"min": 3.8863636363636362,
"max": 23.977272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1055.0,
"min": 171.0,
"max": 1311.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680946686",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680947293"
},
"total": 606.806786807,
"count": 1,
"self": 0.6396913060000315,
"children": {
"run_training.setup": {
"total": 0.25467186200000924,
"count": 1,
"self": 0.25467186200000924
},
"TrainerController.start_learning": {
"total": 605.9124236389999,
"count": 1,
"self": 0.9458133859968711,
"children": {
"TrainerController._reset_env": {
"total": 1.305658333999986,
"count": 1,
"self": 1.305658333999986
},
"TrainerController.advance": {
"total": 603.508910703003,
"count": 18204,
"self": 0.4649136019936577,
"children": {
"env_step": {
"total": 603.0439971010094,
"count": 18204,
"self": 491.9772780220218,
"children": {
"SubprocessEnvManager._take_step": {
"total": 110.6092721659852,
"count": 18204,
"self": 2.8578475239887666,
"children": {
"TorchPolicy.evaluate": {
"total": 107.75142464199644,
"count": 18204,
"self": 107.75142464199644
}
}
},
"workers": {
"total": 0.4574469130023999,
"count": 18204,
"self": 0.0,
"children": {
"worker_root": {
"total": 603.6023964810075,
"count": 18204,
"is_parallel": true,
"self": 254.35592098701147,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00877268199997161,
"count": 1,
"is_parallel": true,
"self": 0.005024787000024844,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003747894999946766,
"count": 10,
"is_parallel": true,
"self": 0.003747894999946766
}
}
},
"UnityEnvironment.step": {
"total": 0.10290686299993013,
"count": 1,
"is_parallel": true,
"self": 0.0008003099999314145,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005242810000254394,
"count": 1,
"is_parallel": true,
"self": 0.0005242810000254394
},
"communicator.exchange": {
"total": 0.09386212200001864,
"count": 1,
"is_parallel": true,
"self": 0.09386212200001864
},
"steps_from_proto": {
"total": 0.007720149999954629,
"count": 1,
"is_parallel": true,
"self": 0.0005472180000651861,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0071729319998894425,
"count": 10,
"is_parallel": true,
"self": 0.0071729319998894425
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 349.24647549399606,
"count": 18203,
"is_parallel": true,
"self": 14.522918412005197,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.003773373988906,
"count": 18203,
"is_parallel": true,
"self": 8.003773373988906
},
"communicator.exchange": {
"total": 280.6770249210043,
"count": 18203,
"is_parallel": true,
"self": 280.6770249210043
},
"steps_from_proto": {
"total": 46.04275878699764,
"count": 18203,
"is_parallel": true,
"self": 9.675356543998987,
"children": {
"_process_rank_one_or_two_observation": {
"total": 36.36740224299865,
"count": 182030,
"is_parallel": true,
"self": 36.36740224299865
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0002598040000520996,
"count": 1,
"self": 0.0002598040000520996,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 597.3262048590668,
"count": 712640,
"is_parallel": true,
"self": 17.478520270068884,
"children": {
"process_trajectory": {
"total": 325.07554716899756,
"count": 712640,
"is_parallel": true,
"self": 324.0385568169977,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0369903519998616,
"count": 4,
"is_parallel": true,
"self": 1.0369903519998616
}
}
},
"_update_policy": {
"total": 254.77213742000038,
"count": 90,
"is_parallel": true,
"self": 89.91283004899833,
"children": {
"TorchPPOOptimizer.update": {
"total": 164.85930737100205,
"count": 4587,
"is_parallel": true,
"self": 164.85930737100205
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.15178141199999118,
"count": 1,
"self": 0.0018060350000723702,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1499753769999188,
"count": 1,
"self": 0.1499753769999188
}
}
}
}
}
}
}