{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0902103185653687,
"min": 1.0902103185653687,
"max": 2.8767282962799072,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10409.328125,
"min": 10409.328125,
"max": 29460.57421875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.8401517868042,
"min": 0.2519228160381317,
"max": 12.8401517868042,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2503.82958984375,
"min": 48.873023986816406,
"max": 2601.0302734375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0707265557179127,
"min": 0.06183445301390819,
"max": 0.07474846393212348,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2829062228716508,
"min": 0.24733781205563277,
"max": 0.3501568687273653,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18929746034829056,
"min": 0.11987323994866991,
"max": 0.27918129823371474,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7571898413931623,
"min": 0.47949295979467965,
"max": 1.349904404843555,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10134700000000002,
"min": 0.10134700000000002,
"max": 0.148647,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.4053880000000001,
"min": 0.4053880000000001,
"max": 0.73086,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.40909090909091,
"min": 3.0681818181818183,
"max": 25.40909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1118.0,
"min": 135.0,
"max": 1378.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.40909090909091,
"min": 3.0681818181818183,
"max": 25.40909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1118.0,
"min": 135.0,
"max": 1378.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675135388",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675135808"
},
"total": 419.0406654149999,
"count": 1,
"self": 0.3962756680000439,
"children": {
"run_training.setup": {
"total": 0.10444804600001589,
"count": 1,
"self": 0.10444804600001589
},
"TrainerController.start_learning": {
"total": 418.53994170099986,
"count": 1,
"self": 0.4991521600051101,
"children": {
"TrainerController._reset_env": {
"total": 10.050479984000049,
"count": 1,
"self": 10.050479984000049
},
"TrainerController.advance": {
"total": 407.8739331759947,
"count": 18202,
"self": 0.24590565496043837,
"children": {
"env_step": {
"total": 407.62802752103426,
"count": 18202,
"self": 264.695551015039,
"children": {
"SubprocessEnvManager._take_step": {
"total": 142.6872872480052,
"count": 18202,
"self": 1.3564303000081281,
"children": {
"TorchPolicy.evaluate": {
"total": 141.33085694799706,
"count": 18202,
"self": 31.740717866998352,
"children": {
"TorchPolicy.sample_actions": {
"total": 109.59013908099871,
"count": 18202,
"self": 109.59013908099871
}
}
}
}
},
"workers": {
"total": 0.2451892579900914,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 417.34961300698694,
"count": 18202,
"is_parallel": true,
"self": 202.77235553799198,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005885981000005813,
"count": 1,
"is_parallel": true,
"self": 0.0034170090003726727,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0024689719996331405,
"count": 10,
"is_parallel": true,
"self": 0.0024689719996331405
}
}
},
"UnityEnvironment.step": {
"total": 0.04155735300003016,
"count": 1,
"is_parallel": true,
"self": 0.0005350950000320154,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002934679999953005,
"count": 1,
"is_parallel": true,
"self": 0.0002934679999953005
},
"communicator.exchange": {
"total": 0.03878479000002244,
"count": 1,
"is_parallel": true,
"self": 0.03878479000002244
},
"steps_from_proto": {
"total": 0.0019439999999804058,
"count": 1,
"is_parallel": true,
"self": 0.0004280080000853559,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00151599199989505,
"count": 10,
"is_parallel": true,
"self": 0.00151599199989505
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 214.57725746899496,
"count": 18201,
"is_parallel": true,
"self": 8.200629024989894,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.7966552679932875,
"count": 18201,
"is_parallel": true,
"self": 4.7966552679932875
},
"communicator.exchange": {
"total": 172.60588324300966,
"count": 18201,
"is_parallel": true,
"self": 172.60588324300966
},
"steps_from_proto": {
"total": 28.97408993300212,
"count": 18201,
"is_parallel": true,
"self": 6.158997460034698,
"children": {
"_process_rank_one_or_two_observation": {
"total": 22.81509247296742,
"count": 182010,
"is_parallel": true,
"self": 22.81509247296742
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.2769999936354e-05,
"count": 1,
"self": 4.2769999936354e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 404.9752495340874,
"count": 337900,
"is_parallel": true,
"self": 8.753263834082645,
"children": {
"process_trajectory": {
"total": 232.55447897900615,
"count": 337900,
"is_parallel": true,
"self": 231.78774425600625,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7667347229998995,
"count": 4,
"is_parallel": true,
"self": 0.7667347229998995
}
}
},
"_update_policy": {
"total": 163.66750672099863,
"count": 90,
"is_parallel": true,
"self": 41.60985760000051,
"children": {
"TorchPPOOptimizer.update": {
"total": 122.05764912099812,
"count": 4587,
"is_parallel": true,
"self": 122.05764912099812
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.11633361100007278,
"count": 1,
"self": 0.000804197000206841,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11552941399986594,
"count": 1,
"self": 0.11552941399986594
}
}
}
}
}
}
}