{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0443655252456665,
"min": 1.0443655252456665,
"max": 2.863853693008423,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9971.6015625,
"min": 9971.6015625,
"max": 29328.7265625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.095455169677734,
"min": 0.4043411910533905,
"max": 12.095455169677734,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2358.61376953125,
"min": 78.44219207763672,
"max": 2448.1064453125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06992694577167961,
"min": 0.05732152753540397,
"max": 0.07296822791670587,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27970778308671845,
"min": 0.22928611014161587,
"max": 0.36144623877800197,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.16861307515087082,
"min": 0.1458241248277345,
"max": 0.2920259167896766,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6744523006034833,
"min": 0.583296499310938,
"max": 1.409999023465549,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 23.795454545454547,
"min": 4.295454545454546,
"max": 23.90909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1047.0,
"min": 189.0,
"max": 1311.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 23.795454545454547,
"min": 4.295454545454546,
"max": 23.90909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1047.0,
"min": 189.0,
"max": 1311.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673633692",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673634119"
},
"total": 426.21681739899986,
"count": 1,
"self": 0.39168564499993863,
"children": {
"run_training.setup": {
"total": 0.10722537899994222,
"count": 1,
"self": 0.10722537899994222
},
"TrainerController.start_learning": {
"total": 425.717906375,
"count": 1,
"self": 0.5115745960010827,
"children": {
"TrainerController._reset_env": {
"total": 9.774187464000079,
"count": 1,
"self": 9.774187464000079
},
"TrainerController.advance": {
"total": 415.3102505739987,
"count": 18202,
"self": 0.24587060599117194,
"children": {
"env_step": {
"total": 415.06437996800753,
"count": 18202,
"self": 269.2454888950143,
"children": {
"SubprocessEnvManager._take_step": {
"total": 145.55911488300717,
"count": 18202,
"self": 1.3651868880201619,
"children": {
"TorchPolicy.evaluate": {
"total": 144.193927994987,
"count": 18202,
"self": 32.87408873298875,
"children": {
"TorchPolicy.sample_actions": {
"total": 111.31983926199825,
"count": 18202,
"self": 111.31983926199825
}
}
}
}
},
"workers": {
"total": 0.2597761899860416,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 424.51594483799556,
"count": 18202,
"is_parallel": true,
"self": 207.3907112569974,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.010065822999990814,
"count": 1,
"is_parallel": true,
"self": 0.0032314999999698557,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.006834323000020959,
"count": 10,
"is_parallel": true,
"self": 0.006834323000020959
}
}
},
"UnityEnvironment.step": {
"total": 0.03225478000001658,
"count": 1,
"is_parallel": true,
"self": 0.00036208599988185597,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00039012900003854156,
"count": 1,
"is_parallel": true,
"self": 0.00039012900003854156
},
"communicator.exchange": {
"total": 0.029832528000042657,
"count": 1,
"is_parallel": true,
"self": 0.029832528000042657
},
"steps_from_proto": {
"total": 0.0016700370000535258,
"count": 1,
"is_parallel": true,
"self": 0.00039985000023534667,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012701869998181792,
"count": 10,
"is_parallel": true,
"self": 0.0012701869998181792
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 217.12523358099816,
"count": 18201,
"is_parallel": true,
"self": 8.376296082017348,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.978834629983226,
"count": 18201,
"is_parallel": true,
"self": 4.978834629983226
},
"communicator.exchange": {
"total": 173.60445570599938,
"count": 18201,
"is_parallel": true,
"self": 173.60445570599938
},
"steps_from_proto": {
"total": 30.165647162998198,
"count": 18201,
"is_parallel": true,
"self": 6.346313957957932,
"children": {
"_process_rank_one_or_two_observation": {
"total": 23.819333205040266,
"count": 182010,
"is_parallel": true,
"self": 23.819333205040266
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.943799993066932e-05,
"count": 1,
"self": 3.943799993066932e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 412.43650073202593,
"count": 333360,
"is_parallel": true,
"self": 8.909559002041306,
"children": {
"process_trajectory": {
"total": 235.64024382298533,
"count": 333360,
"is_parallel": true,
"self": 234.88792844498528,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7523153780000484,
"count": 4,
"is_parallel": true,
"self": 0.7523153780000484
}
}
},
"_update_policy": {
"total": 167.8866979069993,
"count": 90,
"is_parallel": true,
"self": 44.02173338999796,
"children": {
"TorchPPOOptimizer.update": {
"total": 123.86496451700134,
"count": 4587,
"is_parallel": true,
"self": 123.86496451700134
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12185430300019107,
"count": 1,
"self": 0.0008595560000230762,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12099474700016799,
"count": 1,
"self": 0.12099474700016799
}
}
}
}
}
}
}