{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0457826852798462,
"min": 1.0457826852798462,
"max": 2.8472232818603516,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9985.1328125,
"min": 9985.1328125,
"max": 29158.4140625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.779532432556152,
"min": 0.3289189338684082,
"max": 12.811956405639648,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2492.0087890625,
"min": 63.810272216796875,
"max": 2613.63916015625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06601817678529187,
"min": 0.06300067347611356,
"max": 0.07691173175454406,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2640727071411675,
"min": 0.25200269390445423,
"max": 0.36766242202757693,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19987702873699806,
"min": 0.13790977085420095,
"max": 0.2764623732689549,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7995081149479922,
"min": 0.5516390834168038,
"max": 1.379391221731317,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.022727272727273,
"min": 3.4318181818181817,
"max": 25.527272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1101.0,
"min": 151.0,
"max": 1404.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.022727272727273,
"min": 3.4318181818181817,
"max": 25.527272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1101.0,
"min": 151.0,
"max": 1404.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674243699",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674244107"
},
"total": 407.1737767589998,
"count": 1,
"self": 0.3408664029998363,
"children": {
"run_training.setup": {
"total": 0.10176681200005078,
"count": 1,
"self": 0.10176681200005078
},
"TrainerController.start_learning": {
"total": 406.7311435439999,
"count": 1,
"self": 0.5137968019864729,
"children": {
"TrainerController._reset_env": {
"total": 9.738123500000029,
"count": 1,
"self": 9.738123500000029
},
"TrainerController.advance": {
"total": 396.3686489240133,
"count": 18202,
"self": 0.26509696500579594,
"children": {
"env_step": {
"total": 396.1035519590075,
"count": 18202,
"self": 257.0690986259922,
"children": {
"SubprocessEnvManager._take_step": {
"total": 138.78560489799565,
"count": 18202,
"self": 1.326758054025504,
"children": {
"TorchPolicy.evaluate": {
"total": 137.45884684397015,
"count": 18202,
"self": 30.973776567982213,
"children": {
"TorchPolicy.sample_actions": {
"total": 106.48507027598794,
"count": 18202,
"self": 106.48507027598794
}
}
}
}
},
"workers": {
"total": 0.24884843501968135,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 405.6067203700152,
"count": 18202,
"is_parallel": true,
"self": 196.3684437160507,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.009206556999970417,
"count": 1,
"is_parallel": true,
"self": 0.005240569999841682,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0039659870001287345,
"count": 10,
"is_parallel": true,
"self": 0.0039659870001287345
}
}
},
"UnityEnvironment.step": {
"total": 0.034284782000213454,
"count": 1,
"is_parallel": true,
"self": 0.0006287260002864059,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000279227999953946,
"count": 1,
"is_parallel": true,
"self": 0.000279227999953946
},
"communicator.exchange": {
"total": 0.031420859000036216,
"count": 1,
"is_parallel": true,
"self": 0.031420859000036216
},
"steps_from_proto": {
"total": 0.0019559689999368857,
"count": 1,
"is_parallel": true,
"self": 0.0004437690004124306,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001512199999524455,
"count": 10,
"is_parallel": true,
"self": 0.001512199999524455
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 209.23827665396448,
"count": 18201,
"is_parallel": true,
"self": 7.9986135039848705,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.757286246996955,
"count": 18201,
"is_parallel": true,
"self": 4.757286246996955
},
"communicator.exchange": {
"total": 167.96224445802113,
"count": 18201,
"is_parallel": true,
"self": 167.96224445802113
},
"steps_from_proto": {
"total": 28.52013244496152,
"count": 18201,
"is_parallel": true,
"self": 5.991891630002328,
"children": {
"_process_rank_one_or_two_observation": {
"total": 22.52824081495919,
"count": 182010,
"is_parallel": true,
"self": 22.52824081495919
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.9431999994121725e-05,
"count": 1,
"self": 3.9431999994121725e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 393.74867450290003,
"count": 323032,
"is_parallel": true,
"self": 8.222959823811152,
"children": {
"process_trajectory": {
"total": 227.3287449850859,
"count": 323032,
"is_parallel": true,
"self": 226.57043565808567,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7583093270002337,
"count": 4,
"is_parallel": true,
"self": 0.7583093270002337
}
}
},
"_update_policy": {
"total": 158.19696969400297,
"count": 90,
"is_parallel": true,
"self": 40.00761731800435,
"children": {
"TorchPPOOptimizer.update": {
"total": 118.18935237599862,
"count": 4587,
"is_parallel": true,
"self": 118.18935237599862
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.11053488600009587,
"count": 1,
"self": 0.0008292630004689272,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10970562299962694,
"count": 1,
"self": 0.10970562299962694
}
}
}
}
}
}
}