{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8322855234146118,
"min": 0.8322855234146118,
"max": 2.7957417964935303,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 16488.408203125,
"min": 16488.408203125,
"max": 57416.1484375,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 19992.0,
"max": 199984.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 19992.0,
"max": 199984.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.446203231811523,
"min": 0.9528762698173523,
"max": 12.446203231811523,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 4966.03515625,
"min": 380.1976318359375,
"max": 4966.03515625,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 19701.0,
"min": 19701.0,
"max": 19701.0,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0692439689593895,
"min": 0.06408763168243212,
"max": 0.07550944930959157,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.6231957206345055,
"min": 0.5767886851418891,
"max": 0.6795850437863241,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19492741195334848,
"min": 0.18646002653377508,
"max": 0.27060192570187686,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.7543467075801362,
"min": 1.6781402388039757,
"max": 2.435417331316892,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.6332094556000002e-05,
"min": 1.6332094556000002e-05,
"max": 0.00028363200545599993,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.000146988851004,
"min": 0.000146988851004,
"max": 0.0025526880491039995,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10544400000000001,
"min": 0.10544400000000001,
"max": 0.19454400000000002,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.9489960000000001,
"min": 0.9489960000000001,
"max": 1.7508960000000002,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00028165560000000005,
"min": 0.00028165560000000005,
"max": 0.0047277456,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0025349004000000007,
"min": 0.0025349004000000007,
"max": 0.0425497104,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.68686868686869,
"min": 5.181818181818182,
"max": 24.707070707070706,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 2444.0,
"min": 513.0,
"max": 2446.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.68686868686869,
"min": 5.181818181818182,
"max": 24.707070707070706,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 2444.0,
"min": 513.0,
"max": 2446.0,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673777196",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673777612"
},
"total": 415.303825705,
"count": 1,
"self": 0.3888988430000495,
"children": {
"run_training.setup": {
"total": 0.1117085409999845,
"count": 1,
"self": 0.1117085409999845
},
"TrainerController.start_learning": {
"total": 414.803218321,
"count": 1,
"self": 0.4745769419945418,
"children": {
"TrainerController._reset_env": {
"total": 9.440173540999979,
"count": 1,
"self": 9.440173540999979
},
"TrainerController.advance": {
"total": 404.7670398280055,
"count": 18202,
"self": 0.252513248012292,
"children": {
"env_step": {
"total": 404.51452657999323,
"count": 18202,
"self": 261.81654568998687,
"children": {
"SubprocessEnvManager._take_step": {
"total": 142.4430493090116,
"count": 18202,
"self": 1.3202563640122094,
"children": {
"TorchPolicy.evaluate": {
"total": 141.1227929449994,
"count": 18202,
"self": 31.13745684199432,
"children": {
"TorchPolicy.sample_actions": {
"total": 109.98533610300507,
"count": 18202,
"self": 109.98533610300507
}
}
}
}
},
"workers": {
"total": 0.2549315809947643,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 413.67734485099743,
"count": 18202,
"is_parallel": true,
"self": 203.4397966290066,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006118731000015032,
"count": 1,
"is_parallel": true,
"self": 0.003456120000066676,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002662610999948356,
"count": 10,
"is_parallel": true,
"self": 0.002662610999948356
}
}
},
"UnityEnvironment.step": {
"total": 0.0344935489999898,
"count": 1,
"is_parallel": true,
"self": 0.0004182760000048802,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00031521599998995953,
"count": 1,
"is_parallel": true,
"self": 0.00031521599998995953
},
"communicator.exchange": {
"total": 0.03253884400004381,
"count": 1,
"is_parallel": true,
"self": 0.03253884400004381
},
"steps_from_proto": {
"total": 0.001221212999951149,
"count": 1,
"is_parallel": true,
"self": 0.0003871059998914461,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008341070000597028,
"count": 10,
"is_parallel": true,
"self": 0.0008341070000597028
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 210.23754822199083,
"count": 18201,
"is_parallel": true,
"self": 8.113584841001057,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.8398602879904615,
"count": 18201,
"is_parallel": true,
"self": 4.8398602879904615
},
"communicator.exchange": {
"total": 168.1935638239944,
"count": 18201,
"is_parallel": true,
"self": 168.1935638239944
},
"steps_from_proto": {
"total": 29.09053926900492,
"count": 18201,
"is_parallel": true,
"self": 6.1888591819767385,
"children": {
"_process_rank_one_or_two_observation": {
"total": 22.901680087028183,
"count": 182010,
"is_parallel": true,
"self": 22.901680087028183
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.3135999931109836e-05,
"count": 1,
"self": 4.3135999931109836e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 401.89261221799757,
"count": 321764,
"is_parallel": true,
"self": 8.884826081975177,
"children": {
"process_trajectory": {
"total": 228.7507015800229,
"count": 321764,
"is_parallel": true,
"self": 228.068985092023,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6817164879998927,
"count": 4,
"is_parallel": true,
"self": 0.6817164879998927
}
}
},
"_update_policy": {
"total": 164.25708455599948,
"count": 90,
"is_parallel": true,
"self": 42.274553390993674,
"children": {
"TorchPPOOptimizer.update": {
"total": 121.98253116500581,
"count": 4587,
"is_parallel": true,
"self": 121.98253116500581
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12138487400000031,
"count": 1,
"self": 0.0008400389999678737,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12054483500003244,
"count": 1,
"self": 0.12054483500003244
}
}
}
}
}
}
}