{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0051778554916382,
"min": 1.0051778554916382,
"max": 2.850841760635376,
"count": 25
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9608.4951171875,
"min": 9608.4951171875,
"max": 29258.189453125,
"count": 25
},
"SnowballTarget.Step.mean": {
"value": 249944.0,
"min": 9952.0,
"max": 249944.0,
"count": 25
},
"SnowballTarget.Step.sum": {
"value": 249944.0,
"min": 9952.0,
"max": 249944.0,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.010763168334961,
"min": 0.4187123775482178,
"max": 13.010763168334961,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2524.088134765625,
"min": 81.2302017211914,
"max": 2651.8447265625,
"count": 25
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 25
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 25
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06841872785954882,
"min": 0.06083166407576451,
"max": 0.07478306438207774,
"count": 25
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2736749114381953,
"min": 0.26513361569274874,
"max": 0.3739153219103887,
"count": 25
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21629036736546778,
"min": 0.1329953646590915,
"max": 0.2844530167971171,
"count": 25
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8651614694618711,
"min": 0.531981458636366,
"max": 1.4132217963536582,
"count": 25
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.362658084800001e-06,
"min": 5.362658084800001e-06,
"max": 0.0002739385621648,
"count": 25
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.1450632339200004e-05,
"min": 2.1450632339200004e-05,
"max": 0.0013142528306239998,
"count": 25
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10210672,
"min": 0.10210672,
"max": 0.20761871999999995,
"count": 25
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40842688,
"min": 0.40842688,
"max": 1.0163136,
"count": 25
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00010556848000000002,
"min": 0.00010556848000000002,
"max": 0.004891976480000001,
"count": 25
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0004222739200000001,
"min": 0.0004222739200000001,
"max": 0.023471862399999998,
"count": 25
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.59090909090909,
"min": 3.8181818181818183,
"max": 25.78181818181818,
"count": 25
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1126.0,
"min": 168.0,
"max": 1418.0,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.59090909090909,
"min": 3.8181818181818183,
"max": 25.78181818181818,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1126.0,
"min": 168.0,
"max": 1418.0,
"count": 25
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 25
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 25
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680788900",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680789526"
},
"total": 625.8838122110001,
"count": 1,
"self": 0.7380752690000918,
"children": {
"run_training.setup": {
"total": 0.20117395799996984,
"count": 1,
"self": 0.20117395799996984
},
"TrainerController.start_learning": {
"total": 624.944562984,
"count": 1,
"self": 0.8224402120023342,
"children": {
"TrainerController._reset_env": {
"total": 4.503825919000008,
"count": 1,
"self": 4.503825919000008
},
"TrainerController.advance": {
"total": 619.3904230329975,
"count": 22750,
"self": 0.4076515319976579,
"children": {
"env_step": {
"total": 618.9827715009999,
"count": 22750,
"self": 451.3973156260016,
"children": {
"SubprocessEnvManager._take_step": {
"total": 167.18324239599485,
"count": 22750,
"self": 2.437499054995442,
"children": {
"TorchPolicy.evaluate": {
"total": 164.7457433409994,
"count": 22750,
"self": 164.7457433409994
}
}
},
"workers": {
"total": 0.4022134790034215,
"count": 22750,
"self": 0.0,
"children": {
"worker_root": {
"total": 622.7321380549978,
"count": 22750,
"is_parallel": true,
"self": 288.29527991399027,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007069178000051579,
"count": 1,
"is_parallel": true,
"self": 0.004372646000035729,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00269653200001585,
"count": 10,
"is_parallel": true,
"self": 0.00269653200001585
}
}
},
"UnityEnvironment.step": {
"total": 0.04195419900003117,
"count": 1,
"is_parallel": true,
"self": 0.0006739550000816052,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048259299995834226,
"count": 1,
"is_parallel": true,
"self": 0.00048259299995834226
},
"communicator.exchange": {
"total": 0.03878601899998557,
"count": 1,
"is_parallel": true,
"self": 0.03878601899998557
},
"steps_from_proto": {
"total": 0.0020116320000056476,
"count": 1,
"is_parallel": true,
"self": 0.0004222539999432229,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015893780000624247,
"count": 10,
"is_parallel": true,
"self": 0.0015893780000624247
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 334.4368581410075,
"count": 22749,
"is_parallel": true,
"self": 13.594683386995769,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.380644005000818,
"count": 22749,
"is_parallel": true,
"self": 7.380644005000818
},
"communicator.exchange": {
"total": 268.66307402399747,
"count": 22749,
"is_parallel": true,
"self": 268.66307402399747
},
"steps_from_proto": {
"total": 44.798456725013466,
"count": 22749,
"is_parallel": true,
"self": 9.178264315028798,
"children": {
"_process_rank_one_or_two_observation": {
"total": 35.62019240998467,
"count": 227490,
"is_parallel": true,
"self": 35.62019240998467
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011838300008548686,
"count": 1,
"self": 0.00011838300008548686,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 614.3522099160016,
"count": 556875,
"is_parallel": true,
"self": 14.464264445007757,
"children": {
"process_trajectory": {
"total": 339.1460279739942,
"count": 556875,
"is_parallel": true,
"self": 337.2234935659942,
"children": {
"RLTrainer._checkpoint": {
"total": 1.92253440799999,
"count": 5,
"is_parallel": true,
"self": 1.92253440799999
}
}
},
"_update_policy": {
"total": 260.74191749699963,
"count": 113,
"is_parallel": true,
"self": 98.25234598400107,
"children": {
"TorchPPOOptimizer.update": {
"total": 162.48957151299857,
"count": 5760,
"is_parallel": true,
"self": 162.48957151299857
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.22775543700004164,
"count": 1,
"self": 0.0011918860001287612,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22656355099991288,
"count": 1,
"self": 0.22656355099991288
}
}
}
}
}
}
}