{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8791131377220154,
"min": 0.8785493969917297,
"max": 2.8694639205932617,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8393.7724609375,
"min": 8393.7724609375,
"max": 29386.1796875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.67941951751709,
"min": 0.41996654868125916,
"max": 12.67941951751709,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2472.48681640625,
"min": 81.4735107421875,
"max": 2569.41650390625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07150173112712961,
"min": 0.06179333429101069,
"max": 0.07526826572093163,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28600692450851845,
"min": 0.24840202485610655,
"max": 0.3763413286046582,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18945974761656686,
"min": 0.1059214645487658,
"max": 0.26745245579411003,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7578389904662675,
"min": 0.4236858581950632,
"max": 1.3372622789705502,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.045454545454547,
"min": 3.272727272727273,
"max": 25.045454545454547,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1102.0,
"min": 144.0,
"max": 1347.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.045454545454547,
"min": 3.272727272727273,
"max": 25.045454545454547,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1102.0,
"min": 144.0,
"max": 1347.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677098234",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677098685"
},
"total": 451.35284838800004,
"count": 1,
"self": 0.3917832610001142,
"children": {
"run_training.setup": {
"total": 0.18660922199995866,
"count": 1,
"self": 0.18660922199995866
},
"TrainerController.start_learning": {
"total": 450.77445590499997,
"count": 1,
"self": 0.5420482830010656,
"children": {
"TrainerController._reset_env": {
"total": 9.10100631299997,
"count": 1,
"self": 9.10100631299997
},
"TrainerController.advance": {
"total": 441.011029951999,
"count": 18202,
"self": 0.2626317010025332,
"children": {
"env_step": {
"total": 440.74839825099644,
"count": 18202,
"self": 304.10721340400875,
"children": {
"SubprocessEnvManager._take_step": {
"total": 136.37135668399685,
"count": 18202,
"self": 1.4688679960148079,
"children": {
"TorchPolicy.evaluate": {
"total": 134.90248868798204,
"count": 18202,
"self": 31.04764251398808,
"children": {
"TorchPolicy.sample_actions": {
"total": 103.85484617399396,
"count": 18202,
"self": 103.85484617399396
}
}
}
}
},
"workers": {
"total": 0.2698281629908479,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 449.41448668901086,
"count": 18202,
"is_parallel": true,
"self": 217.6250865240063,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007821176999982526,
"count": 1,
"is_parallel": true,
"self": 0.004483459000084622,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0033377179998979045,
"count": 10,
"is_parallel": true,
"self": 0.0033377179998979045
}
}
},
"UnityEnvironment.step": {
"total": 0.03268953799999963,
"count": 1,
"is_parallel": true,
"self": 0.0003808920000096805,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00037331499999027073,
"count": 1,
"is_parallel": true,
"self": 0.00037331499999027073
},
"communicator.exchange": {
"total": 0.030430769000020064,
"count": 1,
"is_parallel": true,
"self": 0.030430769000020064
},
"steps_from_proto": {
"total": 0.0015045619999796145,
"count": 1,
"is_parallel": true,
"self": 0.0004037820000917236,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011007799998878909,
"count": 10,
"is_parallel": true,
"self": 0.0011007799998878909
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 231.78940016500457,
"count": 18201,
"is_parallel": true,
"self": 9.2154623909924,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.088928693006608,
"count": 18201,
"is_parallel": true,
"self": 5.088928693006608
},
"communicator.exchange": {
"total": 186.82624670900515,
"count": 18201,
"is_parallel": true,
"self": 186.82624670900515
},
"steps_from_proto": {
"total": 30.65876237200041,
"count": 18201,
"is_parallel": true,
"self": 6.6881980109726555,
"children": {
"_process_rank_one_or_two_observation": {
"total": 23.970564361027755,
"count": 182010,
"is_parallel": true,
"self": 23.970564361027755
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00014942299992526387,
"count": 1,
"self": 0.00014942299992526387,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 437.92158436004786,
"count": 398075,
"is_parallel": true,
"self": 9.69654421600967,
"children": {
"process_trajectory": {
"total": 251.05841682703885,
"count": 398075,
"is_parallel": true,
"self": 250.30902646303878,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7493903640000781,
"count": 4,
"is_parallel": true,
"self": 0.7493903640000781
}
}
},
"_update_policy": {
"total": 177.16662331699933,
"count": 90,
"is_parallel": true,
"self": 59.480325247996575,
"children": {
"TorchPPOOptimizer.update": {
"total": 117.68629806900276,
"count": 4587,
"is_parallel": true,
"self": 117.68629806900276
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12022193400002834,
"count": 1,
"self": 0.000822915000071589,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11939901899995675,
"count": 1,
"self": 0.11939901899995675
}
}
}
}
}
}
}