Snowball Target with mean reward 25.545 (commit f147533)

Training statistics and profiler output from the mlagents-learn run (the timers.json log):
{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9354865550994873,
"min": 0.9354865550994873,
"max": 2.8722100257873535,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8932.025390625,
"min": 8932.025390625,
"max": 29382.708984375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.810511589050293,
"min": 0.257997065782547,
"max": 12.810511589050293,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2498.0498046875,
"min": 50.05143356323242,
"max": 2577.759765625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06008655945840349,
"min": 0.06008655945840349,
"max": 0.0741279540250224,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.24034623783361397,
"min": 0.24034623783361397,
"max": 0.37063977012511196,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2078600084664775,
"min": 0.10768823973222763,
"max": 0.3054620669168584,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.83144003386591,
"min": 0.4307529589289105,
"max": 1.527310334584292,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.545454545454547,
"min": 3.25,
"max": 25.545454545454547,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1124.0,
"min": 143.0,
"max": 1400.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.545454545454547,
"min": 3.25,
"max": 25.545454545454547,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1124.0,
"min": 143.0,
"max": 1400.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677138947",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677139410"
},
"total": 463.47052588500014,
"count": 1,
"self": 0.37984798000024966,
"children": {
"run_training.setup": {
"total": 0.10549156799993398,
"count": 1,
"self": 0.10549156799993398
},
"TrainerController.start_learning": {
"total": 462.98518633699996,
"count": 1,
"self": 0.541574343007369,
"children": {
"TrainerController._reset_env": {
"total": 6.665862384000093,
"count": 1,
"self": 6.665862384000093
},
"TrainerController.advance": {
"total": 455.65087900799244,
"count": 18204,
"self": 0.2771743479968336,
"children": {
"env_step": {
"total": 455.3737046599956,
"count": 18204,
"self": 312.54787491396814,
"children": {
"SubprocessEnvManager._take_step": {
"total": 142.54882917699751,
"count": 18204,
"self": 1.5272437019996232,
"children": {
"TorchPolicy.evaluate": {
"total": 141.0215854749979,
"count": 18204,
"self": 31.46740249699019,
"children": {
"TorchPolicy.sample_actions": {
"total": 109.5541829780077,
"count": 18204,
"self": 109.5541829780077
}
}
}
}
},
"workers": {
"total": 0.2770005690299513,
"count": 18204,
"self": 0.0,
"children": {
"worker_root": {
"total": 461.44890504302,
"count": 18204,
"is_parallel": true,
"self": 224.23987221698053,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002070859000014025,
"count": 1,
"is_parallel": true,
"self": 0.0006761070003449277,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013947519996690971,
"count": 10,
"is_parallel": true,
"self": 0.0013947519996690971
}
}
},
"UnityEnvironment.step": {
"total": 0.10738446400000612,
"count": 1,
"is_parallel": true,
"self": 0.0012749979996442562,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00039370200011035195,
"count": 1,
"is_parallel": true,
"self": 0.00039370200011035195
},
"communicator.exchange": {
"total": 0.09729692900009468,
"count": 1,
"is_parallel": true,
"self": 0.09729692900009468
},
"steps_from_proto": {
"total": 0.008418835000156832,
"count": 1,
"is_parallel": true,
"self": 0.003079715000012584,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0053391200001442485,
"count": 10,
"is_parallel": true,
"self": 0.0053391200001442485
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 237.20903282603945,
"count": 18203,
"is_parallel": true,
"self": 9.415901487032215,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.408103846002859,
"count": 18203,
"is_parallel": true,
"self": 5.408103846002859
},
"communicator.exchange": {
"total": 189.00754204401346,
"count": 18203,
"is_parallel": true,
"self": 189.00754204401346
},
"steps_from_proto": {
"total": 33.37748544899091,
"count": 18203,
"is_parallel": true,
"self": 7.088438828007156,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.289046620983754,
"count": 182030,
"is_parallel": true,
"self": 26.289046620983754
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011613200013016467,
"count": 1,
"self": 0.00011613200013016467,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 452.3344780099269,
"count": 401295,
"is_parallel": true,
"self": 9.971107443877145,
"children": {
"process_trajectory": {
"total": 258.0638663320499,
"count": 401295,
"is_parallel": true,
"self": 257.2695492850489,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7943170470009591,
"count": 4,
"is_parallel": true,
"self": 0.7943170470009591
}
}
},
"_update_policy": {
"total": 184.29950423399987,
"count": 90,
"is_parallel": true,
"self": 63.77217038398999,
"children": {
"TorchPPOOptimizer.update": {
"total": 120.52733385000988,
"count": 4587,
"is_parallel": true,
"self": 120.52733385000988
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12675446999992346,
"count": 1,
"self": 0.001091866999558988,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12566260300036447,
"count": 1,
"self": 0.12566260300036447
}
}
}
}
}
}
}
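
The gauges above follow the ML-Agents stats format: each metric keeps its latest value plus the min and max observed across the run, and count is the number of summary writes (20 here, consistent with a 200k-step run summarized every 10k steps). A minimal sketch of pulling out the headline numbers, assuming the file is saved locally as run_logs/timers.json (the path is an assumption, matching where mlagents-learn writes it under results/<run-id>/):

import json

# Assumed path: results/<run-id>/run_logs/timers.json from mlagents-learn.
with open("run_logs/timers.json") as f:
    stats = json.load(f)

gauges = stats["gauges"]

reward = gauges["SnowballTarget.Environment.CumulativeReward.mean"]
entropy = gauges["SnowballTarget.Policy.Entropy.mean"]

# 'value' is the most recent summary; 'min'/'max' span the whole run.
print(f"mean reward: {reward['min']:.2f} -> {reward['value']:.2f}")    # 3.25 -> 25.55
print(f"entropy:     {entropy['max']:.2f} -> {entropy['value']:.2f}")  # 2.87 -> 0.94

The reward climbing from 3.25 to 25.55 while policy entropy falls from 2.87 to 0.94 is the expected picture of a PPO policy converging over the 200k training steps.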
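The block after metadata is a hierarchical wall-clock profile: every node records total seconds, call count, self time (time not attributed to children), and is_parallel for timers measured in worker processes. A short sketch (same assumed path as above) that walks the tree and prints each node's share of the run:

import json

def walk(node, name="root", depth=0, budget=None):
    """Print the timer tree with each node's share of total wall-clock time.

    Note: is_parallel nodes run in worker processes, so their shares can
    overlap with (and exceed) the main thread's.
    """
    total = node.get("total", 0.0)
    budget = budget if budget is not None else (total or 1.0)
    print(f"{'  ' * depth}{name}: {total:.1f}s ({100 * total / budget:.0f}%)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1, budget)

with open("run_logs/timers.json") as f:
    walk(json.load(f))

On this run the Unity side dominates: communicator.exchange accounts for roughly 189 s of environment round-trips and TorchPPOOptimizer.update for roughly 120 s of the ~463 s total.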