{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9494575262069702,
"min": 0.9494575262069702,
"max": 2.837203025817871,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9086.30859375,
"min": 9086.30859375,
"max": 29055.796875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.786092758178711,
"min": 0.42909008264541626,
"max": 11.786092758178711,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2298.2880859375,
"min": 83.24347686767578,
"max": 2402.05029296875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06913151310365327,
"min": 0.0625574576097778,
"max": 0.07388134899243311,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2765260524146131,
"min": 0.252493306226896,
"max": 0.3694067449621655,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17171131523654742,
"min": 0.15162304830486323,
"max": 0.2876447317939178,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6868452609461897,
"min": 0.6064921932194529,
"max": 1.3086305216831318,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 22.795454545454547,
"min": 4.295454545454546,
"max": 23.345454545454544,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1003.0,
"min": 189.0,
"max": 1284.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 22.795454545454547,
"min": 4.295454545454546,
"max": 23.345454545454544,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1003.0,
"min": 189.0,
"max": 1284.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679966849",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679967306"
},
"total": 456.413522813,
"count": 1,
"self": 0.43571812400000454,
"children": {
"run_training.setup": {
"total": 0.11103221199994096,
"count": 1,
"self": 0.11103221199994096
},
"TrainerController.start_learning": {
"total": 455.86677247700004,
"count": 1,
"self": 0.4988553170040859,
"children": {
"TrainerController._reset_env": {
"total": 9.484559331000014,
"count": 1,
"self": 9.484559331000014
},
"TrainerController.advance": {
"total": 445.73497215199586,
"count": 18203,
"self": 0.2649171719959895,
"children": {
"env_step": {
"total": 445.4700549799999,
"count": 18203,
"self": 322.8176135300056,
"children": {
"SubprocessEnvManager._take_step": {
"total": 122.39673699099967,
"count": 18203,
"self": 1.924815501000353,
"children": {
"TorchPolicy.evaluate": {
"total": 120.47192148999932,
"count": 18203,
"self": 120.47192148999932
}
}
},
"workers": {
"total": 0.25570445899461447,
"count": 18203,
"self": 0.0,
"children": {
"worker_root": {
"total": 454.5281172940071,
"count": 18203,
"is_parallel": true,
"self": 217.51545681001312,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005544266000015341,
"count": 1,
"is_parallel": true,
"self": 0.00411207000001923,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001432195999996111,
"count": 10,
"is_parallel": true,
"self": 0.001432195999996111
}
}
},
"UnityEnvironment.step": {
"total": 0.033288758000026064,
"count": 1,
"is_parallel": true,
"self": 0.0004028640000797168,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044442800003707816,
"count": 1,
"is_parallel": true,
"self": 0.00044442800003707816
},
"communicator.exchange": {
"total": 0.03138480799998433,
"count": 1,
"is_parallel": true,
"self": 0.03138480799998433
},
"steps_from_proto": {
"total": 0.001056657999924937,
"count": 1,
"is_parallel": true,
"self": 0.0002684789998284032,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007881790000965339,
"count": 10,
"is_parallel": true,
"self": 0.0007881790000965339
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 237.01266048399395,
"count": 18202,
"is_parallel": true,
"self": 9.492394476003938,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.086826395997832,
"count": 18202,
"is_parallel": true,
"self": 5.086826395997832
},
"communicator.exchange": {
"total": 192.12825403699173,
"count": 18202,
"is_parallel": true,
"self": 192.12825403699173
},
"steps_from_proto": {
"total": 30.30518557500045,
"count": 18202,
"is_parallel": true,
"self": 5.962567652983694,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.342617922016757,
"count": 182020,
"is_parallel": true,
"self": 24.342617922016757
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00010491400007595075,
"count": 1,
"self": 0.00010491400007595075,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 442.4698047139549,
"count": 407531,
"is_parallel": true,
"self": 9.676120866909969,
"children": {
"process_trajectory": {
"total": 245.86394945804545,
"count": 407531,
"is_parallel": true,
"self": 245.2011334620455,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6628159959999493,
"count": 4,
"is_parallel": true,
"self": 0.6628159959999493
}
}
},
"_update_policy": {
"total": 186.92973438899946,
"count": 90,
"is_parallel": true,
"self": 70.31111547799912,
"children": {
"TorchPPOOptimizer.update": {
"total": 116.61861891100034,
"count": 4587,
"is_parallel": true,
"self": 116.61861891100034
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14828076300000248,
"count": 1,
"self": 0.0008878209999920728,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1473929420000104,
"count": 1,
"self": 0.1473929420000104
}
}
}
}
}
}
}