{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8370776176452637,
"min": 0.8370776176452637,
"max": 2.8629672527313232,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8001.625,
"min": 8001.625,
"max": 29382.6328125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.658884048461914,
"min": 0.3955569565296173,
"max": 12.658884048461914,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2468.482421875,
"min": 76.73805236816406,
"max": 2579.69287109375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06327738718034966,
"min": 0.06269589007213511,
"max": 0.07831900794468204,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2531095487213986,
"min": 0.2531095487213986,
"max": 0.37358045374732646,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21514710046204866,
"min": 0.13000477055388995,
"max": 0.2805738159838845,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8605884018481946,
"min": 0.5200190822155598,
"max": 1.3466144849856696,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.5,
"min": 3.6818181818181817,
"max": 25.21818181818182,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1078.0,
"min": 162.0,
"max": 1387.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.5,
"min": 3.6818181818181817,
"max": 25.21818181818182,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1078.0,
"min": 162.0,
"max": 1387.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673756873",
"python_version": "3.8.15 (default, Nov 24 2022, 15:19:38) \n[GCC 11.2.0]",
"command_line_arguments": "/home/dfm/anaconda3/envs/hf-drl-class-u5/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.20.0",
"end_time_seconds": "1673757292"
},
"total": 419.1107846312225,
"count": 1,
"self": 0.26931141689419746,
"children": {
"run_training.setup": {
"total": 0.011162672191858292,
"count": 1,
"self": 0.011162672191858292
},
"TrainerController.start_learning": {
"total": 418.83031054213643,
"count": 1,
"self": 0.36471307650208473,
"children": {
"TrainerController._reset_env": {
"total": 2.893760586157441,
"count": 1,
"self": 2.893760586157441
},
"TrainerController.advance": {
"total": 415.485241163522,
"count": 18202,
"self": 0.18188734352588654,
"children": {
"env_step": {
"total": 415.3033538199961,
"count": 18202,
"self": 333.6095225010067,
"children": {
"SubprocessEnvManager._take_step": {
"total": 81.51922756806016,
"count": 18202,
"self": 0.988382151350379,
"children": {
"TorchPolicy.evaluate": {
"total": 80.53084541670978,
"count": 18202,
"self": 19.15562912262976,
"children": {
"TorchPolicy.sample_actions": {
"total": 61.37521629408002,
"count": 18202,
"self": 61.37521629408002
}
}
}
}
},
"workers": {
"total": 0.1746037509292364,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 417.9480272810906,
"count": 18202,
"is_parallel": true,
"self": 157.93524863384664,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003657398745417595,
"count": 1,
"is_parallel": true,
"self": 0.0010343007743358612,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0026230979710817337,
"count": 10,
"is_parallel": true,
"self": 0.0026230979710817337
}
}
},
"UnityEnvironment.step": {
"total": 0.03668358363211155,
"count": 1,
"is_parallel": true,
"self": 0.0007486939430236816,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000513952225446701,
"count": 1,
"is_parallel": true,
"self": 0.000513952225446701
},
"communicator.exchange": {
"total": 0.032966187223792076,
"count": 1,
"is_parallel": true,
"self": 0.032966187223792076
},
"steps_from_proto": {
"total": 0.0024547502398490906,
"count": 1,
"is_parallel": true,
"self": 0.0005661826580762863,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018885675817728043,
"count": 10,
"is_parallel": true,
"self": 0.0018885675817728043
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 260.012778647244,
"count": 18201,
"is_parallel": true,
"self": 10.959790403023362,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.955846441909671,
"count": 18201,
"is_parallel": true,
"self": 5.955846441909671
},
"communicator.exchange": {
"total": 208.3465098850429,
"count": 18201,
"is_parallel": true,
"self": 208.3465098850429
},
"steps_from_proto": {
"total": 34.75063191726804,
"count": 18201,
"is_parallel": true,
"self": 7.12611947208643,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.624512445181608,
"count": 182010,
"is_parallel": true,
"self": 27.624512445181608
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00013136118650436401,
"count": 1,
"self": 0.00013136118650436401,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 414.04736124537885,
"count": 287229,
"is_parallel": true,
"self": 4.284110466018319,
"children": {
"process_trajectory": {
"total": 227.7715497724712,
"count": 287229,
"is_parallel": true,
"self": 227.0762225780636,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6953271944075823,
"count": 4,
"is_parallel": true,
"self": 0.6953271944075823
}
}
},
"_update_policy": {
"total": 181.99170100688934,
"count": 90,
"is_parallel": true,
"self": 30.963989848271012,
"children": {
"TorchPPOOptimizer.update": {
"total": 151.02771115861833,
"count": 4587,
"is_parallel": true,
"self": 151.02771115861833
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08646435476839542,
"count": 1,
"self": 0.0004970673471689224,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0859672874212265,
"count": 1,
"self": 0.0859672874212265
}
}
}
}
}
}
}