{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8152019381523132,
"min": 0.8152019381523132,
"max": 2.8638570308685303,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7792.51513671875,
"min": 7792.51513671875,
"max": 29391.765625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.57359790802002,
"min": 0.2432459592819214,
"max": 11.57359790802002,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2256.8515625,
"min": 47.18971633911133,
"max": 2339.421875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06950537470045531,
"min": 0.06346931100144466,
"max": 0.07288050800287986,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27802149880182125,
"min": 0.25387724400577866,
"max": 0.3644025400143993,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19050212467417998,
"min": 0.10610019283679625,
"max": 0.28632834035099725,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7620084986967199,
"min": 0.424400771347185,
"max": 1.2922151661386676,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 22.772727272727273,
"min": 3.0,
"max": 22.818181818181817,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1002.0,
"min": 132.0,
"max": 1239.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 22.772727272727273,
"min": 3.0,
"max": 22.818181818181817,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1002.0,
"min": 132.0,
"max": 1239.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702147105",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1702147570"
},
"total": 465.55053120599996,
"count": 1,
"self": 0.438762765999968,
"children": {
"run_training.setup": {
"total": 0.058895214000017404,
"count": 1,
"self": 0.058895214000017404
},
"TrainerController.start_learning": {
"total": 465.052873226,
"count": 1,
"self": 0.5261456849843853,
"children": {
"TrainerController._reset_env": {
"total": 3.5071018270000422,
"count": 1,
"self": 3.5071018270000422
},
"TrainerController.advance": {
"total": 460.9350592000153,
"count": 18200,
"self": 0.2563311770124983,
"children": {
"env_step": {
"total": 460.6787280230028,
"count": 18200,
"self": 309.6560578960058,
"children": {
"SubprocessEnvManager._take_step": {
"total": 150.7428781000009,
"count": 18200,
"self": 1.4270698770031913,
"children": {
"TorchPolicy.evaluate": {
"total": 149.3158082229977,
"count": 18200,
"self": 149.3158082229977
}
}
},
"workers": {
"total": 0.2797920269961196,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 463.84629081498997,
"count": 18200,
"is_parallel": true,
"self": 229.774504176993,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005254276000073332,
"count": 1,
"is_parallel": true,
"self": 0.003845808000164652,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014084679999086802,
"count": 10,
"is_parallel": true,
"self": 0.0014084679999086802
}
}
},
"UnityEnvironment.step": {
"total": 0.09306620600000315,
"count": 1,
"is_parallel": true,
"self": 0.0006192570000393971,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003636279999454928,
"count": 1,
"is_parallel": true,
"self": 0.0003636279999454928
},
"communicator.exchange": {
"total": 0.09014693000005991,
"count": 1,
"is_parallel": true,
"self": 0.09014693000005991
},
"steps_from_proto": {
"total": 0.0019363909999583484,
"count": 1,
"is_parallel": true,
"self": 0.00037864799980980024,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015577430001485482,
"count": 10,
"is_parallel": true,
"self": 0.0015577430001485482
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 234.07178663799698,
"count": 18199,
"is_parallel": true,
"self": 10.350308430985365,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.28460205099384,
"count": 18199,
"is_parallel": true,
"self": 5.28460205099384
},
"communicator.exchange": {
"total": 185.50845610301155,
"count": 18199,
"is_parallel": true,
"self": 185.50845610301155
},
"steps_from_proto": {
"total": 32.92842005300622,
"count": 18199,
"is_parallel": true,
"self": 6.100231205012278,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.828188847993943,
"count": 181990,
"is_parallel": true,
"self": 26.828188847993943
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.575200003069767e-05,
"count": 1,
"self": 3.575200003069767e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 457.060961659035,
"count": 492769,
"is_parallel": true,
"self": 10.236636047022785,
"children": {
"process_trajectory": {
"total": 258.5054684810119,
"count": 492769,
"is_parallel": true,
"self": 257.901580936012,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6038875449999068,
"count": 4,
"is_parallel": true,
"self": 0.6038875449999068
}
}
},
"_update_policy": {
"total": 188.3188571310003,
"count": 90,
"is_parallel": true,
"self": 57.67780100099549,
"children": {
"TorchPPOOptimizer.update": {
"total": 130.6410561300048,
"count": 4587,
"is_parallel": true,
"self": 130.6410561300048
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08453076200021314,
"count": 1,
"self": 0.0008362290002423833,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08369453299997076,
"count": 1,
"self": 0.08369453299997076
}
}
}
}
}
}
}