{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0255050659179688,
"min": 1.0255050659179688,
"max": 2.857381582260132,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9791.5224609375,
"min": 9791.5224609375,
"max": 29262.4453125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.271628379821777,
"min": 0.31202349066734314,
"max": 13.271628379821777,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2587.967529296875,
"min": 60.532554626464844,
"max": 2694.39404296875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06709628630281206,
"min": 0.0660254170812122,
"max": 0.073068799982315,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26838514521124823,
"min": 0.26838514521124823,
"max": 0.36534399991157496,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18484398663756163,
"min": 0.134730610608719,
"max": 0.2682727135389167,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7393759465502465,
"min": 0.538922442434876,
"max": 1.2804830801837586,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000007e-06,
"min": 8.082097306000007e-06,
"max": 0.000291906002698,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400003e-05,
"min": 3.232838922400003e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19730200000000003,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.9617200000000001,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048653698,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.113636363636363,
"min": 4.0227272727272725,
"max": 26.113636363636363,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1149.0,
"min": 177.0,
"max": 1435.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.113636363636363,
"min": 4.0227272727272725,
"max": 26.113636363636363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1149.0,
"min": 177.0,
"max": 1435.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702631820",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1702632482"
},
"total": 662.2152684109999,
"count": 1,
"self": 0.44019715399997494,
"children": {
"run_training.setup": {
"total": 0.09758264799995686,
"count": 1,
"self": 0.09758264799995686
},
"TrainerController.start_learning": {
"total": 661.677488609,
"count": 1,
"self": 0.6700265350035579,
"children": {
"TrainerController._reset_env": {
"total": 3.672423515000105,
"count": 1,
"self": 3.672423515000105
},
"TrainerController.advance": {
"total": 657.2333848379963,
"count": 18199,
"self": 0.3439279099879968,
"children": {
"env_step": {
"total": 656.8894569280083,
"count": 18199,
"self": 487.13806016001433,
"children": {
"SubprocessEnvManager._take_step": {
"total": 169.41118277699331,
"count": 18199,
"self": 1.7047257149976076,
"children": {
"TorchPolicy.evaluate": {
"total": 167.7064570619957,
"count": 18199,
"self": 167.7064570619957
}
}
},
"workers": {
"total": 0.3402139910006099,
"count": 18199,
"self": 0.0,
"children": {
"worker_root": {
"total": 660.1498411719937,
"count": 18199,
"is_parallel": true,
"self": 397.7416923949917,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006567320999920412,
"count": 1,
"is_parallel": true,
"self": 0.004427912000096512,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021394089998238996,
"count": 10,
"is_parallel": true,
"self": 0.0021394089998238996
}
}
},
"UnityEnvironment.step": {
"total": 0.04394821999994747,
"count": 1,
"is_parallel": true,
"self": 0.0007454950000465033,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005324390000396306,
"count": 1,
"is_parallel": true,
"self": 0.0005324390000396306
},
"communicator.exchange": {
"total": 0.04011252299994794,
"count": 1,
"is_parallel": true,
"self": 0.04011252299994794
},
"steps_from_proto": {
"total": 0.0025577629999133933,
"count": 1,
"is_parallel": true,
"self": 0.0004764590001968827,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020813039997165106,
"count": 10,
"is_parallel": true,
"self": 0.0020813039997165106
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 262.408148777002,
"count": 18198,
"is_parallel": true,
"self": 12.167809974015427,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.267987956987895,
"count": 18198,
"is_parallel": true,
"self": 6.267987956987895
},
"communicator.exchange": {
"total": 204.1337092820014,
"count": 18198,
"is_parallel": true,
"self": 204.1337092820014
},
"steps_from_proto": {
"total": 39.83864156399727,
"count": 18198,
"is_parallel": true,
"self": 7.64818420797576,
"children": {
"_process_rank_one_or_two_observation": {
"total": 32.19045735602151,
"count": 181980,
"is_parallel": true,
"self": 32.19045735602151
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.000132229999962874,
"count": 1,
"self": 0.000132229999962874,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 650.3785606670385,
"count": 769143,
"is_parallel": true,
"self": 17.46123089795435,
"children": {
"process_trajectory": {
"total": 278.7448345500844,
"count": 769143,
"is_parallel": true,
"self": 277.9663911640846,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7784433859998217,
"count": 4,
"is_parallel": true,
"self": 0.7784433859998217
}
}
},
"_update_policy": {
"total": 354.17249521899976,
"count": 90,
"is_parallel": true,
"self": 135.31892008199998,
"children": {
"TorchPPOOptimizer.update": {
"total": 218.85357513699978,
"count": 12224,
"is_parallel": true,
"self": 218.85357513699978
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.101521491000085,
"count": 1,
"self": 0.0011211210000965366,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10040036999998847,
"count": 1,
"self": 0.10040036999998847
}
}
}
}
}
}
}