{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.750018835067749,
"min": 0.750018835067749,
"max": 2.8655190467834473,
"count": 25
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7119.9287109375,
"min": 7119.9287109375,
"max": 29345.78125,
"count": 25
},
"SnowballTarget.Step.mean": {
"value": 249944.0,
"min": 9952.0,
"max": 249944.0,
"count": 25
},
"SnowballTarget.Step.sum": {
"value": 249944.0,
"min": 9952.0,
"max": 249944.0,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.235240936279297,
"min": 0.3614194393157959,
"max": 13.267122268676758,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2567.63671875,
"min": 70.11537170410156,
"max": 2711.94091796875,
"count": 25
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 25
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 25
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07322696178116352,
"min": 0.06277650989021841,
"max": 0.07769774061036479,
"count": 25
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.29290784712465406,
"min": 0.25110603956087363,
"max": 0.3791876325978556,
"count": 25
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18015146167839274,
"min": 0.11752472590773785,
"max": 0.2648907637625348,
"count": 25
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.720605846713571,
"min": 0.4700989036309514,
"max": 1.3218833812895945,
"count": 25
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.7456980848e-06,
"min": 5.7456980848e-06,
"max": 0.0002935056021648,
"count": 25
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.29827923392e-05,
"min": 2.29827923392e-05,
"max": 0.0014081280306239997,
"count": 25
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10191520000000001,
"min": 0.10191520000000001,
"max": 0.19783520000000002,
"count": 25
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40766080000000005,
"min": 0.40766080000000005,
"max": 0.9693760000000002,
"count": 25
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00010556848000000002,
"min": 0.00010556848000000002,
"max": 0.004891976480000001,
"count": 25
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0004222739200000001,
"min": 0.0004222739200000001,
"max": 0.023471862399999998,
"count": 25
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.09090909090909,
"min": 3.6363636363636362,
"max": 26.11111111111111,
"count": 25
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1148.0,
"min": 160.0,
"max": 1431.0,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.09090909090909,
"min": 3.6363636363636362,
"max": 26.11111111111111,
"count": 25
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1148.0,
"min": 160.0,
"max": 1431.0,
"count": 25
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 25
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 25
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691937997",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget-v1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691938602"
},
"total": 604.8950028369999,
"count": 1,
"self": 0.43547828999976446,
"children": {
"run_training.setup": {
"total": 0.06410522500004845,
"count": 1,
"self": 0.06410522500004845
},
"TrainerController.start_learning": {
"total": 604.3954193220001,
"count": 1,
"self": 0.6698564709979564,
"children": {
"TrainerController._reset_env": {
"total": 5.235871419999967,
"count": 1,
"self": 5.235871419999967
},
"TrainerController.advance": {
"total": 598.3371146770022,
"count": 22733,
"self": 0.34287271501329997,
"children": {
"env_step": {
"total": 597.9942419619889,
"count": 22733,
"self": 433.5858333149822,
"children": {
"SubprocessEnvManager._take_step": {
"total": 164.0539418150048,
"count": 22733,
"self": 2.3038203060249316,
"children": {
"TorchPolicy.evaluate": {
"total": 161.75012150897987,
"count": 22733,
"self": 161.75012150897987
}
}
},
"workers": {
"total": 0.35446683200189,
"count": 22733,
"self": 0.0,
"children": {
"worker_root": {
"total": 602.5202703760019,
"count": 22733,
"is_parallel": true,
"self": 282.6369573349948,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00819687899991095,
"count": 1,
"is_parallel": true,
"self": 0.0059329060001118705,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00226397299979908,
"count": 10,
"is_parallel": true,
"self": 0.00226397299979908
}
}
},
"UnityEnvironment.step": {
"total": 0.035740783000051124,
"count": 1,
"is_parallel": true,
"self": 0.0007171829998924295,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00030385800005205965,
"count": 1,
"is_parallel": true,
"self": 0.00030385800005205965
},
"communicator.exchange": {
"total": 0.03236979000007523,
"count": 1,
"is_parallel": true,
"self": 0.03236979000007523
},
"steps_from_proto": {
"total": 0.002349952000031408,
"count": 1,
"is_parallel": true,
"self": 0.0004256890001670399,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019242629998643679,
"count": 10,
"is_parallel": true,
"self": 0.0019242629998643679
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 319.8833130410071,
"count": 22732,
"is_parallel": true,
"self": 13.43688321700813,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.047220670999536,
"count": 22732,
"is_parallel": true,
"self": 7.047220670999536
},
"communicator.exchange": {
"total": 251.36359821400129,
"count": 22732,
"is_parallel": true,
"self": 251.36359821400129
},
"steps_from_proto": {
"total": 48.03561093899816,
"count": 22732,
"is_parallel": true,
"self": 8.693040506007605,
"children": {
"_process_rank_one_or_two_observation": {
"total": 39.342570432990556,
"count": 227320,
"is_parallel": true,
"self": 39.342570432990556
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001512679998540989,
"count": 1,
"self": 0.0001512679998540989,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 593.6487764780245,
"count": 564675,
"is_parallel": true,
"self": 12.7672821100266,
"children": {
"process_trajectory": {
"total": 326.62262663099943,
"count": 564675,
"is_parallel": true,
"self": 325.7784531959994,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8441734350000161,
"count": 4,
"is_parallel": true,
"self": 0.8441734350000161
}
}
},
"_update_policy": {
"total": 254.25886773699847,
"count": 113,
"is_parallel": true,
"self": 97.54988089100527,
"children": {
"TorchPPOOptimizer.update": {
"total": 156.7089868459932,
"count": 5760,
"is_parallel": true,
"self": 156.7089868459932
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.15242548600008377,
"count": 1,
"self": 0.0008560349999697792,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15156945100011399,
"count": 1,
"self": 0.15156945100011399
}
}
}
}
}
}
}