{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9338898658752441,
"min": 0.9338898658752441,
"max": 2.8689517974853516,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8947.5986328125,
"min": 8947.5986328125,
"max": 29412.494140625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.722090721130371,
"min": 0.3543870747089386,
"max": 12.722090721130371,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2480.8076171875,
"min": 68.75109100341797,
"max": 2580.09326171875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06389760770024641,
"min": 0.06048203771865433,
"max": 0.07418364721975819,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.25559043080098565,
"min": 0.25559043080098565,
"max": 0.37091823609879093,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19208907745048112,
"min": 0.10942604021379249,
"max": 0.2969048646150851,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7683563098019245,
"min": 0.43770416085516994,
"max": 1.4845243230754255,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.136363636363637,
"min": 3.090909090909091,
"max": 25.136363636363637,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1106.0,
"min": 136.0,
"max": 1380.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.136363636363637,
"min": 3.090909090909091,
"max": 25.136363636363637,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1106.0,
"min": 136.0,
"max": 1380.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1691712841",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1691713326"
},
"total": 485.52707172500004,
"count": 1,
"self": 0.4334915419999561,
"children": {
"run_training.setup": {
"total": 0.06936588300004587,
"count": 1,
"self": 0.06936588300004587
},
"TrainerController.start_learning": {
"total": 485.02421430000004,
"count": 1,
"self": 0.6060629789923269,
"children": {
"TrainerController._reset_env": {
"total": 5.524695185000041,
"count": 1,
"self": 5.524695185000041
},
"TrainerController.advance": {
"total": 478.7550886480077,
"count": 18204,
"self": 0.29797910400543515,
"children": {
"env_step": {
"total": 478.45710954400226,
"count": 18204,
"self": 349.2869493369958,
"children": {
"SubprocessEnvManager._take_step": {
"total": 128.87395347800657,
"count": 18204,
"self": 1.9341646150005545,
"children": {
"TorchPolicy.evaluate": {
"total": 126.93978886300602,
"count": 18204,
"self": 126.93978886300602
}
}
},
"workers": {
"total": 0.2962067289998913,
"count": 18204,
"self": 0.0,
"children": {
"worker_root": {
"total": 483.4705310920076,
"count": 18204,
"is_parallel": true,
"self": 227.15128053500905,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0054941759999564965,
"count": 1,
"is_parallel": true,
"self": 0.003980941000008897,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015132349999475991,
"count": 10,
"is_parallel": true,
"self": 0.0015132349999475991
}
}
},
"UnityEnvironment.step": {
"total": 0.07169022999994468,
"count": 1,
"is_parallel": true,
"self": 0.0006551129998797478,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004355849999910788,
"count": 1,
"is_parallel": true,
"self": 0.0004355849999910788
},
"communicator.exchange": {
"total": 0.06530670600000121,
"count": 1,
"is_parallel": true,
"self": 0.06530670600000121
},
"steps_from_proto": {
"total": 0.005292826000072637,
"count": 1,
"is_parallel": true,
"self": 0.0035043499998437255,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017884760002289113,
"count": 10,
"is_parallel": true,
"self": 0.0017884760002289113
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 256.31925055699855,
"count": 18203,
"is_parallel": true,
"self": 10.811274374019945,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.318291938990342,
"count": 18203,
"is_parallel": true,
"self": 5.318291938990342
},
"communicator.exchange": {
"total": 203.85328888399135,
"count": 18203,
"is_parallel": true,
"self": 203.85328888399135
},
"steps_from_proto": {
"total": 36.336395359996914,
"count": 18203,
"is_parallel": true,
"self": 6.699836900941136,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.636558459055777,
"count": 182030,
"is_parallel": true,
"self": 29.636558459055777
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.198599996627308e-05,
"count": 1,
"self": 7.198599996627308e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 475.1739823440238,
"count": 451394,
"is_parallel": true,
"self": 10.012070388012035,
"children": {
"process_trajectory": {
"total": 259.32658341101205,
"count": 451394,
"is_parallel": true,
"self": 258.487185257012,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8393981540000368,
"count": 4,
"is_parallel": true,
"self": 0.8393981540000368
}
}
},
"_update_policy": {
"total": 205.8353285449997,
"count": 90,
"is_parallel": true,
"self": 84.10598880500118,
"children": {
"TorchPPOOptimizer.update": {
"total": 121.72933973999852,
"count": 4584,
"is_parallel": true,
"self": 121.72933973999852
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1382955020000054,
"count": 1,
"self": 0.0009108990000186168,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13738460299998678,
"count": 1,
"self": 0.13738460299998678
}
}
}
}
}
}
}