{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0338371992111206,
"min": 1.0199315547943115,
"max": 2.857163429260254,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9871.0771484375,
"min": 9871.0771484375,
"max": 29260.2109375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.830883026123047,
"min": 0.4256573021411896,
"max": 12.830883026123047,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2502.022216796875,
"min": 82.5775146484375,
"max": 2596.598876953125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07145661792909618,
"min": 0.05814909093116655,
"max": 0.0742870956547039,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2858264717163847,
"min": 0.2325963637246662,
"max": 0.3714354782735195,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21671707110077726,
"min": 0.12429600397316629,
"max": 0.2787435427088948,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.866868284403109,
"min": 0.49718401589266514,
"max": 1.393717713544474,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.181818181818183,
"min": 3.409090909090909,
"max": 25.613636363636363,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1108.0,
"min": 150.0,
"max": 1374.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.181818181818183,
"min": 3.409090909090909,
"max": 25.613636363636363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1108.0,
"min": 150.0,
"max": 1374.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1723282190",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1723282620"
},
"total": 430.1108458709999,
"count": 1,
"self": 0.43409428699999353,
"children": {
"run_training.setup": {
"total": 0.06019147199992858,
"count": 1,
"self": 0.06019147199992858
},
"TrainerController.start_learning": {
"total": 429.616560112,
"count": 1,
"self": 0.5221749880072366,
"children": {
"TrainerController._reset_env": {
"total": 3.3058169979999548,
"count": 1,
"self": 3.3058169979999548
},
"TrainerController.advance": {
"total": 425.70096580399274,
"count": 18202,
"self": 0.2397714259842587,
"children": {
"env_step": {
"total": 425.4611943780085,
"count": 18202,
"self": 273.219927785011,
"children": {
"SubprocessEnvManager._take_step": {
"total": 151.97373609599867,
"count": 18202,
"self": 1.3926218340026253,
"children": {
"TorchPolicy.evaluate": {
"total": 150.58111426199605,
"count": 18202,
"self": 150.58111426199605
}
}
},
"workers": {
"total": 0.26753049699880194,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 428.5514817239973,
"count": 18202,
"is_parallel": true,
"self": 219.404728397001,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008495280000033745,
"count": 1,
"is_parallel": true,
"self": 0.0039357580001251335,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004559521999908611,
"count": 10,
"is_parallel": true,
"self": 0.004559521999908611
}
}
},
"UnityEnvironment.step": {
"total": 0.03471480200005317,
"count": 1,
"is_parallel": true,
"self": 0.0006260220001195194,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00038387799997963157,
"count": 1,
"is_parallel": true,
"self": 0.00038387799997963157
},
"communicator.exchange": {
"total": 0.031757118999962586,
"count": 1,
"is_parallel": true,
"self": 0.031757118999962586
},
"steps_from_proto": {
"total": 0.001947782999991432,
"count": 1,
"is_parallel": true,
"self": 0.00037888000019847823,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015689029997929538,
"count": 10,
"is_parallel": true,
"self": 0.0015689029997929538
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 209.1467533269963,
"count": 18201,
"is_parallel": true,
"self": 9.745168618001912,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.070521683989227,
"count": 18201,
"is_parallel": true,
"self": 5.070521683989227
},
"communicator.exchange": {
"total": 162.28939001499748,
"count": 18201,
"is_parallel": true,
"self": 162.28939001499748
},
"steps_from_proto": {
"total": 32.041673010007685,
"count": 18201,
"is_parallel": true,
"self": 5.873843573005843,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.167829437001842,
"count": 182010,
"is_parallel": true,
"self": 26.167829437001842
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00014044599993212614,
"count": 1,
"self": 0.00014044599993212614,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 420.19232761599164,
"count": 632567,
"is_parallel": true,
"self": 13.592537042963158,
"children": {
"process_trajectory": {
"total": 230.97754124502808,
"count": 632567,
"is_parallel": true,
"self": 229.99581252802795,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9817287170001237,
"count": 4,
"is_parallel": true,
"self": 0.9817287170001237
}
}
},
"_update_policy": {
"total": 175.6222493280004,
"count": 90,
"is_parallel": true,
"self": 55.28060483600132,
"children": {
"TorchPPOOptimizer.update": {
"total": 120.34164449199909,
"count": 4584,
"is_parallel": true,
"self": 120.34164449199909
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.087461876000134,
"count": 1,
"self": 0.0008754960001624568,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08658637999997154,
"count": 1,
"self": 0.08658637999997154
}
}
}
}
}
}
}