{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.602304458618164,
"min": 1.602304458618164,
"max": 2.8853836059570312,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 15475.056640625,
"min": 15475.056640625,
"max": 29549.212890625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 1.462786078453064,
"min": 0.20903053879737854,
"max": 1.462786078453064,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 285.2432861328125,
"min": 40.55192565917969,
"max": 295.2460021972656,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.04278062743071046,
"min": 0.03740328233245583,
"max": 0.05381372995640656,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2994643920149732,
"min": 0.25109103050393367,
"max": 0.3492709052506447,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.064781530272393,
"min": 0.0370886461454488,
"max": 0.09625423115988573,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.45347071190675103,
"min": 0.2596205230181416,
"max": 0.5775253869593143,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.382829907157725e-05,
"min": 8.382829907157725e-05,
"max": 0.0032732852856490837,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0005867980935010407,
"min": 0.0005867980935010407,
"max": 0.021797750579737155,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.09912168332660047,
"min": 0.06570389216053214,
"max": 0.09912168332660047,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.6938517832862033,
"min": 0.3942233529631929,
"max": 0.6938517832862033,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0011226110353096256,
"min": 0.0011226110353096256,
"max": 0.04345472694873276,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.007858277247167379,
"min": 0.007858277247167379,
"max": 0.2893809637295982,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.386363636363637,
"min": 3.022727272727273,
"max": 24.386363636363637,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1073.0,
"min": 133.0,
"max": 1335.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 18.882754775610838,
"min": 2.3405465077270162,
"max": 18.882754775610838,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 830.8412101268768,
"min": 102.98404633998871,
"max": 1033.712035536766,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1734712356",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget14 --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1734712934"
},
"total": 578.3493716190005,
"count": 1,
"self": 1.054238938000708,
"children": {
"run_training.setup": {
"total": 0.0741157310003473,
"count": 1,
"self": 0.0741157310003473
},
"TrainerController.start_learning": {
"total": 577.2210169499995,
"count": 1,
"self": 1.023016461919724,
"children": {
"TrainerController._reset_env": {
"total": 3.221277755000301,
"count": 1,
"self": 3.221277755000301
},
"TrainerController.advance": {
"total": 572.8177740130795,
"count": 18215,
"self": 0.4886110569404991,
"children": {
"env_step": {
"total": 572.329162956139,
"count": 18215,
"self": 412.958888128067,
"children": {
"SubprocessEnvManager._take_step": {
"total": 158.93467012807014,
"count": 18215,
"self": 3.019371363108803,
"children": {
"TorchPolicy.evaluate": {
"total": 155.91529876496134,
"count": 18215,
"self": 155.91529876496134
}
}
},
"workers": {
"total": 0.43560470000193163,
"count": 18215,
"self": 0.0,
"children": {
"worker_root": {
"total": 574.958547064989,
"count": 18215,
"is_parallel": true,
"self": 249.5135799089685,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003545269999449374,
"count": 1,
"is_parallel": true,
"self": 0.0008927559983931133,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002652514001056261,
"count": 10,
"is_parallel": true,
"self": 0.002652514001056261
}
}
},
"UnityEnvironment.step": {
"total": 0.1095645350005725,
"count": 1,
"is_parallel": true,
"self": 0.0008891070010577096,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045683400003326824,
"count": 1,
"is_parallel": true,
"self": 0.00045683400003326824
},
"communicator.exchange": {
"total": 0.10594400099944323,
"count": 1,
"is_parallel": true,
"self": 0.10594400099944323
},
"steps_from_proto": {
"total": 0.002274593000038294,
"count": 1,
"is_parallel": true,
"self": 0.0004177509981673211,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001856842001870973,
"count": 10,
"is_parallel": true,
"self": 0.001856842001870973
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 325.44496715602054,
"count": 18214,
"is_parallel": true,
"self": 15.366280455989,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.844114936006918,
"count": 18214,
"is_parallel": true,
"self": 7.844114936006918
},
"communicator.exchange": {
"total": 255.65704095400451,
"count": 18214,
"is_parallel": true,
"self": 255.65704095400451
},
"steps_from_proto": {
"total": 46.57753081002011,
"count": 18214,
"is_parallel": true,
"self": 9.352727159182905,
"children": {
"_process_rank_one_or_two_observation": {
"total": 37.2248036508372,
"count": 182140,
"is_parallel": true,
"self": 37.2248036508372
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0003579949998311349,
"count": 1,
"self": 0.0003579949998311349,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 568.5701288763703,
"count": 516846,
"is_parallel": true,
"self": 12.44683584858194,
"children": {
"process_trajectory": {
"total": 251.86072053578755,
"count": 516846,
"is_parallel": true,
"self": 250.94444069578822,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9162798399993335,
"count": 4,
"is_parallel": true,
"self": 0.9162798399993335
}
}
},
"_update_policy": {
"total": 304.2625724920008,
"count": 137,
"is_parallel": true,
"self": 84.32923142198342,
"children": {
"TorchPPOOptimizer.update": {
"total": 219.93334107001738,
"count": 2046,
"is_parallel": true,
"self": 219.93334107001738
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.15859072500006732,
"count": 1,
"self": 0.0022051480000300216,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1563855770000373,
"count": 1,
"self": 0.1563855770000373
}
}
}
}
}
}
}