{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.9358608722686768,
"min": 1.9358608722686768,
"max": 2.8482613563537598,
"count": 8
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 19931.623046875,
"min": 19931.623046875,
"max": 29075.05078125,
"count": 8
},
"SnowballTarget.Step.mean": {
"value": 79968.0,
"min": 9952.0,
"max": 79968.0,
"count": 8
},
"SnowballTarget.Step.sum": {
"value": 79968.0,
"min": 9952.0,
"max": 79968.0,
"count": 8
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 7.461071491241455,
"min": 0.2932460904121399,
"max": 7.461071491241455,
"count": 8
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1522.05859375,
"min": 56.889739990234375,
"max": 1522.05859375,
"count": 8
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06488326634965179,
"min": 0.06308913043809725,
"max": 0.073130420345942,
"count": 8
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.32441633174825896,
"min": 0.27174333068450873,
"max": 0.3463972639349545,
"count": 8
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.27419328701262385,
"min": 0.12139458527363947,
"max": 0.293154604148631,
"count": 8
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.3709664350631192,
"min": 0.4855783410945579,
"max": 1.3946799031075308,
"count": 8
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.9830093389999996e-05,
"min": 1.9830093389999996e-05,
"max": 0.00027970500676499996,
"count": 8
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 9.915046694999999e-05,
"min": 9.915046694999999e-05,
"max": 0.0012129000957,
"count": 8
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10661000000000001,
"min": 0.10661000000000001,
"max": 0.19323500000000002,
"count": 8
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.53305,
"min": 0.47594000000000003,
"max": 0.9043000000000001,
"count": 8
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0003398390000000001,
"min": 0.0003398390000000001,
"max": 0.0046624265000000005,
"count": 8
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0016991950000000006,
"min": 0.0016991950000000006,
"max": 0.02022457,
"count": 8
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 8
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 8
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 17.145454545454545,
"min": 3.340909090909091,
"max": 17.145454545454545,
"count": 8
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 943.0,
"min": 147.0,
"max": 943.0,
"count": 8
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 17.145454545454545,
"min": 3.340909090909091,
"max": 17.145454545454545,
"count": 8
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 943.0,
"min": 147.0,
"max": 943.0,
"count": 8
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 8
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 8
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1742992964",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1742993204"
},
"total": 239.80585605699991,
"count": 1,
"self": 0.9279279909999332,
"children": {
"run_training.setup": {
"total": 0.03956769199999144,
"count": 1,
"self": 0.03956769199999144
},
"TrainerController.start_learning": {
"total": 238.838360374,
"count": 1,
"self": 0.2520769939985712,
"children": {
"TrainerController._reset_env": {
"total": 3.7963413200000105,
"count": 1,
"self": 3.7963413200000105
},
"TrainerController.advance": {
"total": 234.68782666300137,
"count": 7328,
"self": 0.26227418099938404,
"children": {
"env_step": {
"total": 167.32215617700479,
"count": 7328,
"self": 144.2428043570102,
"children": {
"SubprocessEnvManager._take_step": {
"total": 22.92308768499754,
"count": 7328,
"self": 0.7601262289899751,
"children": {
"TorchPolicy.evaluate": {
"total": 22.162961456007565,
"count": 7328,
"self": 22.162961456007565
}
}
},
"workers": {
"total": 0.15626413499705905,
"count": 7328,
"self": 0.0,
"children": {
"worker_root": {
"total": 237.92768148299967,
"count": 7328,
"is_parallel": true,
"self": 111.8503312030025,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008004618999962076,
"count": 1,
"is_parallel": true,
"self": 0.005578962999948089,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002425656000013987,
"count": 10,
"is_parallel": true,
"self": 0.002425656000013987
}
}
},
"UnityEnvironment.step": {
"total": 0.05041669100000945,
"count": 1,
"is_parallel": true,
"self": 0.000837118000049486,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000571386000046914,
"count": 1,
"is_parallel": true,
"self": 0.000571386000046914
},
"communicator.exchange": {
"total": 0.04665097399993101,
"count": 1,
"is_parallel": true,
"self": 0.04665097399993101
},
"steps_from_proto": {
"total": 0.002357212999982039,
"count": 1,
"is_parallel": true,
"self": 0.0004612010002347233,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018960119997473157,
"count": 10,
"is_parallel": true,
"self": 0.0018960119997473157
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 126.07735027999718,
"count": 7327,
"is_parallel": true,
"self": 6.346507882003834,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.3031995200017263,
"count": 7327,
"is_parallel": true,
"self": 3.3031995200017263
},
"communicator.exchange": {
"total": 98.31721397600029,
"count": 7327,
"is_parallel": true,
"self": 98.31721397600029
},
"steps_from_proto": {
"total": 18.110428901991327,
"count": 7327,
"is_parallel": true,
"self": 3.355520439002362,
"children": {
"_process_rank_one_or_two_observation": {
"total": 14.754908462988965,
"count": 73270,
"is_parallel": true,
"self": 14.754908462988965
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 67.1033963049972,
"count": 7328,
"self": 0.31217211599368966,
"children": {
"process_trajectory": {
"total": 12.763442678003798,
"count": 7328,
"self": 12.301210169003639,
"children": {
"RLTrainer._checkpoint": {
"total": 0.46223250900015955,
"count": 4,
"self": 0.46223250900015955
}
}
},
"_update_policy": {
"total": 54.027781510999716,
"count": 36,
"self": 21.843356452996204,
"children": {
"TorchPPOOptimizer.update": {
"total": 32.18442505800351,
"count": 1833,
"self": 32.18442505800351
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5640000583516667e-06,
"count": 1,
"self": 1.5640000583516667e-06
},
"TrainerController._save_models": {
"total": 0.10211383299997578,
"count": 1,
"self": 0.0018527319998611347,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10026110100011465,
"count": 1,
"self": 0.10026110100011465
}
}
}
}
}
}
}