{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9699838161468506,
"min": 0.9699838161468506,
"max": 2.872222661972046,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9304.0849609375,
"min": 9304.0849609375,
"max": 29446.02734375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.97990894317627,
"min": 0.3812831938266754,
"max": 13.012848854064941,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2531.082275390625,
"min": 73.96894073486328,
"max": 2654.62109375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06987307209280569,
"min": 0.06310927961977522,
"max": 0.07485789516720295,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27949228837122275,
"min": 0.2524371184791009,
"max": 0.3646052831078551,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1902139256251793,
"min": 0.1142068327594019,
"max": 0.28664275025035824,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7608557025007172,
"min": 0.4568273310376076,
"max": 1.3899727624421025,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.347009730600001e-05,
"min": 1.347009730600001e-05,
"max": 0.00048647000270600005,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.388038922400004e-05,
"min": 5.388038922400004e-05,
"max": 0.00230860003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.25,
"min": 3.590909090909091,
"max": 25.836363636363636,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1111.0,
"min": 158.0,
"max": 1421.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.25,
"min": 3.590909090909091,
"max": 25.836363636363636,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1111.0,
"min": 158.0,
"max": 1421.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1730964810",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1730965417"
},
"total": 606.515037178,
"count": 1,
"self": 0.5791858499999307,
"children": {
"run_training.setup": {
"total": 0.07194894099995963,
"count": 1,
"self": 0.07194894099995963
},
"TrainerController.start_learning": {
"total": 605.863902387,
"count": 1,
"self": 0.8719104330004939,
"children": {
"TrainerController._reset_env": {
"total": 6.353950586999986,
"count": 1,
"self": 6.353950586999986
},
"TrainerController.advance": {
"total": 598.5524749329996,
"count": 18204,
"self": 0.44079491400793813,
"children": {
"env_step": {
"total": 598.1116800189917,
"count": 18204,
"self": 460.5285866319929,
"children": {
"SubprocessEnvManager._take_step": {
"total": 137.1722062080006,
"count": 18204,
"self": 2.460940305002339,
"children": {
"TorchPolicy.evaluate": {
"total": 134.71126590299826,
"count": 18204,
"self": 134.71126590299826
}
}
},
"workers": {
"total": 0.41088717899822313,
"count": 18204,
"self": 0.0,
"children": {
"worker_root": {
"total": 603.8692599249997,
"count": 18204,
"is_parallel": true,
"self": 288.678596038006,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0026498940000010407,
"count": 1,
"is_parallel": true,
"self": 0.0008113430000662447,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001838550999934796,
"count": 10,
"is_parallel": true,
"self": 0.001838550999934796
}
}
},
"UnityEnvironment.step": {
"total": 0.04435041200002843,
"count": 1,
"is_parallel": true,
"self": 0.0009761590000039178,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047568899998395864,
"count": 1,
"is_parallel": true,
"self": 0.00047568899998395864
},
"communicator.exchange": {
"total": 0.04054071900003464,
"count": 1,
"is_parallel": true,
"self": 0.04054071900003464
},
"steps_from_proto": {
"total": 0.0023578450000059092,
"count": 1,
"is_parallel": true,
"self": 0.0004575180000756518,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019003269999302574,
"count": 10,
"is_parallel": true,
"self": 0.0019003269999302574
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 315.1906638869937,
"count": 18203,
"is_parallel": true,
"self": 15.043272027000967,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.6687072699937175,
"count": 18203,
"is_parallel": true,
"self": 7.6687072699937175
},
"communicator.exchange": {
"total": 247.69030748400172,
"count": 18203,
"is_parallel": true,
"self": 247.69030748400172
},
"steps_from_proto": {
"total": 44.78837710599731,
"count": 18203,
"is_parallel": true,
"self": 9.283642563983506,
"children": {
"_process_rank_one_or_two_observation": {
"total": 35.50473454201381,
"count": 182030,
"is_parallel": true,
"self": 35.50473454201381
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00016232999996645958,
"count": 1,
"self": 0.00016232999996645958,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 591.7496217469978,
"count": 793256,
"is_parallel": true,
"self": 17.596911781010363,
"children": {
"process_trajectory": {
"total": 318.50863661598714,
"count": 793256,
"is_parallel": true,
"self": 317.3355248529871,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1731117630000654,
"count": 4,
"is_parallel": true,
"self": 1.1731117630000654
}
}
},
"_update_policy": {
"total": 255.64407335000027,
"count": 90,
"is_parallel": true,
"self": 72.63145363799862,
"children": {
"TorchPPOOptimizer.update": {
"total": 183.01261971200165,
"count": 4587,
"is_parallel": true,
"self": 183.01261971200165
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.0854041039999629,
"count": 1,
"self": 0.0012824770000179342,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08412162699994497,
"count": 1,
"self": 0.08412162699994497
}
}
}
}
}
}
}