{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9780012965202332,
"min": 0.9648075103759766,
"max": 2.8778982162475586,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9983.4375,
"min": 8729.13671875,
"max": 30664.1015625,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499912.0,
"min": 9952.0,
"max": 499912.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499912.0,
"min": 9952.0,
"max": 499912.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.493892669677734,
"min": 0.11736617982387543,
"max": 13.493892669677734,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1362.8831787109375,
"min": 11.384519577026367,
"max": 1384.6038818359375,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.566037735849058,
"min": 3.7045454545454546,
"max": 26.695652173913043,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1408.0,
"min": 163.0,
"max": 1462.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.566037735849058,
"min": 3.7045454545454546,
"max": 26.695652173913043,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1408.0,
"min": 163.0,
"max": 1462.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.052440652597522106,
"min": 0.040440473217399006,
"max": 0.06067363486748517,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.10488130519504421,
"min": 0.08088094643479801,
"max": 0.16149033968994286,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20077788712931616,
"min": 0.12738577981351637,
"max": 0.3156682547985339,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.4015557742586323,
"min": 0.25477155962703274,
"max": 0.8888447877823138,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.000099000000002e-06,
"min": 2.000099000000002e-06,
"max": 0.00019736000132000004,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 4.000198000000004e-06,
"min": 4.000198000000004e-06,
"max": 0.00055776002112,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10099999999999999,
"min": 0.10099999999999999,
"max": 0.19868000000000002,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.20199999999999999,
"min": 0.20199999999999999,
"max": 0.5788800000000001,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00010990000000000011,
"min": 0.00010990000000000011,
"max": 0.009868132000000002,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00021980000000000022,
"min": 0.00021980000000000022,
"max": 0.027890112,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1753912588",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1753913588"
},
"total": 1000.0545781999999,
"count": 1,
"self": 0.44203283000024385,
"children": {
"run_training.setup": {
"total": 0.023090142000000924,
"count": 1,
"self": 0.023090142000000924
},
"TrainerController.start_learning": {
"total": 999.5894552279997,
"count": 1,
"self": 0.9762422539811269,
"children": {
"TrainerController._reset_env": {
"total": 3.050249360999942,
"count": 1,
"self": 3.050249360999942
},
"TrainerController.advance": {
"total": 995.4461496390188,
"count": 45528,
"self": 0.9756943310028419,
"children": {
"env_step": {
"total": 767.9604588899992,
"count": 45528,
"self": 589.4801228999534,
"children": {
"SubprocessEnvManager._take_step": {
"total": 177.93577107603755,
"count": 45528,
"self": 3.2406384570774662,
"children": {
"TorchPolicy.evaluate": {
"total": 174.6951326189601,
"count": 45528,
"self": 174.6951326189601
}
}
},
"workers": {
"total": 0.5445649140083333,
"count": 45528,
"self": 0.0,
"children": {
"worker_root": {
"total": 996.0919809290747,
"count": 45528,
"is_parallel": true,
"self": 478.48188736501993,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004690971999934845,
"count": 1,
"is_parallel": true,
"self": 0.0032492880002337188,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014416839997011266,
"count": 10,
"is_parallel": true,
"self": 0.0014416839997011266
}
}
},
"UnityEnvironment.step": {
"total": 0.03652988799990453,
"count": 1,
"is_parallel": true,
"self": 0.0006306209998001577,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004417119998834096,
"count": 1,
"is_parallel": true,
"self": 0.0004417119998834096
},
"communicator.exchange": {
"total": 0.03351520900014293,
"count": 1,
"is_parallel": true,
"self": 0.03351520900014293
},
"steps_from_proto": {
"total": 0.0019423460000780324,
"count": 1,
"is_parallel": true,
"self": 0.0003645159999905445,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015778300000874879,
"count": 10,
"is_parallel": true,
"self": 0.0015778300000874879
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 517.6100935640548,
"count": 45527,
"is_parallel": true,
"self": 24.708169303090017,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.562412022017725,
"count": 45527,
"is_parallel": true,
"self": 13.562412022017725
},
"communicator.exchange": {
"total": 400.1725197149624,
"count": 45527,
"is_parallel": true,
"self": 400.1725197149624
},
"steps_from_proto": {
"total": 79.16699252398462,
"count": 45527,
"is_parallel": true,
"self": 14.228092707029191,
"children": {
"_process_rank_one_or_two_observation": {
"total": 64.93889981695543,
"count": 455270,
"is_parallel": true,
"self": 64.93889981695543
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 226.50999641801673,
"count": 45528,
"self": 1.14351924001744,
"children": {
"process_trajectory": {
"total": 54.9041498520005,
"count": 45528,
"self": 53.664709935000246,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2394399170002544,
"count": 10,
"self": 1.2394399170002544
}
}
},
"_update_policy": {
"total": 170.46232732599879,
"count": 113,
"self": 95.50227559700215,
"children": {
"TorchPPOOptimizer.update": {
"total": 74.96005172899663,
"count": 5763,
"self": 74.96005172899663
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.860001798893791e-07,
"count": 1,
"self": 8.860001798893791e-07
},
"TrainerController._save_models": {
"total": 0.11681308799961698,
"count": 1,
"self": 0.00167971899963959,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11513336899997739,
"count": 1,
"self": 0.11513336899997739
}
}
}
}
}
}
}