{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.209212303161621,
"min": 1.209212303161621,
"max": 2.8242242336273193,
"count": 8
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 59483.5703125,
"min": 59483.5703125,
"max": 142905.75,
"count": 8
},
"SnowballTarget.Step.mean": {
"value": 399968.0,
"min": 49952.0,
"max": 399968.0,
"count": 8
},
"SnowballTarget.Step.sum": {
"value": 399968.0,
"min": 49952.0,
"max": 399968.0,
"count": 8
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.875980377197266,
"min": 0.6528778672218323,
"max": 11.875980377197266,
"count": 8
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 5926.1142578125,
"min": 324.48028564453125,
"max": 5926.1142578125,
"count": 8
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 8
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 50347.0,
"min": 48158.0,
"max": 50347.0,
"count": 8
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.805668016194332,
"min": 5.131147540983607,
"max": 24.805668016194332,
"count": 8
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 6127.0,
"min": 1252.0,
"max": 6127.0,
"count": 8
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.805668016194332,
"min": 5.131147540983607,
"max": 24.805668016194332,
"count": 8
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 6127.0,
"min": 1252.0,
"max": 6127.0,
"count": 8
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.04958774980433663,
"min": 0.0474208151068298,
"max": 0.05259989834922439,
"count": 8
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2975264988260198,
"min": 0.2516684853641588,
"max": 0.2975264988260198,
"count": 8
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.23279210964059519,
"min": 0.1735464586492847,
"max": 0.3095150000714009,
"count": 8
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.3967526578435712,
"min": 0.8677322932464234,
"max": 1.8570900004284057,
"count": 8
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.6250093500000004e-05,
"min": 1.6250093500000004e-05,
"max": 0.00023350000660000004,
"count": 8
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 9.750056100000003e-05,
"min": 9.750056100000003e-05,
"max": 0.0012195001122,
"count": 8
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.1065,
"min": 0.1065,
"max": 0.1934,
"count": 8
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.639,
"min": 0.5930000000000001,
"max": 1.0878,
"count": 8
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00065935,
"min": 0.00065935,
"max": 0.00934066,
"count": 8
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0039561,
"min": 0.0039561,
"max": 0.04879121999999999,
"count": 8
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 8
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 8
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1758145051",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "1.1.0",
"mlagents_envs_version": "1.1.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1758145728"
},
"total": 677.4436931350001,
"count": 1,
"self": 0.32109526099975483,
"children": {
"run_training.setup": {
"total": 0.026454711000042153,
"count": 1,
"self": 0.026454711000042153
},
"TrainerController.start_learning": {
"total": 677.0961431630003,
"count": 1,
"self": 0.6332221040242985,
"children": {
"TrainerController._reset_env": {
"total": 1.9476772569996683,
"count": 1,
"self": 1.9476772569996683
},
"TrainerController.advance": {
"total": 674.4041693889767,
"count": 36400,
"self": 0.6272513990229527,
"children": {
"env_step": {
"total": 508.21731679997174,
"count": 36400,
"self": 381.10785631494036,
"children": {
"SubprocessEnvManager._take_step": {
"total": 126.708802362999,
"count": 36400,
"self": 2.2649682640167157,
"children": {
"TorchPolicy.evaluate": {
"total": 124.44383409898228,
"count": 36400,
"self": 124.44383409898228
}
}
},
"workers": {
"total": 0.4006581220323824,
"count": 36400,
"self": 0.0,
"children": {
"worker_root": {
"total": 675.8870114989945,
"count": 36400,
"is_parallel": true,
"self": 339.12555576698423,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020347529998616665,
"count": 1,
"is_parallel": true,
"self": 0.0006430769994949515,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001391676000366715,
"count": 10,
"is_parallel": true,
"self": 0.001391676000366715
}
}
},
"UnityEnvironment.step": {
"total": 0.02611853799999153,
"count": 1,
"is_parallel": true,
"self": 0.0003562830002010742,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002890020000450022,
"count": 1,
"is_parallel": true,
"self": 0.0002890020000450022
},
"communicator.exchange": {
"total": 0.02425154799993834,
"count": 1,
"is_parallel": true,
"self": 0.02425154799993834
},
"steps_from_proto": {
"total": 0.0012217049998071161,
"count": 1,
"is_parallel": true,
"self": 0.0002494999998816638,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009722049999254523,
"count": 10,
"is_parallel": true,
"self": 0.0009722049999254523
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 336.7614557320103,
"count": 36399,
"is_parallel": true,
"self": 11.670995255972684,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.499668004979867,
"count": 36399,
"is_parallel": true,
"self": 6.499668004979867
},
"communicator.exchange": {
"total": 279.07402830503634,
"count": 36399,
"is_parallel": true,
"self": 279.07402830503634
},
"steps_from_proto": {
"total": 39.5167641660214,
"count": 36399,
"is_parallel": true,
"self": 7.585900557974128,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.930863608047275,
"count": 363990,
"is_parallel": true,
"self": 31.930863608047275
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 165.559601189982,
"count": 36400,
"self": 0.8393168810180214,
"children": {
"process_trajectory": {
"total": 37.85711024496686,
"count": 36400,
"self": 37.85711024496686
},
"_update_policy": {
"total": 126.86317406399712,
"count": 45,
"self": 71.88893897100843,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.97423509298869,
"count": 4590,
"self": 54.97423509298869
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0249996194033884e-06,
"count": 1,
"self": 1.0249996194033884e-06
},
"TrainerController._save_models": {
"total": 0.11107338800002253,
"count": 1,
"self": 0.0010064709999824117,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11006691700004012,
"count": 1,
"self": 0.11006691700004012
}
}
}
}
}
}
}