{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7273720502853394,
"min": 0.7273720502853394,
"max": 2.7979791164398193,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 14401.966796875,
"min": 14401.966796875,
"max": 57369.76171875,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 19992.0,
"max": 199984.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 19992.0,
"max": 199984.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.030179023742676,
"min": 1.1284499168395996,
"max": 13.030179023742676,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 5199.04150390625,
"min": 450.25152587890625,
"max": 5199.04150390625,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06388534142720474,
"min": 0.06388534142720474,
"max": 0.07315084368319197,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.5749680728448426,
"min": 0.5749680728448426,
"max": 0.6583575931487277,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18471286328984243,
"min": 0.17604525040374552,
"max": 0.2716292394142525,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.662415769608582,
"min": 1.5844072536337097,
"max": 2.4446631547282722,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.1776094556000006e-05,
"min": 2.1776094556000006e-05,
"max": 0.00037817600545600005,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.00019598485100400006,
"min": 0.00019598485100400006,
"max": 0.0034035840491040007,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10544400000000001,
"min": 0.10544400000000001,
"max": 0.19454400000000002,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.9489960000000001,
"min": 0.9489960000000001,
"max": 1.7508960000000002,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00028165560000000005,
"min": 0.00028165560000000005,
"max": 0.0047277456,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0025349004000000007,
"min": 0.0025349004000000007,
"max": 0.0425497104,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 19701.0,
"min": 19701.0,
"max": 19701.0,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.7979797979798,
"min": 5.404040404040404,
"max": 25.7979797979798,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 2554.0,
"min": 535.0,
"max": 2554.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.7979797979798,
"min": 5.404040404040404,
"max": 25.7979797979798,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 2554.0,
"min": 535.0,
"max": 2554.0,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1748414469",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1748414882"
},
"total": 413.2558686599998,
"count": 1,
"self": 0.4438873199997033,
"children": {
"run_training.setup": {
"total": 0.02965529800007971,
"count": 1,
"self": 0.02965529800007971
},
"TrainerController.start_learning": {
"total": 412.782326042,
"count": 1,
"self": 0.3389531480002006,
"children": {
"TrainerController._reset_env": {
"total": 3.6728264000000763,
"count": 1,
"self": 3.6728264000000763
},
"TrainerController.advance": {
"total": 408.68416498199986,
"count": 18192,
"self": 0.35618187599243356,
"children": {
"env_step": {
"total": 288.81044256702353,
"count": 18192,
"self": 220.44717880906705,
"children": {
"SubprocessEnvManager._take_step": {
"total": 68.16555426698369,
"count": 18192,
"self": 1.2206363869941015,
"children": {
"TorchPolicy.evaluate": {
"total": 66.94491787998959,
"count": 18192,
"self": 66.94491787998959
}
}
},
"workers": {
"total": 0.1977094909727839,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 411.39937476199066,
"count": 18192,
"is_parallel": true,
"self": 217.85587383799952,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004912291000209734,
"count": 1,
"is_parallel": true,
"self": 0.003551191000497056,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013610999997126783,
"count": 10,
"is_parallel": true,
"self": 0.0013610999997126783
}
}
},
"UnityEnvironment.step": {
"total": 0.0359621679999691,
"count": 1,
"is_parallel": true,
"self": 0.0005905769999117183,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00039945400021679234,
"count": 1,
"is_parallel": true,
"self": 0.00039945400021679234
},
"communicator.exchange": {
"total": 0.03312588199992206,
"count": 1,
"is_parallel": true,
"self": 0.03312588199992206
},
"steps_from_proto": {
"total": 0.0018462549999185285,
"count": 1,
"is_parallel": true,
"self": 0.00040436900007989607,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014418859998386324,
"count": 10,
"is_parallel": true,
"self": 0.0014418859998386324
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 193.54350092399113,
"count": 18191,
"is_parallel": true,
"self": 9.468067610971957,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.325981763001437,
"count": 18191,
"is_parallel": true,
"self": 5.325981763001437
},
"communicator.exchange": {
"total": 148.70434467798532,
"count": 18191,
"is_parallel": true,
"self": 148.70434467798532
},
"steps_from_proto": {
"total": 30.04510687203242,
"count": 18191,
"is_parallel": true,
"self": 5.323646552033551,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.72146031999887,
"count": 181910,
"is_parallel": true,
"self": 24.72146031999887
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 119.5175405389839,
"count": 18192,
"self": 0.40123433497160477,
"children": {
"process_trajectory": {
"total": 26.37485031001256,
"count": 18192,
"self": 25.970361752012877,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4044885579996844,
"count": 4,
"self": 0.4044885579996844
}
}
},
"_update_policy": {
"total": 92.74145589399973,
"count": 90,
"self": 37.29387848200781,
"children": {
"TorchPPOOptimizer.update": {
"total": 55.447577411991915,
"count": 4587,
"self": 55.447577411991915
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0700000530050602e-06,
"count": 1,
"self": 1.0700000530050602e-06
},
"TrainerController._save_models": {
"total": 0.08638044199983597,
"count": 1,
"self": 0.000837871999692652,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08554257000014331,
"count": 1,
"self": 0.08554257000014331
}
}
}
}
}
}
}