{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7698145508766174,
"min": 0.7698145508766174,
"max": 2.8572795391082764,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7316.3173828125,
"min": 7316.3173828125,
"max": 29167.109375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.499406814575195,
"min": 0.3974979817867279,
"max": 12.499406814575195,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2437.38427734375,
"min": 77.11460876464844,
"max": 2488.4404296875,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06771193218004305,
"min": 0.06653044981945891,
"max": 0.07707889926352757,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2708477287201722,
"min": 0.26786018175021037,
"max": 0.38539449631763784,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19185685074212505,
"min": 0.10696789967558146,
"max": 0.27507888379634593,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7674274029685002,
"min": 0.42787159870232583,
"max": 1.3359254201253257,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.25,
"min": 3.090909090909091,
"max": 25.25,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1111.0,
"min": 136.0,
"max": 1371.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.25,
"min": 3.090909090909091,
"max": 25.25,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1111.0,
"min": 136.0,
"max": 1371.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1752531389",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1752531851"
},
"total": 461.55342900600004,
"count": 1,
"self": 0.43645929100011926,
"children": {
"run_training.setup": {
"total": 0.02352122799999279,
"count": 1,
"self": 0.02352122799999279
},
"TrainerController.start_learning": {
"total": 461.09344848699993,
"count": 1,
"self": 0.4520145959958768,
"children": {
"TrainerController._reset_env": {
"total": 3.4330011149999677,
"count": 1,
"self": 3.4330011149999677
},
"TrainerController.advance": {
"total": 457.12537055900395,
"count": 18192,
"self": 0.4667793030105827,
"children": {
"env_step": {
"total": 333.63325899300105,
"count": 18192,
"self": 256.9016111640066,
"children": {
"SubprocessEnvManager._take_step": {
"total": 76.45877372599313,
"count": 18192,
"self": 1.3849454899910256,
"children": {
"TorchPolicy.evaluate": {
"total": 75.0738282360021,
"count": 18192,
"self": 75.0738282360021
}
}
},
"workers": {
"total": 0.2728741030012998,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 459.3901435149937,
"count": 18192,
"is_parallel": true,
"self": 234.70442048399258,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004941105999989759,
"count": 1,
"is_parallel": true,
"self": 0.0035933860000341156,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013477199999556433,
"count": 10,
"is_parallel": true,
"self": 0.0013477199999556433
}
}
},
"UnityEnvironment.step": {
"total": 0.04370741100001396,
"count": 1,
"is_parallel": true,
"self": 0.0005897770000160563,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00035510899999735557,
"count": 1,
"is_parallel": true,
"self": 0.00035510899999735557
},
"communicator.exchange": {
"total": 0.04096142299999883,
"count": 1,
"is_parallel": true,
"self": 0.04096142299999883
},
"steps_from_proto": {
"total": 0.0018011020000017197,
"count": 1,
"is_parallel": true,
"self": 0.00036575799998672665,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001435344000014993,
"count": 10,
"is_parallel": true,
"self": 0.001435344000014993
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 224.68572303100115,
"count": 18191,
"is_parallel": true,
"self": 10.482294002986634,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.840728061009372,
"count": 18191,
"is_parallel": true,
"self": 5.840728061009372
},
"communicator.exchange": {
"total": 174.06183081001058,
"count": 18191,
"is_parallel": true,
"self": 174.06183081001058
},
"steps_from_proto": {
"total": 34.30087015699456,
"count": 18191,
"is_parallel": true,
"self": 6.355984238013036,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.944885918981527,
"count": 181910,
"is_parallel": true,
"self": 27.944885918981527
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 123.02533226299232,
"count": 18192,
"self": 0.5848565009922027,
"children": {
"process_trajectory": {
"total": 27.37809686000037,
"count": 18192,
"self": 26.952858776000312,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4252380840000569,
"count": 4,
"self": 0.4252380840000569
}
}
},
"_update_policy": {
"total": 95.06237890199975,
"count": 90,
"self": 39.051550040997995,
"children": {
"TorchPPOOptimizer.update": {
"total": 56.010828861001755,
"count": 4587,
"self": 56.010828861001755
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.610000688553555e-07,
"count": 1,
"self": 9.610000688553555e-07
},
"TrainerController._save_models": {
"total": 0.08306125600006453,
"count": 1,
"self": 0.0008538070001122833,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08220744899995225,
"count": 1,
"self": 0.08220744899995225
}
}
}
}
}
}
}