{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9472499489784241,
"min": 0.9472499489784241,
"max": 2.851076602935791,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9044.3427734375,
"min": 9044.3427734375,
"max": 29197.875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.514106750488281,
"min": 0.2743290364742279,
"max": 12.514106750488281,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2440.250732421875,
"min": 53.21983337402344,
"max": 2525.765625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.059714054184059576,
"min": 0.059714054184059576,
"max": 0.07307829050057311,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2388562167362383,
"min": 0.2388562167362383,
"max": 0.3629877564529249,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18964851928838328,
"min": 0.12208514087422587,
"max": 0.29672983610162545,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7585940771535331,
"min": 0.48834056349690347,
"max": 1.3703364040337356,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.477272727272727,
"min": 3.3863636363636362,
"max": 25.477272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1121.0,
"min": 149.0,
"max": 1346.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.477272727272727,
"min": 3.3863636363636362,
"max": 25.477272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1121.0,
"min": 149.0,
"max": 1346.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686231941",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1686232550"
},
"total": 608.6044473279999,
"count": 1,
"self": 0.8507900519999794,
"children": {
"run_training.setup": {
"total": 0.04530838499999845,
"count": 1,
"self": 0.04530838499999845
},
"TrainerController.start_learning": {
"total": 607.7083488909999,
"count": 1,
"self": 0.8026990139940153,
"children": {
"TrainerController._reset_env": {
"total": 4.397871489999943,
"count": 1,
"self": 4.397871489999943
},
"TrainerController.advance": {
"total": 602.2562475620056,
"count": 18211,
"self": 0.4038738980009384,
"children": {
"env_step": {
"total": 601.8523736640046,
"count": 18211,
"self": 438.23945906698987,
"children": {
"SubprocessEnvManager._take_step": {
"total": 163.22875548800744,
"count": 18211,
"self": 2.4854541730088613,
"children": {
"TorchPolicy.evaluate": {
"total": 160.74330131499858,
"count": 18211,
"self": 160.74330131499858
}
}
},
"workers": {
"total": 0.38415910900732797,
"count": 18211,
"self": 0.0,
"children": {
"worker_root": {
"total": 605.390217956995,
"count": 18211,
"is_parallel": true,
"self": 275.21900936001225,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005778977999966628,
"count": 1,
"is_parallel": true,
"self": 0.004079777999891121,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001699200000075507,
"count": 10,
"is_parallel": true,
"self": 0.001699200000075507
}
}
},
"UnityEnvironment.step": {
"total": 0.04080067600000348,
"count": 1,
"is_parallel": true,
"self": 0.0006607459999941057,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00046798500000022614,
"count": 1,
"is_parallel": true,
"self": 0.00046798500000022614
},
"communicator.exchange": {
"total": 0.0370661619999737,
"count": 1,
"is_parallel": true,
"self": 0.0370661619999737
},
"steps_from_proto": {
"total": 0.0026057830000354443,
"count": 1,
"is_parallel": true,
"self": 0.0004933179998261039,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021124650002093404,
"count": 10,
"is_parallel": true,
"self": 0.0021124650002093404
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 330.17120859698275,
"count": 18210,
"is_parallel": true,
"self": 14.009403256974792,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.17038587899799,
"count": 18210,
"is_parallel": true,
"self": 7.17038587899799
},
"communicator.exchange": {
"total": 261.39006024400715,
"count": 18210,
"is_parallel": true,
"self": 261.39006024400715
},
"steps_from_proto": {
"total": 47.60135921700282,
"count": 18210,
"is_parallel": true,
"self": 9.47530174400731,
"children": {
"_process_rank_one_or_two_observation": {
"total": 38.12605747299551,
"count": 182100,
"is_parallel": true,
"self": 38.12605747299551
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00017286400020566361,
"count": 1,
"self": 0.00017286400020566361,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 597.0415260409357,
"count": 604389,
"is_parallel": true,
"self": 14.405445290003968,
"children": {
"process_trajectory": {
"total": 334.564939730931,
"count": 604389,
"is_parallel": true,
"self": 333.142787254931,
"children": {
"RLTrainer._checkpoint": {
"total": 1.422152475999951,
"count": 4,
"is_parallel": true,
"self": 1.422152475999951
}
}
},
"_update_policy": {
"total": 248.0711410200007,
"count": 90,
"is_parallel": true,
"self": 93.17057751000664,
"children": {
"TorchPPOOptimizer.update": {
"total": 154.90056350999407,
"count": 4584,
"is_parallel": true,
"self": 154.90056350999407
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.25135796100016705,
"count": 1,
"self": 0.0012914750002437358,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2500664859999233,
"count": 1,
"self": 0.2500664859999233
}
}
}
}
}
}
}