{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0796109437942505,
"min": 1.0796109437942505,
"max": 2.876474618911743,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10308.125,
"min": 10308.125,
"max": 29489.6171875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.24938678741455,
"min": 0.4310693144798279,
"max": 12.24938678741455,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2388.63037109375,
"min": 83.62744903564453,
"max": 2467.2373046875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0674392565986137,
"min": 0.06335604707995776,
"max": 0.07496949384421539,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2697570263944548,
"min": 0.26043823304672853,
"max": 0.36409337668124403,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.19532251043939125,
"min": 0.13174782367080778,
"max": 0.28544034362934967,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.781290041757565,
"min": 0.5269912946832311,
"max": 1.4272017181467482,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.59090909090909,
"min": 3.4318181818181817,
"max": 24.59090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1082.0,
"min": 151.0,
"max": 1298.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.59090909090909,
"min": 3.4318181818181817,
"max": 24.59090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1082.0,
"min": 151.0,
"max": 1298.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1720956561",
"python_version": "3.10.12 (main, Mar 22 2024, 16:50:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./ml-agents/config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1720957008"
},
"total": 447.3311486220001,
"count": 1,
"self": 0.3851858440001479,
"children": {
"run_training.setup": {
"total": 0.058093473999974776,
"count": 1,
"self": 0.058093473999974776
},
"TrainerController.start_learning": {
"total": 446.887869304,
"count": 1,
"self": 0.5407376589957948,
"children": {
"TrainerController._reset_env": {
"total": 2.9571533150000278,
"count": 1,
"self": 2.9571533150000278
},
"TrainerController.advance": {
"total": 443.298005623004,
"count": 18202,
"self": 0.26325885801122695,
"children": {
"env_step": {
"total": 443.03474676499275,
"count": 18202,
"self": 285.9624138359792,
"children": {
"SubprocessEnvManager._take_step": {
"total": 156.80027009600622,
"count": 18202,
"self": 1.4943035670058862,
"children": {
"TorchPolicy.evaluate": {
"total": 155.30596652900033,
"count": 18202,
"self": 155.30596652900033
}
}
},
"workers": {
"total": 0.2720628330073396,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 445.6514048490109,
"count": 18202,
"is_parallel": true,
"self": 227.6408128530079,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0057136699999773555,
"count": 1,
"is_parallel": true,
"self": 0.004085680999764918,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016279890002124375,
"count": 10,
"is_parallel": true,
"self": 0.0016279890002124375
}
}
},
"UnityEnvironment.step": {
"total": 0.036793411999951786,
"count": 1,
"is_parallel": true,
"self": 0.000743742999816277,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00039813800003685174,
"count": 1,
"is_parallel": true,
"self": 0.00039813800003685174
},
"communicator.exchange": {
"total": 0.033681586000056996,
"count": 1,
"is_parallel": true,
"self": 0.033681586000056996
},
"steps_from_proto": {
"total": 0.001969945000041662,
"count": 1,
"is_parallel": true,
"self": 0.00042742900006942364,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015425159999722382,
"count": 10,
"is_parallel": true,
"self": 0.0015425159999722382
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 218.01059199600297,
"count": 18201,
"is_parallel": true,
"self": 10.150978272008615,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.346831367003688,
"count": 18201,
"is_parallel": true,
"self": 5.346831367003688
},
"communicator.exchange": {
"total": 168.84826303500859,
"count": 18201,
"is_parallel": true,
"self": 168.84826303500859
},
"steps_from_proto": {
"total": 33.664519321982084,
"count": 18201,
"is_parallel": true,
"self": 6.315915090006229,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.348604231975855,
"count": 182010,
"is_parallel": true,
"self": 27.348604231975855
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00012311300019973714,
"count": 1,
"self": 0.00012311300019973714,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 437.53850407289565,
"count": 666938,
"is_parallel": true,
"self": 14.258051981833319,
"children": {
"process_trajectory": {
"total": 242.01108071706176,
"count": 666938,
"is_parallel": true,
"self": 240.90498774006187,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1060929769998893,
"count": 4,
"is_parallel": true,
"self": 1.1060929769998893
}
}
},
"_update_policy": {
"total": 181.26937137400057,
"count": 90,
"is_parallel": true,
"self": 58.1248166769991,
"children": {
"TorchPPOOptimizer.update": {
"total": 123.14455469700147,
"count": 4581,
"is_parallel": true,
"self": 123.14455469700147
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.0918495939999957,
"count": 1,
"self": 0.0010004030000345665,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09084919099996114,
"count": 1,
"self": 0.09084919099996114
}
}
}
}
}
}
}