{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.8013758659362793,
"min": 2.8013758659362793,
"max": 2.890321969985962,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 28288.29296875,
"min": 27265.11328125,
"max": 29313.64453125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199960.0,
"min": 9960.0,
"max": 199960.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199960.0,
"min": 9960.0,
"max": 199960.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5115336179733276,
"min": 0.07916709780693054,
"max": 0.5115336179733276,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 127.8833999633789,
"min": 19.71260643005371,
"max": 127.8833999633789,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 5.061224489795919,
"min": 2.8181818181818183,
"max": 5.7272727272727275,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 248.0,
"min": 124.0,
"max": 315.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 5.061224489795919,
"min": 2.8181818181818183,
"max": 5.7272727272727275,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 248.0,
"min": 124.0,
"max": 315.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.01738821638185376,
"min": 0.011816814654351523,
"max": 0.020669989972763383,
"count": 16
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.01738821638185376,
"min": 0.011816814654351523,
"max": 0.020669989972763383,
"count": 16
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1030973620712757,
"min": 0.058712189396222435,
"max": 0.1030973620712757,
"count": 16
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.1030973620712757,
"min": 0.058712189396222435,
"max": 0.1030973620712757,
"count": 16
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.4400985599999973e-06,
"min": 1.4400985599999973e-06,
"max": 9.384000616000001e-05,
"count": 16
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.4400985599999973e-06,
"min": 1.4400985599999973e-06,
"max": 9.384000616000001e-05,
"count": 16
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10144000000000004,
"min": 0.10144000000000004,
"max": 0.19383999999999996,
"count": 16
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.10144000000000004,
"min": 0.10144000000000004,
"max": 0.19383999999999996,
"count": 16
},
"SnowballTarget.Policy.Beta.mean": {
"value": 8.185599999999987e-05,
"min": 8.185599999999987e-05,
"max": 0.004692616000000001,
"count": 16
},
"SnowballTarget.Policy.Beta.sum": {
"value": 8.185599999999987e-05,
"min": 8.185599999999987e-05,
"max": 0.004692616000000001,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1714915732",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowBallTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1714916163"
},
"total": 430.85380109300013,
"count": 1,
"self": 0.7475019030000567,
"children": {
"run_training.setup": {
"total": 0.05471110000007684,
"count": 1,
"self": 0.05471110000007684
},
"TrainerController.start_learning": {
"total": 430.05158809,
"count": 1,
"self": 0.5302272300118602,
"children": {
"TrainerController._reset_env": {
"total": 2.843432587000052,
"count": 1,
"self": 2.843432587000052
},
"TrainerController.advance": {
"total": 426.5729817609881,
"count": 18212,
"self": 0.2300196089952351,
"children": {
"env_step": {
"total": 426.3429621519929,
"count": 18212,
"self": 285.5669493789883,
"children": {
"SubprocessEnvManager._take_step": {
"total": 140.51535705000015,
"count": 18212,
"self": 1.3259321000032287,
"children": {
"TorchPolicy.evaluate": {
"total": 139.18942494999692,
"count": 18212,
"self": 139.18942494999692
}
}
},
"workers": {
"total": 0.2606557230044473,
"count": 18212,
"self": 0.0,
"children": {
"worker_root": {
"total": 428.8879212720112,
"count": 18212,
"is_parallel": true,
"self": 223.98110484500467,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007679638000126943,
"count": 1,
"is_parallel": true,
"self": 0.00611878100062313,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015608569995038124,
"count": 10,
"is_parallel": true,
"self": 0.0015608569995038124
}
}
},
"UnityEnvironment.step": {
"total": 0.06171682900003361,
"count": 1,
"is_parallel": true,
"self": 0.0006564159998561081,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003976549999151757,
"count": 1,
"is_parallel": true,
"self": 0.0003976549999151757
},
"communicator.exchange": {
"total": 0.058800285000188524,
"count": 1,
"is_parallel": true,
"self": 0.058800285000188524
},
"steps_from_proto": {
"total": 0.0018624730000738055,
"count": 1,
"is_parallel": true,
"self": 0.0004945240000324702,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013679490000413352,
"count": 10,
"is_parallel": true,
"self": 0.0013679490000413352
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 204.90681642700656,
"count": 18211,
"is_parallel": true,
"self": 9.775297412978716,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.067724289029002,
"count": 18211,
"is_parallel": true,
"self": 5.067724289029002
},
"communicator.exchange": {
"total": 158.61620861299843,
"count": 18211,
"is_parallel": true,
"self": 158.61620861299843
},
"steps_from_proto": {
"total": 31.44758611200041,
"count": 18211,
"is_parallel": true,
"self": 5.653266926044807,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.7943191859556,
"count": 182110,
"is_parallel": true,
"self": 25.7943191859556
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001687509998191672,
"count": 1,
"self": 0.0001687509998191672,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 419.023415560094,
"count": 917401,
"is_parallel": true,
"self": 18.717827055168755,
"children": {
"process_trajectory": {
"total": 304.90862033892586,
"count": 917401,
"is_parallel": true,
"self": 304.24811174592605,
"children": {
"RLTrainer._checkpoint": {
"total": 0.660508592999804,
"count": 4,
"is_parallel": true,
"self": 0.660508592999804
}
}
},
"_update_policy": {
"total": 95.3969681659994,
"count": 16,
"is_parallel": true,
"self": 69.91391486399925,
"children": {
"TorchPPOOptimizer.update": {
"total": 25.483053302000144,
"count": 480,
"is_parallel": true,
"self": 25.483053302000144
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.10477776100015035,
"count": 1,
"self": 0.0006585720002476592,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10411918899990269,
"count": 1,
"self": 0.10411918899990269
}
}
}
}
}
}
}