{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4062604904174805,
"min": 1.4062604904174805,
"max": 1.4324147701263428,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70200.5234375,
"min": 68738.5546875,
"max": 77371.2890625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 86.44055944055944,
"min": 79.56279809220986,
"max": 377.5338345864662,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49444.0,
"min": 48690.0,
"max": 50212.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999994.0,
"min": 49873.0,
"max": 1999994.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999994.0,
"min": 49873.0,
"max": 1999994.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4068872928619385,
"min": 0.1387336552143097,
"max": 2.4684431552886963,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1376.739501953125,
"min": 18.312843322753906,
"max": 1514.189697265625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.754760346733607,
"min": 1.8705243037054033,
"max": 3.940543509074274,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2147.722918331623,
"min": 246.90920808911324,
"max": 2405.2888628840446,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.754760346733607,
"min": 1.8705243037054033,
"max": 3.940543509074274,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2147.722918331623,
"min": 246.90920808911324,
"max": 2405.2888628840446,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01599954287392191,
"min": 0.013908593161290304,
"max": 0.020718430487229652,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.047998628621765724,
"min": 0.02827213109000392,
"max": 0.06215529146168895,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05098196607497003,
"min": 0.022084902940938872,
"max": 0.06093873054616981,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1529458982249101,
"min": 0.044169805881877744,
"max": 0.18281619163850943,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.743998752033333e-06,
"min": 3.743998752033333e-06,
"max": 0.0002953830015389999,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.12319962561e-05,
"min": 1.12319962561e-05,
"max": 0.0008444775185074999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10124796666666665,
"min": 0.10124796666666665,
"max": 0.198461,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30374389999999996,
"min": 0.20762295000000006,
"max": 0.5814925000000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.227353666666666e-05,
"min": 7.227353666666666e-05,
"max": 0.004923203900000001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021682061,
"min": 0.00021682061,
"max": 0.014076475750000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702998823",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1703001300"
},
"total": 2477.01634281,
"count": 1,
"self": 0.4472974139998769,
"children": {
"run_training.setup": {
"total": 0.05334533699999611,
"count": 1,
"self": 0.05334533699999611
},
"TrainerController.start_learning": {
"total": 2476.515700059,
"count": 1,
"self": 4.733442952002406,
"children": {
"TrainerController._reset_env": {
"total": 2.9109945540000126,
"count": 1,
"self": 2.9109945540000126
},
"TrainerController.advance": {
"total": 2468.759212921998,
"count": 232351,
"self": 4.9020434200128875,
"children": {
"env_step": {
"total": 1985.8316792069354,
"count": 232351,
"self": 1650.1121129379374,
"children": {
"SubprocessEnvManager._take_step": {
"total": 332.6850955529713,
"count": 232351,
"self": 17.309015237011295,
"children": {
"TorchPolicy.evaluate": {
"total": 315.37608031596,
"count": 222964,
"self": 315.37608031596
}
}
},
"workers": {
"total": 3.0344707160267035,
"count": 232351,
"self": 0.0,
"children": {
"worker_root": {
"total": 2469.079813869947,
"count": 232351,
"is_parallel": true,
"self": 1126.6406232279135,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0006550870000410214,
"count": 1,
"is_parallel": true,
"self": 0.00021256400003721865,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00044252300000380274,
"count": 2,
"is_parallel": true,
"self": 0.00044252300000380274
}
}
},
"UnityEnvironment.step": {
"total": 0.03072863900001721,
"count": 1,
"is_parallel": true,
"self": 0.0003024710000545383,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020063599998820791,
"count": 1,
"is_parallel": true,
"self": 0.00020063599998820791
},
"communicator.exchange": {
"total": 0.029504810999981146,
"count": 1,
"is_parallel": true,
"self": 0.029504810999981146
},
"steps_from_proto": {
"total": 0.0007207209999933184,
"count": 1,
"is_parallel": true,
"self": 0.00020083099997236786,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005198900000209505,
"count": 2,
"is_parallel": true,
"self": 0.0005198900000209505
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1342.4391906420335,
"count": 232350,
"is_parallel": true,
"self": 41.87431814409706,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 87.90931651598106,
"count": 232350,
"is_parallel": true,
"self": 87.90931651598106
},
"communicator.exchange": {
"total": 1119.2608126559344,
"count": 232350,
"is_parallel": true,
"self": 1119.2608126559344
},
"steps_from_proto": {
"total": 93.39474332602094,
"count": 232350,
"is_parallel": true,
"self": 34.84276610098544,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.5519772250355,
"count": 464700,
"is_parallel": true,
"self": 58.5519772250355
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 478.0254902950495,
"count": 232351,
"self": 6.889042008086051,
"children": {
"process_trajectory": {
"total": 157.78553228996378,
"count": 232351,
"self": 156.61181004296367,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1737222470001143,
"count": 10,
"self": 1.1737222470001143
}
}
},
"_update_policy": {
"total": 313.3509159969997,
"count": 97,
"self": 251.59031077299727,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.760605224002404,
"count": 2910,
"self": 61.760605224002404
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.51999936660286e-07,
"count": 1,
"self": 9.51999936660286e-07
},
"TrainerController._save_models": {
"total": 0.11204867900005411,
"count": 1,
"self": 0.0020441090000531403,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11000457000000097,
"count": 1,
"self": 0.11000457000000097
}
}
}
}
}
}
}