{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.3988615274429321,
"min": 1.3988615274429321,
"max": 1.4288098812103271,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69482.8515625,
"min": 67213.265625,
"max": 77093.3671875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 103.19047619047619,
"min": 89.4068100358423,
"max": 378.57142857142856,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49841.0,
"min": 48847.0,
"max": 50350.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999666.0,
"min": 49981.0,
"max": 1999666.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999666.0,
"min": 49981.0,
"max": 1999666.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3524415493011475,
"min": 0.17352251708507538,
"max": 2.4141829013824463,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1136.229248046875,
"min": 22.904972076416016,
"max": 1325.683837890625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.628023512491775,
"min": 1.895293135308858,
"max": 3.8379439457933953,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1752.3353565335274,
"min": 250.17869386076927,
"max": 2065.111233651638,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.628023512491775,
"min": 1.895293135308858,
"max": 3.8379439457933953,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1752.3353565335274,
"min": 250.17869386076927,
"max": 2065.111233651638,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018601314288874463,
"min": 0.013675588071904105,
"max": 0.02005452904183282,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05580394286662339,
"min": 0.02841165805681764,
"max": 0.060163587125498456,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.049751184259851776,
"min": 0.02086960996190707,
"max": 0.06343954900900522,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14925355277955532,
"min": 0.04173921992381414,
"max": 0.18166482659677663,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.3923988692333368e-06,
"min": 3.3923988692333368e-06,
"max": 0.00029536695154434996,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.017719660770001e-05,
"min": 1.017719660770001e-05,
"max": 0.0008443515185494999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10113076666666669,
"min": 0.10113076666666669,
"max": 0.19845565,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30339230000000006,
"min": 0.20747705000000002,
"max": 0.5814505000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.642525666666674e-05,
"min": 6.642525666666674e-05,
"max": 0.004922936935,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001992757700000002,
"min": 0.0001992757700000002,
"max": 0.01407437995,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1719476353",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1719478990"
},
"total": 2637.6870888900003,
"count": 1,
"self": 0.6478866690003997,
"children": {
"run_training.setup": {
"total": 0.06202444199971069,
"count": 1,
"self": 0.06202444199971069
},
"TrainerController.start_learning": {
"total": 2636.977177779,
"count": 1,
"self": 4.548473000444574,
"children": {
"TrainerController._reset_env": {
"total": 3.1775862840004265,
"count": 1,
"self": 3.1775862840004265
},
"TrainerController.advance": {
"total": 2629.135095697554,
"count": 231378,
"self": 4.903872693826997,
"children": {
"env_step": {
"total": 1995.5722655441114,
"count": 231378,
"self": 1651.9168425497555,
"children": {
"SubprocessEnvManager._take_step": {
"total": 340.5914986825692,
"count": 231378,
"self": 19.0249461323574,
"children": {
"TorchPolicy.evaluate": {
"total": 321.5665525502118,
"count": 222894,
"self": 321.5665525502118
}
}
},
"workers": {
"total": 3.06392431178665,
"count": 231378,
"self": 0.0,
"children": {
"worker_root": {
"total": 2629.670619867793,
"count": 231378,
"is_parallel": true,
"self": 1302.679187192908,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009874920006041066,
"count": 1,
"is_parallel": true,
"self": 0.00021345800087146927,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007740339997326373,
"count": 2,
"is_parallel": true,
"self": 0.0007740339997326373
}
}
},
"UnityEnvironment.step": {
"total": 0.04885909700078628,
"count": 1,
"is_parallel": true,
"self": 0.00037628500194841763,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002695079992918181,
"count": 1,
"is_parallel": true,
"self": 0.0002695079992918181
},
"communicator.exchange": {
"total": 0.04746298000009119,
"count": 1,
"is_parallel": true,
"self": 0.04746298000009119
},
"steps_from_proto": {
"total": 0.0007503239994548494,
"count": 1,
"is_parallel": true,
"self": 0.00019554099981178297,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005547829996430664,
"count": 2,
"is_parallel": true,
"self": 0.0005547829996430664
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1326.991432674885,
"count": 231377,
"is_parallel": true,
"self": 40.38995333843377,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 84.5688877655466,
"count": 231377,
"is_parallel": true,
"self": 84.5688877655466
},
"communicator.exchange": {
"total": 1108.4599302190572,
"count": 231377,
"is_parallel": true,
"self": 1108.4599302190572
},
"steps_from_proto": {
"total": 93.57266135184727,
"count": 231377,
"is_parallel": true,
"self": 35.06112458136613,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.51153677048114,
"count": 462754,
"is_parallel": true,
"self": 58.51153677048114
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 628.6589574596155,
"count": 231378,
"self": 7.0049405725576435,
"children": {
"process_trajectory": {
"total": 294.636735075067,
"count": 231378,
"self": 162.6559920700829,
"children": {
"RLTrainer._checkpoint": {
"total": 131.9807430049841,
"count": 1000,
"self": 131.9807430049841
}
}
},
"_update_policy": {
"total": 327.01728181199087,
"count": 97,
"self": 263.4767465570121,
"children": {
"TorchPPOOptimizer.update": {
"total": 63.54053525497875,
"count": 2910,
"self": 63.54053525497875
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.380000847158954e-07,
"count": 1,
"self": 9.380000847158954e-07
},
"TrainerController._save_models": {
"total": 0.11602185900119366,
"count": 1,
"self": 0.002248808001240832,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11377305099995283,
"count": 1,
"self": 0.11377305099995283
}
}
}
}
}
}
}