{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4046581983566284,
"min": 1.4046581983566284,
"max": 1.4288735389709473,
"count": 37
},
"Huggy.Policy.Entropy.sum": {
"value": 69546.03125,
"min": 20280.783203125,
"max": 72500.1953125,
"count": 37
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 88.3765541740675,
"min": 86.24432809773124,
"max": 169.15714285714284,
"count": 37
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49756.0,
"min": 11841.0,
"max": 50064.0,
"count": 37
},
"Huggy.Step.mean": {
"value": 1999641.0,
"min": 199737.0,
"max": 1999641.0,
"count": 37
},
"Huggy.Step.sum": {
"value": 1999641.0,
"min": 199737.0,
"max": 1999641.0,
"count": 37
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3761110305786133,
"min": 1.1079084873199463,
"max": 2.477832555770874,
"count": 37
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1337.75048828125,
"min": 76.44568634033203,
"max": 1382.6043701171875,
"count": 37
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7142520500330476,
"min": 3.233612756798233,
"max": 4.083141746870968,
"count": 37
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2091.123904168606,
"min": 223.11928021907806,
"max": 2169.011073887348,
"count": 37
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7142520500330476,
"min": 3.233612756798233,
"max": 4.083141746870968,
"count": 37
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2091.123904168606,
"min": 223.11928021907806,
"max": 2169.011073887348,
"count": 37
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 37
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 37
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01678155496677694,
"min": 0.01335351871578799,
"max": 0.01900544275283917,
"count": 36
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03356310993355388,
"min": 0.02670703743157598,
"max": 0.05701632825851751,
"count": 36
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05527746751904487,
"min": 0.028692939774029785,
"max": 0.061245533451437954,
"count": 36
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.11055493503808975,
"min": 0.06962104141712189,
"max": 0.17249171572426955,
"count": 36
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.458623513825003e-06,
"min": 4.458623513825003e-06,
"max": 0.0002656282614572499,
"count": 36
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.917247027650007e-06,
"min": 8.917247027650007e-06,
"max": 0.0007968847843717497,
"count": 36
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10148617500000001,
"min": 0.10148617500000001,
"max": 0.18854274999999998,
"count": 36
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20297235000000002,
"min": 0.20297235000000002,
"max": 0.5656282499999999,
"count": 36
},
"Huggy.Policy.Beta.mean": {
"value": 8.416013250000003e-05,
"min": 8.416013250000003e-05,
"max": 0.004428283224999999,
"count": 36
},
"Huggy.Policy.Beta.sum": {
"value": 0.00016832026500000007,
"min": 0.00016832026500000007,
"max": 0.013284849674999998,
"count": 36
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1765711367",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/ruedi/miniconda3/envs/bonus-unit1/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1765713359"
},
"total": 1991.9520865100003,
"count": 1,
"self": 0.3770263450003313,
"children": {
"run_training.setup": {
"total": 0.02162084900010086,
"count": 1,
"self": 0.02162084900010086
},
"TrainerController.start_learning": {
"total": 1991.5534393159999,
"count": 1,
"self": 3.306768411188159,
"children": {
"TrainerController._reset_env": {
"total": 5.502229875000012,
"count": 1,
"self": 5.502229875000012
},
"TrainerController.advance": {
"total": 1982.643391713812,
"count": 210896,
"self": 3.4724391328652473,
"children": {
"env_step": {
"total": 1614.688717578921,
"count": 210896,
"self": 1251.7618192719658,
"children": {
"SubprocessEnvManager._take_step": {
"total": 360.8710456529436,
"count": 210896,
"self": 11.1964748229434,
"children": {
"TorchPolicy.evaluate": {
"total": 349.6745708300002,
"count": 201985,
"self": 349.6745708300002
}
}
},
"workers": {
"total": 2.05585265401146,
"count": 210896,
"self": 0.0,
"children": {
"worker_root": {
"total": 1984.7546666817473,
"count": 210896,
"is_parallel": true,
"self": 949.714331834706,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020051829999374604,
"count": 1,
"is_parallel": true,
"self": 0.0005870779998531361,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014181050000843243,
"count": 2,
"is_parallel": true,
"self": 0.0014181050000843243
}
}
},
"UnityEnvironment.step": {
"total": 0.02355287899990799,
"count": 1,
"is_parallel": true,
"self": 0.0004493489998367295,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001588959999025974,
"count": 1,
"is_parallel": true,
"self": 0.0001588959999025974
},
"communicator.exchange": {
"total": 0.021797386000116603,
"count": 1,
"is_parallel": true,
"self": 0.021797386000116603
},
"steps_from_proto": {
"total": 0.001147248000052059,
"count": 1,
"is_parallel": true,
"self": 0.00035248400013188075,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007947639999201783,
"count": 2,
"is_parallel": true,
"self": 0.0007947639999201783
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1035.0403348470413,
"count": 210895,
"is_parallel": true,
"self": 34.34172308829943,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 67.95484381688084,
"count": 210895,
"is_parallel": true,
"self": 67.95484381688084
},
"communicator.exchange": {
"total": 856.0784549358873,
"count": 210895,
"is_parallel": true,
"self": 856.0784549358873
},
"steps_from_proto": {
"total": 76.66531300597376,
"count": 210895,
"is_parallel": true,
"self": 29.789570202906134,
"children": {
"_process_rank_one_or_two_observation": {
"total": 46.875742803067624,
"count": 421790,
"is_parallel": true,
"self": 46.875742803067624
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 364.48223500202585,
"count": 210896,
"self": 4.403106888993307,
"children": {
"process_trajectory": {
"total": 128.7970719650291,
"count": 210896,
"self": 127.76567834902858,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0313936160005142,
"count": 10,
"self": 1.0313936160005142
}
}
},
"_update_policy": {
"total": 231.28205614800345,
"count": 87,
"self": 180.69519561901484,
"children": {
"TorchPPOOptimizer.update": {
"total": 50.58686052898861,
"count": 2610,
"self": 50.58686052898861
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1269999049545731e-06,
"count": 1,
"self": 1.1269999049545731e-06
},
"TrainerController._save_models": {
"total": 0.10104818899981183,
"count": 1,
"self": 0.0014670659998046176,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09958112300000721,
"count": 1,
"self": 0.09958112300000721
}
}
}
}
}
}
}