{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4084656238555908,
"min": 1.408464789390564,
"max": 1.4248017072677612,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71214.8359375,
"min": 67520.6796875,
"max": 79134.84375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 83.80887372013652,
"min": 80.82372322899506,
"max": 394.748031496063,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49112.0,
"min": 49040.0,
"max": 50133.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999933.0,
"min": 49852.0,
"max": 1999933.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999933.0,
"min": 49852.0,
"max": 1999933.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.386366844177246,
"min": 0.18413379788398743,
"max": 2.4568214416503906,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1398.4110107421875,
"min": 23.20085906982422,
"max": 1448.438720703125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.740368860878635,
"min": 1.7339775150730496,
"max": 3.952665875612149,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2191.85615247488,
"min": 218.48116689920425,
"max": 2275.586290895939,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.740368860878635,
"min": 1.7339775150730496,
"max": 3.952665875612149,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2191.85615247488,
"min": 218.48116689920425,
"max": 2275.586290895939,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017363197852521103,
"min": 0.012961733196364851,
"max": 0.021495015439359125,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.052089593557563305,
"min": 0.025923466392729702,
"max": 0.05568683120169832,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.0571269610689746,
"min": 0.020052833513667184,
"max": 0.0571269610689746,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1713808832069238,
"min": 0.04010566702733437,
"max": 0.1713808832069238,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.1552989482666694e-06,
"min": 3.1552989482666694e-06,
"max": 0.00029530507656497495,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.465896844800008e-06,
"min": 9.465896844800008e-06,
"max": 0.0008437161187612998,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10105173333333335,
"min": 0.10105173333333335,
"max": 0.19843502499999996,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30315520000000007,
"min": 0.20726825000000004,
"max": 0.5812387,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.248149333333337e-05,
"min": 6.248149333333337e-05,
"max": 0.004921907747500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018744448000000013,
"min": 0.00018744448000000013,
"max": 0.01406381113,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1736872895",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1736875367"
},
"total": 2472.103869478,
"count": 1,
"self": 0.7962219999999434,
"children": {
"run_training.setup": {
"total": 0.056608844000038516,
"count": 1,
"self": 0.056608844000038516
},
"TrainerController.start_learning": {
"total": 2471.251038634,
"count": 1,
"self": 4.515755521958454,
"children": {
"TrainerController._reset_env": {
"total": 5.558615320000058,
"count": 1,
"self": 5.558615320000058
},
"TrainerController.advance": {
"total": 2461.0047373340412,
"count": 232131,
"self": 4.6291173141153195,
"children": {
"env_step": {
"total": 1946.3171769289338,
"count": 232131,
"self": 1526.6615936870746,
"children": {
"SubprocessEnvManager._take_step": {
"total": 416.8059036908886,
"count": 232131,
"self": 15.938427334900098,
"children": {
"TorchPolicy.evaluate": {
"total": 400.8674763559885,
"count": 223002,
"self": 400.8674763559885
}
}
},
"workers": {
"total": 2.8496795509705635,
"count": 232131,
"self": 0.0,
"children": {
"worker_root": {
"total": 2463.6085722709295,
"count": 232131,
"is_parallel": true,
"self": 1224.2004890179296,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010286909999877025,
"count": 1,
"is_parallel": true,
"self": 0.0003123999999843363,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007162910000033662,
"count": 2,
"is_parallel": true,
"self": 0.0007162910000033662
}
}
},
"UnityEnvironment.step": {
"total": 0.028131632999929934,
"count": 1,
"is_parallel": true,
"self": 0.00036415599993233627,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021129300000666262,
"count": 1,
"is_parallel": true,
"self": 0.00021129300000666262
},
"communicator.exchange": {
"total": 0.0268100289999893,
"count": 1,
"is_parallel": true,
"self": 0.0268100289999893
},
"steps_from_proto": {
"total": 0.0007461550000016359,
"count": 1,
"is_parallel": true,
"self": 0.00020099100004244974,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005451639999591862,
"count": 2,
"is_parallel": true,
"self": 0.0005451639999591862
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1239.4080832529999,
"count": 232130,
"is_parallel": true,
"self": 37.78052068797297,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.76638818799393,
"count": 232130,
"is_parallel": true,
"self": 81.76638818799393
},
"communicator.exchange": {
"total": 1029.218826475008,
"count": 232130,
"is_parallel": true,
"self": 1029.218826475008
},
"steps_from_proto": {
"total": 90.642347902025,
"count": 232130,
"is_parallel": true,
"self": 34.20023515394223,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.442112748082764,
"count": 464260,
"is_parallel": true,
"self": 56.442112748082764
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 510.0584430909919,
"count": 232131,
"self": 6.895628195961876,
"children": {
"process_trajectory": {
"total": 170.91541658203107,
"count": 232131,
"self": 169.5426672040304,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3727493780006625,
"count": 10,
"self": 1.3727493780006625
}
}
},
"_update_policy": {
"total": 332.24739831299894,
"count": 97,
"self": 265.83523305301026,
"children": {
"TorchPPOOptimizer.update": {
"total": 66.41216525998868,
"count": 2910,
"self": 66.41216525998868
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.500000053056283e-06,
"count": 1,
"self": 1.500000053056283e-06
},
"TrainerController._save_models": {
"total": 0.17192895800008046,
"count": 1,
"self": 0.00300871100034783,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16892024699973263,
"count": 1,
"self": 0.16892024699973263
}
}
}
}
}
}
}