{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4014461040496826,
"min": 1.4014461040496826,
"max": 1.4263112545013428,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70325.96875,
"min": 69106.015625,
"max": 77853.625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 100.54655870445345,
"min": 90.47802197802197,
"max": 379.3030303030303,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49670.0,
"min": 48887.0,
"max": 50118.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999513.0,
"min": 49846.0,
"max": 1999513.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999513.0,
"min": 49846.0,
"max": 1999513.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.398325204849243,
"min": 0.05879088491201401,
"max": 2.424753189086914,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1184.772705078125,
"min": 7.701605796813965,
"max": 1312.2742919921875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7832813795037596,
"min": 1.9111643237922027,
"max": 3.8761406136669128,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1868.9410014748573,
"min": 250.36252641677856,
"max": 2054.027716398239,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7832813795037596,
"min": 1.9111643237922027,
"max": 3.8761406136669128,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1868.9410014748573,
"min": 250.36252641677856,
"max": 2054.027716398239,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01777137084314341,
"min": 0.012269601047592005,
"max": 0.01986560000740509,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05331411252943023,
"min": 0.02453920209518401,
"max": 0.05959680002221527,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05483203629652659,
"min": 0.022970727148155373,
"max": 0.06959378346800804,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16449610888957977,
"min": 0.045941454296310746,
"max": 0.19779911302030084,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.2510989163333252e-06,
"min": 3.2510989163333252e-06,
"max": 0.0002953821765392749,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.753296748999976e-06,
"min": 9.753296748999976e-06,
"max": 0.0008442009185997,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10108366666666667,
"min": 0.10108366666666667,
"max": 0.19846072500000006,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.303251,
"min": 0.20731795000000006,
"max": 0.5814003000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.407496666666655e-05,
"min": 6.407496666666655e-05,
"max": 0.0049231901775,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019222489999999964,
"min": 0.00019222489999999964,
"max": 0.014071874970000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671142466",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671144666"
},
"total": 2200.306604128,
"count": 1,
"self": 0.39385980599990944,
"children": {
"run_training.setup": {
"total": 0.10462155300001541,
"count": 1,
"self": 0.10462155300001541
},
"TrainerController.start_learning": {
"total": 2199.808122769,
"count": 1,
"self": 3.8767992311300077,
"children": {
"TrainerController._reset_env": {
"total": 10.951668418999816,
"count": 1,
"self": 10.951668418999816
},
"TrainerController.advance": {
"total": 2184.8537242808698,
"count": 231629,
"self": 4.07983758457658,
"children": {
"env_step": {
"total": 1717.0304840100962,
"count": 231629,
"self": 1440.9591626954993,
"children": {
"SubprocessEnvManager._take_step": {
"total": 273.4421657818275,
"count": 231629,
"self": 14.151198919988019,
"children": {
"TorchPolicy.evaluate": {
"total": 259.2909668618395,
"count": 222914,
"self": 64.97016045277269,
"children": {
"TorchPolicy.sample_actions": {
"total": 194.3208064090668,
"count": 222914,
"self": 194.3208064090668
}
}
}
}
},
"workers": {
"total": 2.629155532769346,
"count": 231629,
"self": 0.0,
"children": {
"worker_root": {
"total": 2192.331981880058,
"count": 231629,
"is_parallel": true,
"self": 1004.8117244022751,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002509791999727895,
"count": 1,
"is_parallel": true,
"self": 0.00035753299971474917,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002152259000013146,
"count": 2,
"is_parallel": true,
"self": 0.002152259000013146
}
}
},
"UnityEnvironment.step": {
"total": 0.026707233000252018,
"count": 1,
"is_parallel": true,
"self": 0.0002682710005501576,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018735000003289315,
"count": 1,
"is_parallel": true,
"self": 0.00018735000003289315
},
"communicator.exchange": {
"total": 0.025524968999889097,
"count": 1,
"is_parallel": true,
"self": 0.025524968999889097
},
"steps_from_proto": {
"total": 0.0007266429997798696,
"count": 1,
"is_parallel": true,
"self": 0.0002526910002416116,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00047395199953825795,
"count": 2,
"is_parallel": true,
"self": 0.00047395199953825795
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1187.5202574777827,
"count": 231628,
"is_parallel": true,
"self": 34.45666062989858,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 74.53918168305427,
"count": 231628,
"is_parallel": true,
"self": 74.53918168305427
},
"communicator.exchange": {
"total": 986.8723276979204,
"count": 231628,
"is_parallel": true,
"self": 986.8723276979204
},
"steps_from_proto": {
"total": 91.6520874669095,
"count": 231628,
"is_parallel": true,
"self": 37.67981445969008,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.97227300721943,
"count": 463256,
"is_parallel": true,
"self": 53.97227300721943
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 463.743402686197,
"count": 231629,
"self": 5.868429193384145,
"children": {
"process_trajectory": {
"total": 144.5552189158152,
"count": 231629,
"self": 143.39169291281496,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1635260030002428,
"count": 10,
"self": 1.1635260030002428
}
}
},
"_update_policy": {
"total": 313.31975457699764,
"count": 97,
"self": 259.8355766369955,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.484177940002155,
"count": 2910,
"self": 53.484177940002155
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.580000212532468e-07,
"count": 1,
"self": 8.580000212532468e-07
},
"TrainerController._save_models": {
"total": 0.1259299800003646,
"count": 1,
"self": 0.0026398760001029586,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12329010400026164,
"count": 1,
"self": 0.12329010400026164
}
}
}
}
}
}
}