{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.405482530593872,
"min": 1.405482530593872,
"max": 1.424791932106018,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69544.6796875,
"min": 68371.609375,
"max": 78764.328125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 106.22698072805139,
"min": 82.80737018425461,
"max": 387.1085271317829,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49608.0,
"min": 48957.0,
"max": 50143.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999967.0,
"min": 49518.0,
"max": 1999967.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999967.0,
"min": 49518.0,
"max": 1999967.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.346294641494751,
"min": 0.035635609179735184,
"max": 2.49809193611145,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1095.7196044921875,
"min": 4.5613579750061035,
"max": 1416.075439453125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.531945319834307,
"min": 1.7926983578363433,
"max": 3.9470971624056497,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1649.4184643626213,
"min": 229.46538980305195,
"max": 2234.227006793022,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.531945319834307,
"min": 1.7926983578363433,
"max": 3.9470971624056497,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1649.4184643626213,
"min": 229.46538980305195,
"max": 2234.227006793022,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017806474872567275,
"min": 0.01159767320981094,
"max": 0.01961268134812902,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05341942461770183,
"min": 0.02319534641962188,
"max": 0.05687368103826884,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04987910874187946,
"min": 0.02259420972938339,
"max": 0.06383788250386714,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14963732622563838,
"min": 0.04518841945876678,
"max": 0.19151364751160144,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.358398880566669e-06,
"min": 3.358398880566669e-06,
"max": 0.0002953737765420749,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0075196641700006e-05,
"min": 1.0075196641700006e-05,
"max": 0.0008442282185906,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10111943333333333,
"min": 0.10111943333333333,
"max": 0.19845792500000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30335829999999997,
"min": 0.20745544999999999,
"max": 0.5814094000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.58597233333334e-05,
"min": 6.58597233333334e-05,
"max": 0.0049230504575,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001975791700000002,
"min": 0.0001975791700000002,
"max": 0.014072329060000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682188691",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682190994"
},
"total": 2303.89492211,
"count": 1,
"self": 0.38532438199990793,
"children": {
"run_training.setup": {
"total": 0.12347422300001654,
"count": 1,
"self": 0.12347422300001654
},
"TrainerController.start_learning": {
"total": 2303.3861235050003,
"count": 1,
"self": 4.152042835093653,
"children": {
"TrainerController._reset_env": {
"total": 3.8766400979999958,
"count": 1,
"self": 3.8766400979999958
},
"TrainerController.advance": {
"total": 2295.2380288829067,
"count": 232161,
"self": 4.549385524023364,
"children": {
"env_step": {
"total": 1792.8011982839705,
"count": 232161,
"self": 1517.3093090288648,
"children": {
"SubprocessEnvManager._take_step": {
"total": 272.7112766570348,
"count": 232161,
"self": 16.317214986056342,
"children": {
"TorchPolicy.evaluate": {
"total": 256.39406167097843,
"count": 222994,
"self": 256.39406167097843
}
}
},
"workers": {
"total": 2.7806125980708885,
"count": 232161,
"self": 0.0,
"children": {
"worker_root": {
"total": 2295.4051083469417,
"count": 232161,
"is_parallel": true,
"self": 1051.7638608949317,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009856209999838939,
"count": 1,
"is_parallel": true,
"self": 0.0003723279999690021,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006132930000148917,
"count": 2,
"is_parallel": true,
"self": 0.0006132930000148917
}
}
},
"UnityEnvironment.step": {
"total": 0.030228788999977496,
"count": 1,
"is_parallel": true,
"self": 0.00027429099992559713,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00016516000005140086,
"count": 1,
"is_parallel": true,
"self": 0.00016516000005140086
},
"communicator.exchange": {
"total": 0.0289370859999849,
"count": 1,
"is_parallel": true,
"self": 0.0289370859999849
},
"steps_from_proto": {
"total": 0.0008522520000155964,
"count": 1,
"is_parallel": true,
"self": 0.00020085599999219994,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006513960000233965,
"count": 2,
"is_parallel": true,
"self": 0.0006513960000233965
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1243.64124745201,
"count": 232160,
"is_parallel": true,
"self": 37.313793435171874,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 78.64921191493625,
"count": 232160,
"is_parallel": true,
"self": 78.64921191493625
},
"communicator.exchange": {
"total": 1040.8790674769825,
"count": 232160,
"is_parallel": true,
"self": 1040.8790674769825
},
"steps_from_proto": {
"total": 86.79917462491937,
"count": 232160,
"is_parallel": true,
"self": 32.615592934965434,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.18358168995394,
"count": 464320,
"is_parallel": true,
"self": 54.18358168995394
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 497.8874450749128,
"count": 232161,
"self": 6.466265206987657,
"children": {
"process_trajectory": {
"total": 127.89032737192605,
"count": 232161,
"self": 126.5452050839263,
"children": {
"RLTrainer._checkpoint": {
"total": 1.345122287999743,
"count": 10,
"self": 1.345122287999743
}
}
},
"_update_policy": {
"total": 363.5308524959991,
"count": 97,
"self": 305.3831644969995,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.14768799899963,
"count": 2910,
"self": 58.14768799899963
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.57999873207882e-07,
"count": 1,
"self": 9.57999873207882e-07
},
"TrainerController._save_models": {
"total": 0.11941073100024369,
"count": 1,
"self": 0.002000582000164286,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1174101490000794,
"count": 1,
"self": 0.1174101490000794
}
}
}
}
}
}
}