{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4114099740982056,
"min": 1.4114099740982056,
"max": 1.4298384189605713,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 68766.71875,
"min": 68356.2578125,
"max": 75076.9921875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 85.02409638554217,
"min": 80.6557911908646,
"max": 434.1565217391304,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49399.0,
"min": 48981.0,
"max": 50177.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999973.0,
"min": 49939.0,
"max": 1999973.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999973.0,
"min": 49939.0,
"max": 1999973.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4239814281463623,
"min": 0.13998527824878693,
"max": 2.5330557823181152,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1408.333251953125,
"min": 15.958321571350098,
"max": 1485.0008544921875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7105679547930337,
"min": 1.825975762386071,
"max": 4.016443850044851,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2155.8399817347527,
"min": 208.1612369120121,
"max": 2346.67379373312,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7105679547930337,
"min": 1.825975762386071,
"max": 4.016443850044851,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2155.8399817347527,
"min": 208.1612369120121,
"max": 2346.67379373312,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018605972112466892,
"min": 0.014677382057804304,
"max": 0.019650975245410034,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05581791633740067,
"min": 0.029354764115608608,
"max": 0.05796542349271476,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05652332231402398,
"min": 0.021131427896519502,
"max": 0.06036304260293643,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16956996694207194,
"min": 0.042262855793039004,
"max": 0.1719430681318045,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.2859989046999976e-06,
"min": 3.2859989046999976e-06,
"max": 0.00029530305156564993,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.857996714099993e-06,
"min": 9.857996714099993e-06,
"max": 0.0008441659686113499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10109529999999999,
"min": 0.10109529999999999,
"max": 0.19843434999999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30328589999999994,
"min": 0.20731635000000004,
"max": 0.5813886499999998,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.465546999999998e-05,
"min": 6.465546999999998e-05,
"max": 0.004921874065,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00019396640999999994,
"min": 0.00019396640999999994,
"max": 0.014071293634999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682317449",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1682320001"
},
"total": 2552.0155502860002,
"count": 1,
"self": 0.42630479399986143,
"children": {
"run_training.setup": {
"total": 0.19609793000000764,
"count": 1,
"self": 0.19609793000000764
},
"TrainerController.start_learning": {
"total": 2551.393147562,
"count": 1,
"self": 4.92178830996545,
"children": {
"TrainerController._reset_env": {
"total": 5.110419902000018,
"count": 1,
"self": 5.110419902000018
},
"TrainerController.advance": {
"total": 2541.243485090035,
"count": 232345,
"self": 5.025650965030309,
"children": {
"env_step": {
"total": 1999.0848792759816,
"count": 232345,
"self": 1696.8382502419784,
"children": {
"SubprocessEnvManager._take_step": {
"total": 299.1468781500056,
"count": 232345,
"self": 18.11195785101728,
"children": {
"TorchPolicy.evaluate": {
"total": 281.0349202989883,
"count": 222880,
"self": 281.0349202989883
}
}
},
"workers": {
"total": 3.099750883997615,
"count": 232345,
"self": 0.0,
"children": {
"worker_root": {
"total": 2542.506253111994,
"count": 232345,
"is_parallel": true,
"self": 1150.9842375240473,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0012650449999966895,
"count": 1,
"is_parallel": true,
"self": 0.0004903179999757867,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007747270000209028,
"count": 2,
"is_parallel": true,
"self": 0.0007747270000209028
}
}
},
"UnityEnvironment.step": {
"total": 0.02941524799999229,
"count": 1,
"is_parallel": true,
"self": 0.0002924189999760074,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002477529999964645,
"count": 1,
"is_parallel": true,
"self": 0.0002477529999964645
},
"communicator.exchange": {
"total": 0.02816876500000376,
"count": 1,
"is_parallel": true,
"self": 0.02816876500000376
},
"steps_from_proto": {
"total": 0.0007063110000160577,
"count": 1,
"is_parallel": true,
"self": 0.00024650400001746675,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000459806999998591,
"count": 2,
"is_parallel": true,
"self": 0.000459806999998591
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1391.5220155879465,
"count": 232344,
"is_parallel": true,
"self": 40.42578258092044,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 89.25921820599766,
"count": 232344,
"is_parallel": true,
"self": 89.25921820599766
},
"communicator.exchange": {
"total": 1165.938367906087,
"count": 232344,
"is_parallel": true,
"self": 1165.938367906087
},
"steps_from_proto": {
"total": 95.89864689494124,
"count": 232344,
"is_parallel": true,
"self": 38.671679690892915,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.22696720404832,
"count": 464688,
"is_parallel": true,
"self": 57.22696720404832
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 537.1329548490228,
"count": 232345,
"self": 7.256562498021026,
"children": {
"process_trajectory": {
"total": 144.5748559340012,
"count": 232345,
"self": 143.2166207870013,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3582351469999026,
"count": 10,
"self": 1.3582351469999026
}
}
},
"_update_policy": {
"total": 385.30153641700053,
"count": 97,
"self": 325.3049683669899,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.99656805001061,
"count": 2910,
"self": 59.99656805001061
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.589998626324814e-07,
"count": 1,
"self": 9.589998626324814e-07
},
"TrainerController._save_models": {
"total": 0.1174533009998413,
"count": 1,
"self": 0.002585920999990776,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11486737999985053,
"count": 1,
"self": 0.11486737999985053
}
}
}
}
}
}
}