{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4058090448379517,
"min": 1.4058090448379517,
"max": 1.4276025295257568,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69764.6796875,
"min": 68249.4375,
"max": 78462.140625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 80.53507340946166,
"min": 75.57427258805512,
"max": 403.088,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49368.0,
"min": 49251.0,
"max": 50386.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999932.0,
"min": 49761.0,
"max": 1999932.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999932.0,
"min": 49761.0,
"max": 1999932.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4285013675689697,
"min": 0.07511721551418304,
"max": 2.49534010887146,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1488.67138671875,
"min": 9.314535140991211,
"max": 1573.595703125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.837991959605287,
"min": 1.7894507278838465,
"max": 4.08174509649973,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2352.689071238041,
"min": 221.89189025759697,
"max": 2529.6772092580795,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.837991959605287,
"min": 1.7894507278838465,
"max": 4.08174509649973,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2352.689071238041,
"min": 221.89189025759697,
"max": 2529.6772092580795,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015504430688451976,
"min": 0.014341776878670012,
"max": 0.020105713035930724,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04651329206535593,
"min": 0.028683553757340025,
"max": 0.057408184121353165,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05465260992447535,
"min": 0.020679547420392436,
"max": 0.06570961177349091,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16395782977342604,
"min": 0.04135909484078487,
"max": 0.17021575334171454,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6321987893000033e-06,
"min": 3.6321987893000033e-06,
"max": 0.00029528557657147503,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.089659636790001e-05,
"min": 1.089659636790001e-05,
"max": 0.0008438650687116499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10121070000000003,
"min": 0.10121070000000003,
"max": 0.19842852500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30363210000000007,
"min": 0.20755995000000005,
"max": 0.5812883500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.041393000000008e-05,
"min": 7.041393000000008e-05,
"max": 0.0049215833975,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021124179000000026,
"min": 0.00021124179000000026,
"max": 0.014066288665,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683371837",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1683374243"
},
"total": 2405.892582778,
"count": 1,
"self": 0.436419055999977,
"children": {
"run_training.setup": {
"total": 0.05437221100004308,
"count": 1,
"self": 0.05437221100004308
},
"TrainerController.start_learning": {
"total": 2405.401791511,
"count": 1,
"self": 4.186071214947333,
"children": {
"TrainerController._reset_env": {
"total": 4.582097319000013,
"count": 1,
"self": 4.582097319000013
},
"TrainerController.advance": {
"total": 2396.5011249580525,
"count": 232629,
"self": 4.3943976851110165,
"children": {
"env_step": {
"total": 1872.6151415629415,
"count": 232629,
"self": 1587.1029476989786,
"children": {
"SubprocessEnvManager._take_step": {
"total": 282.68808562399755,
"count": 232629,
"self": 16.793529027971488,
"children": {
"TorchPolicy.evaluate": {
"total": 265.89455659602606,
"count": 222876,
"self": 265.89455659602606
}
}
},
"workers": {
"total": 2.8241082399653124,
"count": 232629,
"self": 0.0,
"children": {
"worker_root": {
"total": 2397.4282135280396,
"count": 232629,
"is_parallel": true,
"self": 1099.7132340339667,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009016459999884319,
"count": 1,
"is_parallel": true,
"self": 0.00027024499996741724,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006314010000210146,
"count": 2,
"is_parallel": true,
"self": 0.0006314010000210146
}
}
},
"UnityEnvironment.step": {
"total": 0.03611126199996306,
"count": 1,
"is_parallel": true,
"self": 0.0003287459999228304,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001936709999768027,
"count": 1,
"is_parallel": true,
"self": 0.0001936709999768027
},
"communicator.exchange": {
"total": 0.034857786000031865,
"count": 1,
"is_parallel": true,
"self": 0.034857786000031865
},
"steps_from_proto": {
"total": 0.0007310590000315642,
"count": 1,
"is_parallel": true,
"self": 0.00023317200009387307,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004978869999376911,
"count": 2,
"is_parallel": true,
"self": 0.0004978869999376911
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1297.7149794940728,
"count": 232628,
"is_parallel": true,
"self": 38.04575125406359,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.74771636498747,
"count": 232628,
"is_parallel": true,
"self": 81.74771636498747
},
"communicator.exchange": {
"total": 1083.112979291974,
"count": 232628,
"is_parallel": true,
"self": 1083.112979291974
},
"steps_from_proto": {
"total": 94.80853258304774,
"count": 232628,
"is_parallel": true,
"self": 36.87258838005653,
"children": {
"_process_rank_one_or_two_observation": {
"total": 57.93594420299121,
"count": 465256,
"is_parallel": true,
"self": 57.93594420299121
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 519.4915857100002,
"count": 232629,
"self": 6.64162556107658,
"children": {
"process_trajectory": {
"total": 135.3539460259231,
"count": 232629,
"self": 133.92624449692306,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4277015290000463,
"count": 10,
"self": 1.4277015290000463
}
}
},
"_update_policy": {
"total": 377.49601412300046,
"count": 97,
"self": 318.8969478819951,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.599066241005346,
"count": 2910,
"self": 58.599066241005346
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3020003279962111e-06,
"count": 1,
"self": 1.3020003279962111e-06
},
"TrainerController._save_models": {
"total": 0.1324967169998672,
"count": 1,
"self": 0.0022396829999706824,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13025703399989652,
"count": 1,
"self": 0.13025703399989652
}
}
}
}
}
}
}