{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.404180645942688,
"min": 1.404180645942688,
"max": 1.4261080026626587,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69667.015625,
"min": 67336.1875,
"max": 78631.96875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 72.05263157894737,
"min": 70.45506419400856,
"max": 376.1578947368421,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49284.0,
"min": 49269.0,
"max": 50029.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999955.0,
"min": 49799.0,
"max": 1999955.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999955.0,
"min": 49799.0,
"max": 1999955.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.5387959480285645,
"min": 0.15343888103961945,
"max": 2.552877426147461,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1736.536376953125,
"min": 20.25393295288086,
"max": 1789.567138671875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.935024808460509,
"min": 1.766182921939727,
"max": 4.051004832178037,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2691.556968986988,
"min": 233.13614569604397,
"max": 2773.1334426403046,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.935024808460509,
"min": 1.766182921939727,
"max": 4.051004832178037,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2691.556968986988,
"min": 233.13614569604397,
"max": 2773.1334426403046,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015716550530568283,
"min": 0.01299585214130477,
"max": 0.02033178857786374,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04714965159170485,
"min": 0.02781088723835031,
"max": 0.06099536573359122,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.061764139764838744,
"min": 0.02119358411679665,
"max": 0.0643432676171263,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18529241929451623,
"min": 0.0423871682335933,
"max": 0.19156461556752524,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.7420987526666554e-06,
"min": 3.7420987526666554e-06,
"max": 0.000295341376552875,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1226296257999966e-05,
"min": 1.1226296257999966e-05,
"max": 0.0008441319186227,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10124733333333334,
"min": 0.10124733333333334,
"max": 0.198447125,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.303742,
"min": 0.20762255000000002,
"max": 0.5813773,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.224193333333315e-05,
"min": 7.224193333333315e-05,
"max": 0.0049225115375,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021672579999999946,
"min": 0.00021672579999999946,
"max": 0.014070727270000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690612902",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690615347"
},
"total": 2445.3520482470003,
"count": 1,
"self": 0.4392772880000848,
"children": {
"run_training.setup": {
"total": 0.05424183900004209,
"count": 1,
"self": 0.05424183900004209
},
"TrainerController.start_learning": {
"total": 2444.85852912,
"count": 1,
"self": 4.256744909997451,
"children": {
"TrainerController._reset_env": {
"total": 6.124277768999946,
"count": 1,
"self": 6.124277768999946
},
"TrainerController.advance": {
"total": 2434.3615539680027,
"count": 233487,
"self": 4.572611847006101,
"children": {
"env_step": {
"total": 1874.1807071879573,
"count": 233487,
"self": 1587.2439632172677,
"children": {
"SubprocessEnvManager._take_step": {
"total": 284.07363352879884,
"count": 233487,
"self": 16.219268964802154,
"children": {
"TorchPolicy.evaluate": {
"total": 267.8543645639967,
"count": 222928,
"self": 267.8543645639967
}
}
},
"workers": {
"total": 2.8631104418908535,
"count": 233487,
"self": 0.0,
"children": {
"worker_root": {
"total": 2437.1969208710316,
"count": 233487,
"is_parallel": true,
"self": 1139.7395984750142,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009796729999607123,
"count": 1,
"is_parallel": true,
"self": 0.00028666900004736817,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006930039999133442,
"count": 2,
"is_parallel": true,
"self": 0.0006930039999133442
}
}
},
"UnityEnvironment.step": {
"total": 0.028781102999914765,
"count": 1,
"is_parallel": true,
"self": 0.00034978799976670416,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021273200002269732,
"count": 1,
"is_parallel": true,
"self": 0.00021273200002269732
},
"communicator.exchange": {
"total": 0.027516562000073463,
"count": 1,
"is_parallel": true,
"self": 0.027516562000073463
},
"steps_from_proto": {
"total": 0.0007020210000519,
"count": 1,
"is_parallel": true,
"self": 0.00019708800004991645,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005049330000019836,
"count": 2,
"is_parallel": true,
"self": 0.0005049330000019836
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1297.4573223960174,
"count": 233486,
"is_parallel": true,
"self": 40.356356921829956,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.68260104201886,
"count": 233486,
"is_parallel": true,
"self": 80.68260104201886
},
"communicator.exchange": {
"total": 1079.8964378600895,
"count": 233486,
"is_parallel": true,
"self": 1079.8964378600895
},
"steps_from_proto": {
"total": 96.521926572079,
"count": 233486,
"is_parallel": true,
"self": 34.9602620350704,
"children": {
"_process_rank_one_or_two_observation": {
"total": 61.561664537008596,
"count": 466972,
"is_parallel": true,
"self": 61.561664537008596
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 555.6082349330391,
"count": 233487,
"self": 6.453862017010238,
"children": {
"process_trajectory": {
"total": 142.08316986702619,
"count": 233487,
"self": 140.72193935902612,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3612305080000624,
"count": 10,
"self": 1.3612305080000624
}
}
},
"_update_policy": {
"total": 407.07120304900263,
"count": 97,
"self": 347.5385899989965,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.53261305000615,
"count": 2910,
"self": 59.53261305000615
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.460001481580548e-07,
"count": 1,
"self": 8.460001481580548e-07
},
"TrainerController._save_models": {
"total": 0.11595162699995853,
"count": 1,
"self": 0.0026923469999928784,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11325927999996566,
"count": 1,
"self": 0.11325927999996566
}
}
}
}
}
}
}