{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4079722166061401,
"min": 1.4079722166061401,
"max": 1.4315378665924072,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69877.6640625,
"min": 68733.7109375,
"max": 77918.609375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 88.52508960573476,
"min": 78.9664,
"max": 381.29007633587787,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49397.0,
"min": 48983.0,
"max": 50201.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999930.0,
"min": 49501.0,
"max": 1999930.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999930.0,
"min": 49501.0,
"max": 1999930.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4398248195648193,
"min": 0.060123853385448456,
"max": 2.4667282104492188,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1361.4222412109375,
"min": 7.81610107421875,
"max": 1503.6253662109375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7282522656157027,
"min": 1.9014848898236567,
"max": 3.9366367850390778,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2080.364764213562,
"min": 247.1930356770754,
"max": 2369.855344593525,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7282522656157027,
"min": 1.9014848898236567,
"max": 3.9366367850390778,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2080.364764213562,
"min": 247.1930356770754,
"max": 2369.855344593525,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015625600349742145,
"min": 0.012922528245932579,
"max": 0.019305834575303986,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04687680104922644,
"min": 0.025845056491865158,
"max": 0.05766584449544704,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05181531897849507,
"min": 0.021403979416936635,
"max": 0.06343161654141215,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1554459569354852,
"min": 0.04280795883387327,
"max": 0.19029484962423643,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.870498709866662e-06,
"min": 3.870498709866662e-06,
"max": 0.0002953572015476,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1611496129599986e-05,
"min": 1.1611496129599986e-05,
"max": 0.0008440495686501499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10129013333333331,
"min": 0.10129013333333331,
"max": 0.19845240000000008,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30387039999999993,
"min": 0.2077868,
"max": 0.5813498500000002,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.437765333333326e-05,
"min": 7.437765333333326e-05,
"max": 0.00492277476,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002231329599999998,
"min": 0.0002231329599999998,
"max": 0.014069357515,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1672777978",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1672780337"
},
"total": 2359.2796870899997,
"count": 1,
"self": 0.39930682099975456,
"children": {
"run_training.setup": {
"total": 0.10337660199991205,
"count": 1,
"self": 0.10337660199991205
},
"TrainerController.start_learning": {
"total": 2358.777003667,
"count": 1,
"self": 4.7002911831382335,
"children": {
"TrainerController._reset_env": {
"total": 8.106044945000008,
"count": 1,
"self": 8.106044945000008
},
"TrainerController.advance": {
"total": 2345.8522785998616,
"count": 232420,
"self": 4.451382925737107,
"children": {
"env_step": {
"total": 1862.1310798770623,
"count": 232420,
"self": 1561.0445430999598,
"children": {
"SubprocessEnvManager._take_step": {
"total": 298.08024995500466,
"count": 232420,
"self": 15.330904214068596,
"children": {
"TorchPolicy.evaluate": {
"total": 282.74934574093606,
"count": 222928,
"self": 70.2750317020101,
"children": {
"TorchPolicy.sample_actions": {
"total": 212.47431403892597,
"count": 222928,
"self": 212.47431403892597
}
}
}
}
},
"workers": {
"total": 3.006286822097991,
"count": 232420,
"self": 0.0,
"children": {
"worker_root": {
"total": 2350.2029467060247,
"count": 232420,
"is_parallel": true,
"self": 1071.3675580160316,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021068759999707254,
"count": 1,
"is_parallel": true,
"self": 0.0003070489999572601,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017998270000134653,
"count": 2,
"is_parallel": true,
"self": 0.0017998270000134653
}
}
},
"UnityEnvironment.step": {
"total": 0.02685275700002876,
"count": 1,
"is_parallel": true,
"self": 0.00028207900015786436,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019693299998380098,
"count": 1,
"is_parallel": true,
"self": 0.00019693299998380098
},
"communicator.exchange": {
"total": 0.025657058999968285,
"count": 1,
"is_parallel": true,
"self": 0.025657058999968285
},
"steps_from_proto": {
"total": 0.0007166859999188091,
"count": 1,
"is_parallel": true,
"self": 0.0002471329999025329,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004695530000162762,
"count": 2,
"is_parallel": true,
"self": 0.0004695530000162762
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1278.8353886899931,
"count": 232419,
"is_parallel": true,
"self": 35.80268463311768,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 79.30227331693288,
"count": 232419,
"is_parallel": true,
"self": 79.30227331693288
},
"communicator.exchange": {
"total": 1063.2945140320567,
"count": 232419,
"is_parallel": true,
"self": 1063.2945140320567
},
"steps_from_proto": {
"total": 100.43591670788601,
"count": 232419,
"is_parallel": true,
"self": 41.97863699486879,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.457279713017215,
"count": 464838,
"is_parallel": true,
"self": 58.457279713017215
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 479.26981579706194,
"count": 232420,
"self": 7.025895584151272,
"children": {
"process_trajectory": {
"total": 158.21403121590993,
"count": 232420,
"self": 157.05176705990993,
"children": {
"RLTrainer._checkpoint": {
"total": 1.162264155999992,
"count": 10,
"self": 1.162264155999992
}
}
},
"_update_policy": {
"total": 314.02988899700074,
"count": 97,
"self": 260.2365002260159,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.79338877098485,
"count": 2910,
"self": 53.79338877098485
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.090002327458933e-07,
"count": 1,
"self": 7.090002327458933e-07
},
"TrainerController._save_models": {
"total": 0.11838822999970944,
"count": 1,
"self": 0.002582323999831715,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11580590599987772,
"count": 1,
"self": 0.11580590599987772
}
}
}
}
}
}
}