{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4073182344436646,
"min": 1.4073182344436646,
"max": 1.4288955926895142,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70443.3125,
"min": 69219.828125,
"max": 77798.6875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 90.08823529411765,
"min": 77.92902208201893,
"max": 413.23770491803276,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49008.0,
"min": 48908.0,
"max": 50415.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999979.0,
"min": 49954.0,
"max": 1999979.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999979.0,
"min": 49954.0,
"max": 1999979.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.450843572616577,
"min": 0.15412874519824982,
"max": 2.5021941661834717,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1333.2589111328125,
"min": 18.649578094482422,
"max": 1556.6134033203125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.761364747934482,
"min": 1.9229418403846172,
"max": 4.0289105412501085,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2046.182422876358,
"min": 232.6759626865387,
"max": 2472.999159038067,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.761364747934482,
"min": 1.9229418403846172,
"max": 4.0289105412501085,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2046.182422876358,
"min": 232.6759626865387,
"max": 2472.999159038067,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015801548398869474,
"min": 0.012923756687814602,
"max": 0.021455501274128132,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04740464519660842,
"min": 0.025847513375629204,
"max": 0.05508991567379174,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.054587341927819784,
"min": 0.02123709407945474,
"max": 0.06179736703634262,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16376202578345936,
"min": 0.04247418815890948,
"max": 0.18539210110902787,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.48874883711666e-06,
"min": 3.48874883711666e-06,
"max": 0.00029536935154354994,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.046624651134998e-05,
"min": 1.046624651134998e-05,
"max": 0.00084407356864215,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10116288333333334,
"min": 0.10116288333333334,
"max": 0.19845645000000003,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30348865,
"min": 0.20753310000000003,
"max": 0.5813578500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.802787833333323e-05,
"min": 6.802787833333323e-05,
"max": 0.004922976854999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002040836349999997,
"min": 0.0002040836349999997,
"max": 0.014069756715000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679992500",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679994870"
},
"total": 2370.213944879,
"count": 1,
"self": 0.44209720499975447,
"children": {
"run_training.setup": {
"total": 0.11435251300008531,
"count": 1,
"self": 0.11435251300008531
},
"TrainerController.start_learning": {
"total": 2369.657495161,
"count": 1,
"self": 4.295704401881721,
"children": {
"TrainerController._reset_env": {
"total": 9.504173465999997,
"count": 1,
"self": 9.504173465999997
},
"TrainerController.advance": {
"total": 2355.7431364881186,
"count": 232880,
"self": 4.586581669060706,
"children": {
"env_step": {
"total": 1828.3631720430824,
"count": 232880,
"self": 1543.3467540621155,
"children": {
"SubprocessEnvManager._take_step": {
"total": 282.15866295693,
"count": 232880,
"self": 16.58630601900893,
"children": {
"TorchPolicy.evaluate": {
"total": 265.5723569379211,
"count": 223052,
"self": 265.5723569379211
}
}
},
"workers": {
"total": 2.857755024036919,
"count": 232880,
"self": 0.0,
"children": {
"worker_root": {
"total": 2361.7492987700934,
"count": 232880,
"is_parallel": true,
"self": 1104.8727152869294,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009497849999888786,
"count": 1,
"is_parallel": true,
"self": 0.00025866100008897774,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006911239998999008,
"count": 2,
"is_parallel": true,
"self": 0.0006911239998999008
}
}
},
"UnityEnvironment.step": {
"total": 0.02864530800002285,
"count": 1,
"is_parallel": true,
"self": 0.00027862499996444967,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021975399999973888,
"count": 1,
"is_parallel": true,
"self": 0.00021975399999973888
},
"communicator.exchange": {
"total": 0.0274698380000018,
"count": 1,
"is_parallel": true,
"self": 0.0274698380000018
},
"steps_from_proto": {
"total": 0.000677091000056862,
"count": 1,
"is_parallel": true,
"self": 0.00018914899999344925,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004879420000634127,
"count": 2,
"is_parallel": true,
"self": 0.0004879420000634127
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1256.876583483164,
"count": 232879,
"is_parallel": true,
"self": 38.203242365256074,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.17453304894843,
"count": 232879,
"is_parallel": true,
"self": 77.17453304894843
},
"communicator.exchange": {
"total": 1052.9924773070402,
"count": 232879,
"is_parallel": true,
"self": 1052.9924773070402
},
"steps_from_proto": {
"total": 88.50633076191934,
"count": 232879,
"is_parallel": true,
"self": 33.22439985095309,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.28193091096625,
"count": 465758,
"is_parallel": true,
"self": 55.28193091096625
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 522.7933827759756,
"count": 232880,
"self": 6.3551337769888505,
"children": {
"process_trajectory": {
"total": 144.99032242198814,
"count": 232880,
"self": 143.6220153169893,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3683071049988484,
"count": 10,
"self": 1.3683071049988484
}
}
},
"_update_policy": {
"total": 371.4479265769986,
"count": 97,
"self": 312.67250409600047,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.775422480998145,
"count": 2910,
"self": 58.775422480998145
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2539999261207413e-06,
"count": 1,
"self": 1.2539999261207413e-06
},
"TrainerController._save_models": {
"total": 0.11447955099993123,
"count": 1,
"self": 0.0022012740000718622,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11227827699985937,
"count": 1,
"self": 0.11227827699985937
}
}
}
}
}
}
}