{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4073729515075684,
"min": 1.4073729515075684,
"max": 1.4285449981689453,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70524.8671875,
"min": 68882.6640625,
"max": 77087.953125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 77.29337539432177,
"min": 77.29337539432177,
"max": 379.0530303030303,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49004.0,
"min": 49004.0,
"max": 50035.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999946.0,
"min": 49607.0,
"max": 1999946.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999946.0,
"min": 49607.0,
"max": 1999946.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4270224571228027,
"min": 0.07599532604217529,
"max": 2.4905922412872314,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1538.732177734375,
"min": 9.955388069152832,
"max": 1538.732177734375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7131955209026577,
"min": 1.8727728314982115,
"max": 3.9397490755744893,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2354.165960252285,
"min": 245.33324092626572,
"max": 2364.095301926136,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7131955209026577,
"min": 1.8727728314982115,
"max": 3.9397490755744893,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2354.165960252285,
"min": 245.33324092626572,
"max": 2364.095301926136,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015197058415570711,
"min": 0.013376872271085935,
"max": 0.019383885483128124,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.045591175246712135,
"min": 0.027495088981716737,
"max": 0.05815165644938437,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.058131462790899806,
"min": 0.021724144803980987,
"max": 0.06409553109357755,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1743943883726994,
"min": 0.043448289607961973,
"max": 0.17649627501765885,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.704898765066663e-06,
"min": 3.704898765066663e-06,
"max": 0.00029533305155565005,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1114696295199989e-05,
"min": 1.1114696295199989e-05,
"max": 0.00084429856856715,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10123493333333333,
"min": 0.10123493333333333,
"max": 0.19844435,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3037048,
"min": 0.20763715000000002,
"max": 0.58143285,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.16231733333333e-05,
"min": 7.16231733333333e-05,
"max": 0.004922373065,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002148695199999999,
"min": 0.0002148695199999999,
"max": 0.014073499215000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676847240",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676849623"
},
"total": 2383.8225083469997,
"count": 1,
"self": 0.44899078399976133,
"children": {
"run_training.setup": {
"total": 0.19086859200001527,
"count": 1,
"self": 0.19086859200001527
},
"TrainerController.start_learning": {
"total": 2383.1826489709997,
"count": 1,
"self": 4.196859161939301,
"children": {
"TrainerController._reset_env": {
"total": 11.039255212,
"count": 1,
"self": 11.039255212
},
"TrainerController.advance": {
"total": 2367.8256110960606,
"count": 232703,
"self": 4.39651916407729,
"children": {
"env_step": {
"total": 1843.7653885210118,
"count": 232703,
"self": 1543.881993889068,
"children": {
"SubprocessEnvManager._take_step": {
"total": 297.19041005696045,
"count": 232703,
"self": 15.819768018059733,
"children": {
"TorchPolicy.evaluate": {
"total": 281.3706420389007,
"count": 222986,
"self": 69.79419083793084,
"children": {
"TorchPolicy.sample_actions": {
"total": 211.57645120096987,
"count": 222986,
"self": 211.57645120096987
}
}
}
}
},
"workers": {
"total": 2.6929845749833135,
"count": 232703,
"self": 0.0,
"children": {
"worker_root": {
"total": 2375.117011702922,
"count": 232703,
"is_parallel": true,
"self": 1118.5221191669748,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0024471520000020064,
"count": 1,
"is_parallel": true,
"self": 0.0004498590000139302,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001997292999988076,
"count": 2,
"is_parallel": true,
"self": 0.001997292999988076
}
}
},
"UnityEnvironment.step": {
"total": 0.029107236000015746,
"count": 1,
"is_parallel": true,
"self": 0.0003360989999805497,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019972300003701093,
"count": 1,
"is_parallel": true,
"self": 0.00019972300003701093
},
"communicator.exchange": {
"total": 0.027638286000012613,
"count": 1,
"is_parallel": true,
"self": 0.027638286000012613
},
"steps_from_proto": {
"total": 0.0009331279999855724,
"count": 1,
"is_parallel": true,
"self": 0.0004421630000024379,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004909649999831345,
"count": 2,
"is_parallel": true,
"self": 0.0004909649999831345
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1256.594892535947,
"count": 232702,
"is_parallel": true,
"self": 38.50665326785111,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.79305248806429,
"count": 232702,
"is_parallel": true,
"self": 80.79305248806429
},
"communicator.exchange": {
"total": 1044.6320064370102,
"count": 232702,
"is_parallel": true,
"self": 1044.6320064370102
},
"steps_from_proto": {
"total": 92.66318034302134,
"count": 232702,
"is_parallel": true,
"self": 37.50480534209231,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.15837500092903,
"count": 465404,
"is_parallel": true,
"self": 55.15837500092903
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 519.6637034109715,
"count": 232703,
"self": 6.395361365035342,
"children": {
"process_trajectory": {
"total": 162.36675395893798,
"count": 232703,
"self": 161.07904010393764,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2877138550003338,
"count": 10,
"self": 1.2877138550003338
}
}
},
"_update_policy": {
"total": 350.9015880869981,
"count": 97,
"self": 293.89923987198097,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.00234821501715,
"count": 2910,
"self": 57.00234821501715
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.890000001178123e-07,
"count": 1,
"self": 9.890000001178123e-07
},
"TrainerController._save_models": {
"total": 0.12092251199965176,
"count": 1,
"self": 0.0022390099998119695,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11868350199983979,
"count": 1,
"self": 0.11868350199983979
}
}
}
}
}
}
}