{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4080002307891846,
"min": 1.4080002307891846,
"max": 1.4288692474365234,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 72140.296875,
"min": 68948.90625,
"max": 77384.296875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 71.15007215007215,
"min": 69.46826516220028,
"max": 391.5859375,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49307.0,
"min": 49233.0,
"max": 50131.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999983.0,
"min": 49735.0,
"max": 1999983.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999983.0,
"min": 49735.0,
"max": 1999983.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4651577472686768,
"min": 0.07884800434112549,
"max": 2.52730655670166,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1708.354248046875,
"min": 10.013696670532227,
"max": 1753.563232421875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8106377018994584,
"min": 1.7310717082399083,
"max": 4.01116632797987,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2640.7719274163246,
"min": 219.84610694646835,
"max": 2739.8075647950172,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8106377018994584,
"min": 1.7310717082399083,
"max": 4.01116632797987,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2640.7719274163246,
"min": 219.84610694646835,
"max": 2739.8075647950172,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017286441928040908,
"min": 0.013738077344411674,
"max": 0.019735542009584606,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05185932578412272,
"min": 0.027476154688823348,
"max": 0.056831908676152426,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06497033602661557,
"min": 0.022829645437498887,
"max": 0.06497033602661557,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1949110080798467,
"min": 0.045659290874997774,
"max": 0.1949110080798467,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.3746488751500085e-06,
"min": 3.3746488751500085e-06,
"max": 0.0002952639015787,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0123946625450025e-05,
"min": 1.0123946625450025e-05,
"max": 0.0008438568187143999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10112485,
"min": 0.10112485,
"max": 0.19842129999999997,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30337455,
"min": 0.20737920000000004,
"max": 0.5812856000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.613001500000016e-05,
"min": 6.613001500000016e-05,
"max": 0.00492122287,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001983900450000005,
"min": 0.0001983900450000005,
"max": 0.014066151439999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681543347",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681545864"
},
"total": 2516.748195644,
"count": 1,
"self": 0.4387531120000858,
"children": {
"run_training.setup": {
"total": 0.11516139200000453,
"count": 1,
"self": 0.11516139200000453
},
"TrainerController.start_learning": {
"total": 2516.19428114,
"count": 1,
"self": 4.597975983880588,
"children": {
"TrainerController._reset_env": {
"total": 3.9774160970000025,
"count": 1,
"self": 3.9774160970000025
},
"TrainerController.advance": {
"total": 2507.4969327101194,
"count": 233190,
"self": 5.334310227271544,
"children": {
"env_step": {
"total": 1953.1654414718805,
"count": 233190,
"self": 1657.3099975209036,
"children": {
"SubprocessEnvManager._take_step": {
"total": 292.8552575309985,
"count": 233190,
"self": 17.265808678032215,
"children": {
"TorchPolicy.evaluate": {
"total": 275.58944885296626,
"count": 223014,
"self": 275.58944885296626
}
}
},
"workers": {
"total": 3.0001864199783768,
"count": 233190,
"self": 0.0,
"children": {
"worker_root": {
"total": 2507.8047765039346,
"count": 233190,
"is_parallel": true,
"self": 1150.1146080448639,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0011299380000195924,
"count": 1,
"is_parallel": true,
"self": 0.0002802080000492424,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00084972999997035,
"count": 2,
"is_parallel": true,
"self": 0.00084972999997035
}
}
},
"UnityEnvironment.step": {
"total": 0.0296065139999655,
"count": 1,
"is_parallel": true,
"self": 0.00036408299990853266,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002884820000303989,
"count": 1,
"is_parallel": true,
"self": 0.0002884820000303989
},
"communicator.exchange": {
"total": 0.028256415000043944,
"count": 1,
"is_parallel": true,
"self": 0.028256415000043944
},
"steps_from_proto": {
"total": 0.0006975339999826247,
"count": 1,
"is_parallel": true,
"self": 0.00021235299993804801,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00048518100004457665,
"count": 2,
"is_parallel": true,
"self": 0.00048518100004457665
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1357.6901684590707,
"count": 233189,
"is_parallel": true,
"self": 40.61391308518478,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.52552504102175,
"count": 233189,
"is_parallel": true,
"self": 83.52552504102175
},
"communicator.exchange": {
"total": 1139.1280270448656,
"count": 233189,
"is_parallel": true,
"self": 1139.1280270448656
},
"steps_from_proto": {
"total": 94.42270328799856,
"count": 233189,
"is_parallel": true,
"self": 35.72449040099008,
"children": {
"_process_rank_one_or_two_observation": {
"total": 58.69821288700848,
"count": 466378,
"is_parallel": true,
"self": 58.69821288700848
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 548.9971810109671,
"count": 233190,
"self": 7.040405595036077,
"children": {
"process_trajectory": {
"total": 146.76553318293145,
"count": 233190,
"self": 145.39846542793077,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3670677550006758,
"count": 10,
"self": 1.3670677550006758
}
}
},
"_update_policy": {
"total": 395.19124223299957,
"count": 97,
"self": 334.02748900999075,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.163753223008825,
"count": 2910,
"self": 61.163753223008825
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0619996828609146e-06,
"count": 1,
"self": 1.0619996828609146e-06
},
"TrainerController._save_models": {
"total": 0.12195528700021896,
"count": 1,
"self": 0.0021523210002669657,
"children": {
"RLTrainer._checkpoint": {
"total": 0.119802965999952,
"count": 1,
"self": 0.119802965999952
}
}
}
}
}
}
}