{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4064817428588867,
"min": 1.4064817428588867,
"max": 1.426000714302063,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71322.6875,
"min": 68826.0859375,
"max": 74902.0625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 88.26964285714286,
"min": 85.75694444444444,
"max": 428.86324786324786,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49431.0,
"min": 48970.0,
"max": 50177.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999993.0,
"min": 49964.0,
"max": 1999993.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999993.0,
"min": 49964.0,
"max": 1999993.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4199767112731934,
"min": 0.00884157419204712,
"max": 2.462172031402588,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1355.18701171875,
"min": 1.0256226062774658,
"max": 1387.986328125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.786375030236585,
"min": 1.9388038071578946,
"max": 3.9352747741076537,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2120.3700169324875,
"min": 224.90124163031578,
"max": 2210.2280168533325,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.786375030236585,
"min": 1.9388038071578946,
"max": 3.9352747741076537,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2120.3700169324875,
"min": 224.90124163031578,
"max": 2210.2280168533325,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016757256060373038,
"min": 0.013966936854315767,
"max": 0.01927359837378996,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.050271768181119114,
"min": 0.027933873708631533,
"max": 0.057637744437670335,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.056581862312224174,
"min": 0.02385709695518017,
"max": 0.06041770130395889,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16974558693667252,
"min": 0.04771419391036034,
"max": 0.18071451100210348,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.488148837316664e-06,
"min": 3.488148837316664e-06,
"max": 0.0002952849765716751,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0464446511949991e-05,
"min": 1.0464446511949991e-05,
"max": 0.0008439700686766499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10116268333333334,
"min": 0.10116268333333334,
"max": 0.19842832500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30348805,
"min": 0.20748225000000003,
"max": 0.58132335,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.801789833333332e-05,
"min": 6.801789833333332e-05,
"max": 0.004921573417500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020405369499999995,
"min": 0.00020405369499999995,
"max": 0.014068035165000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670836447",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670838645"
},
"total": 2198.915363366,
"count": 1,
"self": 0.39741177899986724,
"children": {
"run_training.setup": {
"total": 0.10277678299996751,
"count": 1,
"self": 0.10277678299996751
},
"TrainerController.start_learning": {
"total": 2198.415174804,
"count": 1,
"self": 3.7929951460887423,
"children": {
"TrainerController._reset_env": {
"total": 9.759174825000002,
"count": 1,
"self": 9.759174825000002
},
"TrainerController.advance": {
"total": 2184.7378325329114,
"count": 231828,
"self": 3.8369617578828183,
"children": {
"env_step": {
"total": 1716.9362875980025,
"count": 231828,
"self": 1443.995790237978,
"children": {
"SubprocessEnvManager._take_step": {
"total": 270.3848209139931,
"count": 231828,
"self": 14.149871071002337,
"children": {
"TorchPolicy.evaluate": {
"total": 256.2349498429908,
"count": 222926,
"self": 64.92293158998638,
"children": {
"TorchPolicy.sample_actions": {
"total": 191.3120182530044,
"count": 222926,
"self": 191.3120182530044
}
}
}
}
},
"workers": {
"total": 2.5556764460313275,
"count": 231828,
"self": 0.0,
"children": {
"worker_root": {
"total": 2190.7956522638847,
"count": 231828,
"is_parallel": true,
"self": 1002.0357012339146,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001968355999963478,
"count": 1,
"is_parallel": true,
"self": 0.00032985999996526516,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001638495999998213,
"count": 2,
"is_parallel": true,
"self": 0.001638495999998213
}
}
},
"UnityEnvironment.step": {
"total": 0.02625238499996385,
"count": 1,
"is_parallel": true,
"self": 0.00025237699998115204,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001819199999886223,
"count": 1,
"is_parallel": true,
"self": 0.0001819199999886223
},
"communicator.exchange": {
"total": 0.02514554700002236,
"count": 1,
"is_parallel": true,
"self": 0.02514554700002236
},
"steps_from_proto": {
"total": 0.0006725409999717158,
"count": 1,
"is_parallel": true,
"self": 0.00021939199996268144,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00045314900000903435,
"count": 2,
"is_parallel": true,
"self": 0.00045314900000903435
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1188.7599510299701,
"count": 231827,
"is_parallel": true,
"self": 34.47788882694431,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 75.50220663402757,
"count": 231827,
"is_parallel": true,
"self": 75.50220663402757
},
"communicator.exchange": {
"total": 986.7507889290109,
"count": 231827,
"is_parallel": true,
"self": 986.7507889290109
},
"steps_from_proto": {
"total": 92.02906663998738,
"count": 231827,
"is_parallel": true,
"self": 37.77901926601186,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.25004737397552,
"count": 463654,
"is_parallel": true,
"self": 54.25004737397552
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 463.96458317702616,
"count": 231828,
"self": 6.002005326070162,
"children": {
"process_trajectory": {
"total": 145.51010185195315,
"count": 231828,
"self": 144.9122769389523,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5978249130008635,
"count": 4,
"self": 0.5978249130008635
}
}
},
"_update_policy": {
"total": 312.45247599900284,
"count": 97,
"self": 258.4878052459944,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.96467075300842,
"count": 2910,
"self": 53.96467075300842
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.599998520570807e-07,
"count": 1,
"self": 9.599998520570807e-07
},
"TrainerController._save_models": {
"total": 0.1251713399997243,
"count": 1,
"self": 0.002845296000032249,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12232604399969205,
"count": 1,
"self": 0.12232604399969205
}
}
}
}
}
}
}