{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4066733121871948,
"min": 1.4066733121871948,
"max": 1.4260395765304565,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70682.5234375,
"min": 68756.8984375,
"max": 77895.625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 83.55743243243244,
"min": 80.6704730831974,
"max": 399.568,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49466.0,
"min": 48728.0,
"max": 50148.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999320.0,
"min": 49909.0,
"max": 1999320.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999320.0,
"min": 49909.0,
"max": 1999320.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.5228323936462402,
"min": -0.00604019733145833,
"max": 2.5228323936462402,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1493.516845703125,
"min": -0.7489844560623169,
"max": 1493.516845703125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.9236251272865244,
"min": 1.815928166551936,
"max": 3.9469441567953747,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2322.7860753536224,
"min": 225.17509265244007,
"max": 2322.7860753536224,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.9236251272865244,
"min": 1.815928166551936,
"max": 3.9469441567953747,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2322.7860753536224,
"min": 225.17509265244007,
"max": 2322.7860753536224,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.013080397719265117,
"min": 0.012551949970818694,
"max": 0.01958859779179976,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.026160795438530234,
"min": 0.026160795438530234,
"max": 0.05876579337539928,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.053748563304543494,
"min": 0.024006393272429705,
"max": 0.06289466203500826,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.10749712660908699,
"min": 0.04801278654485941,
"max": 0.17065930751462777,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.684823438424995e-06,
"min": 4.684823438424995e-06,
"max": 0.00029528122657292497,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.36964687684999e-06,
"min": 9.36964687684999e-06,
"max": 0.0008437941187353,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10156157500000002,
"min": 0.10156157500000002,
"max": 0.19842707499999995,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20312315000000003,
"min": 0.20312315000000003,
"max": 0.5812647000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.792259249999991e-05,
"min": 8.792259249999991e-05,
"max": 0.004921511042499999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00017584518499999982,
"min": 0.00017584518499999982,
"max": 0.01406510853,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673424749",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673426982"
},
"total": 2233.1550201,
"count": 1,
"self": 0.4444433809999282,
"children": {
"run_training.setup": {
"total": 0.11025964899999963,
"count": 1,
"self": 0.11025964899999963
},
"TrainerController.start_learning": {
"total": 2232.60031707,
"count": 1,
"self": 3.710735082991505,
"children": {
"TrainerController._reset_env": {
"total": 7.683370799000045,
"count": 1,
"self": 7.683370799000045
},
"TrainerController.advance": {
"total": 2221.083965601009,
"count": 232677,
"self": 4.157150501929664,
"children": {
"env_step": {
"total": 1735.6076938860517,
"count": 232677,
"self": 1459.149476049994,
"children": {
"SubprocessEnvManager._take_step": {
"total": 273.8373248020031,
"count": 232677,
"self": 14.167743288913698,
"children": {
"TorchPolicy.evaluate": {
"total": 259.6695815130894,
"count": 222935,
"self": 64.90346378401887,
"children": {
"TorchPolicy.sample_actions": {
"total": 194.76611772907052,
"count": 222935,
"self": 194.76611772907052
}
}
}
}
},
"workers": {
"total": 2.6208930340546885,
"count": 232677,
"self": 0.0,
"children": {
"worker_root": {
"total": 2224.906082484216,
"count": 232677,
"is_parallel": true,
"self": 1023.0448284832044,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019696909999993295,
"count": 1,
"is_parallel": true,
"self": 0.0002939019999530501,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016757890000462794,
"count": 2,
"is_parallel": true,
"self": 0.0016757890000462794
}
}
},
"UnityEnvironment.step": {
"total": 0.026980960000059895,
"count": 1,
"is_parallel": true,
"self": 0.0003032230001736025,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019526599999153404,
"count": 1,
"is_parallel": true,
"self": 0.00019526599999153404
},
"communicator.exchange": {
"total": 0.0255622229999517,
"count": 1,
"is_parallel": true,
"self": 0.0255622229999517
},
"steps_from_proto": {
"total": 0.000920247999943058,
"count": 1,
"is_parallel": true,
"self": 0.00043187100004615786,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004883769998969001,
"count": 2,
"is_parallel": true,
"self": 0.0004883769998969001
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1201.8612540010115,
"count": 232676,
"is_parallel": true,
"self": 35.28769305793071,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 76.75263964797955,
"count": 232676,
"is_parallel": true,
"self": 76.75263964797955
},
"communicator.exchange": {
"total": 994.2759957990544,
"count": 232676,
"is_parallel": true,
"self": 994.2759957990544
},
"steps_from_proto": {
"total": 95.54492549604663,
"count": 232676,
"is_parallel": true,
"self": 39.597515421167145,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.947410074879485,
"count": 465352,
"is_parallel": true,
"self": 55.947410074879485
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 481.31912121302753,
"count": 232677,
"self": 5.926870049944682,
"children": {
"process_trajectory": {
"total": 148.31379330308255,
"count": 232677,
"self": 147.09321613008274,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2205771729998105,
"count": 10,
"self": 1.2205771729998105
}
}
},
"_update_policy": {
"total": 327.0784578600003,
"count": 96,
"self": 273.7292207259778,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.349237134022474,
"count": 2880,
"self": 53.349237134022474
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.539999155094847e-07,
"count": 1,
"self": 9.539999155094847e-07
},
"TrainerController._save_models": {
"total": 0.12224463299980926,
"count": 1,
"self": 0.0022154079997562803,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12002922500005297,
"count": 1,
"self": 0.12002922500005297
}
}
}
}
}
}
}