{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4036943912506104,
"min": 1.4036943912506104,
"max": 1.427950382232666,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69635.875,
"min": 68998.015625,
"max": 76787.90625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 79.34189406099519,
"min": 75.32824427480917,
"max": 387.6279069767442,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49430.0,
"min": 49216.0,
"max": 50004.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999972.0,
"min": 49422.0,
"max": 1999972.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999972.0,
"min": 49422.0,
"max": 1999972.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.481522798538208,
"min": 0.07875049114227295,
"max": 2.5269083976745605,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1545.98876953125,
"min": 10.080062866210938,
"max": 1616.9422607421875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.911913550014863,
"min": 1.8241050431970507,
"max": 4.039455529598617,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2437.12214165926,
"min": 233.4854455292225,
"max": 2576.8544570207596,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.911913550014863,
"min": 1.8241050431970507,
"max": 4.039455529598617,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2437.12214165926,
"min": 233.4854455292225,
"max": 2576.8544570207596,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01591075180977997,
"min": 0.01410970851979477,
"max": 0.020101180782269995,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.047732255429339905,
"min": 0.02899217426117199,
"max": 0.06016871039949667,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05947538928853141,
"min": 0.022507093598445257,
"max": 0.06346922311931849,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17842616786559423,
"min": 0.045014187196890515,
"max": 0.17842616786559423,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.7657987447666712e-06,
"min": 3.7657987447666712e-06,
"max": 0.0002953467765510749,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1297396234300013e-05,
"min": 1.1297396234300013e-05,
"max": 0.00084413026862325,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10125523333333335,
"min": 0.10125523333333335,
"max": 0.19844892500000005,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3037657,
"min": 0.20763355000000006,
"max": 0.5813767500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.263614333333342e-05,
"min": 7.263614333333342e-05,
"max": 0.0049226013575000005,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021790843000000026,
"min": 0.00021790843000000026,
"max": 0.014070699825000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675190640",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675192828"
},
"total": 2188.007763639,
"count": 1,
"self": 0.45193010299999514,
"children": {
"run_training.setup": {
"total": 0.10513413199998922,
"count": 1,
"self": 0.10513413199998922
},
"TrainerController.start_learning": {
"total": 2187.4506994040003,
"count": 1,
"self": 3.688512187964534,
"children": {
"TrainerController._reset_env": {
"total": 10.033902658999978,
"count": 1,
"self": 10.033902658999978
},
"TrainerController.advance": {
"total": 2173.611348772036,
"count": 233116,
"self": 4.166959232141835,
"children": {
"env_step": {
"total": 1712.0870169449258,
"count": 233116,
"self": 1441.4105024579542,
"children": {
"SubprocessEnvManager._take_step": {
"total": 268.13260019102063,
"count": 233116,
"self": 14.157209633951652,
"children": {
"TorchPolicy.evaluate": {
"total": 253.97539055706898,
"count": 222873,
"self": 63.687696769069134,
"children": {
"TorchPolicy.sample_actions": {
"total": 190.28769378799984,
"count": 222873,
"self": 190.28769378799984
}
}
}
}
},
"workers": {
"total": 2.5439142959510264,
"count": 233116,
"self": 0.0,
"children": {
"worker_root": {
"total": 2179.7030328739083,
"count": 233116,
"is_parallel": true,
"self": 989.4099094719579,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001640661999999793,
"count": 1,
"is_parallel": true,
"self": 0.0003326030000607716,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013080589999390213,
"count": 2,
"is_parallel": true,
"self": 0.0013080589999390213
}
}
},
"UnityEnvironment.step": {
"total": 0.02725165399999696,
"count": 1,
"is_parallel": true,
"self": 0.00026923199993689195,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001868499999773121,
"count": 1,
"is_parallel": true,
"self": 0.0001868499999773121
},
"communicator.exchange": {
"total": 0.025919751000060387,
"count": 1,
"is_parallel": true,
"self": 0.025919751000060387
},
"steps_from_proto": {
"total": 0.0008758210000223698,
"count": 1,
"is_parallel": true,
"self": 0.00039758299999448354,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004782380000278863,
"count": 2,
"is_parallel": true,
"self": 0.0004782380000278863
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1190.2931234019504,
"count": 233115,
"is_parallel": true,
"self": 34.20658011695127,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 73.70842233304461,
"count": 233115,
"is_parallel": true,
"self": 73.70842233304461
},
"communicator.exchange": {
"total": 992.6882272030336,
"count": 233115,
"is_parallel": true,
"self": 992.6882272030336
},
"steps_from_proto": {
"total": 89.68989374892101,
"count": 233115,
"is_parallel": true,
"self": 37.100943798019784,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.588949950901224,
"count": 466230,
"is_parallel": true,
"self": 52.588949950901224
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 457.3573725949683,
"count": 233116,
"self": 5.997789705886589,
"children": {
"process_trajectory": {
"total": 149.56374839708144,
"count": 233116,
"self": 148.4699619090817,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0937864879997505,
"count": 10,
"self": 1.0937864879997505
}
}
},
"_update_policy": {
"total": 301.79583449200027,
"count": 97,
"self": 248.49928454499286,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.296549947007406,
"count": 2910,
"self": 53.296549947007406
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.859997251420282e-07,
"count": 1,
"self": 8.859997251420282e-07
},
"TrainerController._save_models": {
"total": 0.11693489900017084,
"count": 1,
"self": 0.00202515000000858,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11490974900016226,
"count": 1,
"self": 0.11490974900016226
}
}
}
}
}
}
}