{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4031903743743896,
"min": 1.4031903743743896,
"max": 1.4276707172393799,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69730.140625,
"min": 68356.28125,
"max": 78225.6328125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 97.00980392156863,
"min": 88.5752688172043,
"max": 416.09166666666664,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49475.0,
"min": 48860.0,
"max": 50157.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999968.0,
"min": 49301.0,
"max": 1999968.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999968.0,
"min": 49301.0,
"max": 1999968.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.304427146911621,
"min": 0.1301313191652298,
"max": 2.4216341972351074,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1175.2578125,
"min": 15.485627174377441,
"max": 1322.032470703125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5729082961877188,
"min": 1.9031912872270376,
"max": 3.9220166506623864,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1822.1832310557365,
"min": 226.47976318001747,
"max": 2086.5128581523895,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5729082961877188,
"min": 1.9031912872270376,
"max": 3.9220166506623864,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1822.1832310557365,
"min": 226.47976318001747,
"max": 2086.5128581523895,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01800629577677076,
"min": 0.013849369716869356,
"max": 0.021390682798422253,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05401888733031228,
"min": 0.02769873943373871,
"max": 0.06306579166048323,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.055813063763909876,
"min": 0.021189078409224748,
"max": 0.06549064082403977,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16743919129172963,
"min": 0.042378156818449496,
"max": 0.19626108594238761,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4588988470666723e-06,
"min": 3.4588988470666723e-06,
"max": 0.00029537407654197496,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0376696541200017e-05,
"min": 1.0376696541200017e-05,
"max": 0.0008442165185945,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10115293333333335,
"min": 0.10115293333333335,
"max": 0.19845802499999998,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30345880000000003,
"min": 0.20745364999999993,
"max": 0.5814055,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.753137333333346e-05,
"min": 6.753137333333346e-05,
"max": 0.0049230554475,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020259412000000037,
"min": 0.00020259412000000037,
"max": 0.01407213445,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1670822284",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1670827175"
},
"total": 4891.333405507,
"count": 1,
"self": 0.44786426999962714,
"children": {
"run_training.setup": {
"total": 0.17361903300002268,
"count": 1,
"self": 0.17361903300002268
},
"TrainerController.start_learning": {
"total": 4890.711922204,
"count": 1,
"self": 6.070225432978077,
"children": {
"TrainerController._reset_env": {
"total": 9.081358973000079,
"count": 1,
"self": 9.081358973000079
},
"TrainerController.advance": {
"total": 4875.426702435021,
"count": 231354,
"self": 5.50657033150037,
"children": {
"env_step": {
"total": 2313.8609092006745,
"count": 231354,
"self": 1889.5198155639464,
"children": {
"SubprocessEnvManager._take_step": {
"total": 419.97341973775156,
"count": 231354,
"self": 21.825351979623292,
"children": {
"TorchPolicy.evaluate": {
"total": 398.14806775812826,
"count": 222896,
"self": 47.42889918699825,
"children": {
"TorchPolicy.sample_actions": {
"total": 350.71916857113,
"count": 222896,
"self": 350.71916857113
}
}
}
}
},
"workers": {
"total": 4.367673898976591,
"count": 231354,
"self": 0.0,
"children": {
"worker_root": {
"total": 4878.015777337923,
"count": 231354,
"is_parallel": true,
"self": 3366.3363934388694,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025458299999172596,
"count": 1,
"is_parallel": true,
"self": 0.0003417399998397741,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022040900000774855,
"count": 2,
"is_parallel": true,
"self": 0.0022040900000774855
}
}
},
"UnityEnvironment.step": {
"total": 0.028869528999848626,
"count": 1,
"is_parallel": true,
"self": 0.00027524999995875987,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020797000001948618,
"count": 1,
"is_parallel": true,
"self": 0.00020797000001948618
},
"communicator.exchange": {
"total": 0.027670297999975446,
"count": 1,
"is_parallel": true,
"self": 0.027670297999975446
},
"steps_from_proto": {
"total": 0.0007160109998949338,
"count": 1,
"is_parallel": true,
"self": 0.00023696199991718458,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004790489999777492,
"count": 2,
"is_parallel": true,
"self": 0.0004790489999777492
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1511.6793838990538,
"count": 231353,
"is_parallel": true,
"self": 46.13832851901452,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.31534391210744,
"count": 231353,
"is_parallel": true,
"self": 81.31534391210744
},
"communicator.exchange": {
"total": 1264.2269795600057,
"count": 231353,
"is_parallel": true,
"self": 1264.2269795600057
},
"steps_from_proto": {
"total": 119.99873190792619,
"count": 231353,
"is_parallel": true,
"self": 44.83125212508867,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.16747978283752,
"count": 462706,
"is_parallel": true,
"self": 75.16747978283752
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2556.059222902846,
"count": 231354,
"self": 11.325239226972826,
"children": {
"process_trajectory": {
"total": 266.8866002218697,
"count": 231354,
"self": 266.3669156168694,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5196846050002932,
"count": 4,
"self": 0.5196846050002932
}
}
},
"_update_policy": {
"total": 2277.8473834540036,
"count": 97,
"self": 312.9590774600222,
"children": {
"TorchPPOOptimizer.update": {
"total": 1964.8883059939815,
"count": 2910,
"self": 1964.8883059939815
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.500001058564521e-07,
"count": 1,
"self": 8.500001058564521e-07
},
"TrainerController._save_models": {
"total": 0.13363451300028828,
"count": 1,
"self": 0.004552239000986447,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12908227399930183,
"count": 1,
"self": 0.12908227399930183
}
}
}
}
}
}
}