{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.424880027770996,
"min": 1.419359803199768,
"max": 1.4278417825698853,
"count": 10
},
"Huggy.Policy.Entropy.sum": {
"value": 71566.0234375,
"min": 69006.6875,
"max": 77695.7578125,
"count": 10
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 112.83863636363637,
"min": 112.83863636363637,
"max": 370.85925925925926,
"count": 10
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49649.0,
"min": 49547.0,
"max": 50066.0,
"count": 10
},
"Huggy.Step.mean": {
"value": 499955.0,
"min": 49886.0,
"max": 499955.0,
"count": 10
},
"Huggy.Step.sum": {
"value": 499955.0,
"min": 49886.0,
"max": 499955.0,
"count": 10
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.1364572048187256,
"min": 0.19079284369945526,
"max": 2.1364572048187256,
"count": 10
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 940.0411987304688,
"min": 25.566240310668945,
"max": 940.0411987304688,
"count": 10
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.5226223757321184,
"min": 1.8495269066362239,
"max": 3.7265998373076705,
"count": 10
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1549.953845322132,
"min": 247.836605489254,
"max": 1576.3517311811447,
"count": 10
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.5226223757321184,
"min": 1.8495269066362239,
"max": 3.7265998373076705,
"count": 10
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1549.953845322132,
"min": 247.836605489254,
"max": 1576.3517311811447,
"count": 10
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01690871171530388,
"min": 0.015733202764547764,
"max": 0.017179424657661,
"count": 10
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.050726135145911636,
"min": 0.03179269783819715,
"max": 0.050726135145911636,
"count": 10
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.046751973612440954,
"min": 0.019597548774133125,
"max": 0.046751973612440954,
"count": 10
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14025592083732286,
"min": 0.03919509754826625,
"max": 0.14025592083732286,
"count": 10
},
"Huggy.Policy.LearningRate.mean": {
"value": 0.00022888877370374995,
"min": 0.00022888877370374995,
"max": 0.00029531692656102496,
"count": 10
},
"Huggy.Policy.LearningRate.sum": {
"value": 0.0006866663211112498,
"min": 0.00047324974225010006,
"max": 0.00084429001857,
"count": 10
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.17629624999999996,
"min": 0.17629624999999996,
"max": 0.19843897500000007,
"count": 10
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.5288887499999999,
"min": 0.3577499000000002,
"max": 0.5814300000000002,
"count": 10
},
"Huggy.Policy.Beta.mean": {
"value": 0.0038171828750000014,
"min": 0.0038171828750000014,
"max": 0.0049221048524999995,
"count": 10
},
"Huggy.Policy.Beta.sum": {
"value": 0.011451548625000004,
"min": 0.007891720010000002,
"max": 0.014073356999999998,
"count": 10
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677079966",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1677080580"
},
"total": 613.1260742410001,
"count": 1,
"self": 0.4239877320000005,
"children": {
"run_training.setup": {
"total": 0.10650919199997588,
"count": 1,
"self": 0.10650919199997588
},
"TrainerController.start_learning": {
"total": 612.5955773170001,
"count": 1,
"self": 1.0608020760100771,
"children": {
"TrainerController._reset_env": {
"total": 11.493426053999997,
"count": 1,
"self": 11.493426053999997
},
"TrainerController.advance": {
"total": 599.7698901539901,
"count": 59117,
"self": 1.1447138609887588,
"children": {
"env_step": {
"total": 470.28813802501384,
"count": 59117,
"self": 391.4288881670209,
"children": {
"SubprocessEnvManager._take_step": {
"total": 78.15759345000322,
"count": 59117,
"self": 4.097086826998634,
"children": {
"TorchPolicy.evaluate": {
"total": 74.06050662300458,
"count": 57682,
"self": 18.85685082902137,
"children": {
"TorchPolicy.sample_actions": {
"total": 55.203655793983216,
"count": 57682,
"self": 55.203655793983216
}
}
}
}
},
"workers": {
"total": 0.7016564079897307,
"count": 59116,
"self": 0.0,
"children": {
"worker_root": {
"total": 610.3095443380075,
"count": 59116,
"is_parallel": true,
"self": 292.44571776800933,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002080770000020493,
"count": 1,
"is_parallel": true,
"self": 0.0003421019999905184,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017386680000299748,
"count": 2,
"is_parallel": true,
"self": 0.0017386680000299748
}
}
},
"UnityEnvironment.step": {
"total": 0.02897272799998518,
"count": 1,
"is_parallel": true,
"self": 0.0003100750000157859,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020144000006894203,
"count": 1,
"is_parallel": true,
"self": 0.00020144000006894203
},
"communicator.exchange": {
"total": 0.02758207699991999,
"count": 1,
"is_parallel": true,
"self": 0.02758207699991999
},
"steps_from_proto": {
"total": 0.0008791359999804627,
"count": 1,
"is_parallel": true,
"self": 0.0004059729999426054,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004731630000378573,
"count": 2,
"is_parallel": true,
"self": 0.0004731630000378573
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 317.8638265699982,
"count": 59115,
"is_parallel": true,
"self": 9.760329399991974,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 19.73482288197829,
"count": 59115,
"is_parallel": true,
"self": 19.73482288197829
},
"communicator.exchange": {
"total": 264.7674965380096,
"count": 59115,
"is_parallel": true,
"self": 264.7674965380096
},
"steps_from_proto": {
"total": 23.601177750018337,
"count": 59115,
"is_parallel": true,
"self": 9.620837644035646,
"children": {
"_process_rank_one_or_two_observation": {
"total": 13.980340105982691,
"count": 118230,
"is_parallel": true,
"self": 13.980340105982691
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 128.3370382679875,
"count": 59116,
"self": 1.751743170001646,
"children": {
"process_trajectory": {
"total": 36.43956597698559,
"count": 59116,
"self": 36.13098706098549,
"children": {
"RLTrainer._checkpoint": {
"total": 0.30857891600010134,
"count": 2,
"self": 0.30857891600010134
}
}
},
"_update_policy": {
"total": 90.14572912100027,
"count": 25,
"self": 75.40279522299943,
"children": {
"TorchPPOOptimizer.update": {
"total": 14.742933898000842,
"count": 750,
"self": 14.742933898000842
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.6700000742275734e-06,
"count": 1,
"self": 1.6700000742275734e-06
},
"TrainerController._save_models": {
"total": 0.271457362999854,
"count": 1,
"self": 0.007578113999670677,
"children": {
"RLTrainer._checkpoint": {
"total": 0.26387924900018334,
"count": 1,
"self": 0.26387924900018334
}
}
}
}
}
}
}