{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.402302622795105,
"min": 1.402302622795105,
"max": 1.4277722835540771,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69746.328125,
"min": 67453.046875,
"max": 76627.390625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 132.584,
"min": 109.546875,
"max": 409.23770491803276,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49719.0,
"min": 49077.0,
"max": 50157.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999981.0,
"min": 49979.0,
"max": 1999981.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999981.0,
"min": 49979.0,
"max": 1999981.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.1860456466674805,
"min": 0.1568102091550827,
"max": 2.301436185836792,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 819.76708984375,
"min": 18.974035263061523,
"max": 1013.4827880859375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.248182586510976,
"min": 1.8255478456493253,
"max": 3.7022144983086407,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1218.068469941616,
"min": 220.89128932356834,
"max": 1612.7823528051376,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.248182586510976,
"min": 1.8255478456493253,
"max": 3.7022144983086407,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1218.068469941616,
"min": 220.89128932356834,
"max": 1612.7823528051376,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016591947024183658,
"min": 0.013831849939985355,
"max": 0.019532207721689093,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.033183894048367316,
"min": 0.02809368256785092,
"max": 0.05731817363218093,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.040263989195227626,
"min": 0.02234817426651716,
"max": 0.061037447117269036,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.08052797839045525,
"min": 0.04469634853303432,
"max": 0.1786975134164095,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.421498526200008e-06,
"min": 4.421498526200008e-06,
"max": 0.000295341376552875,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.842997052400016e-06,
"min": 8.842997052400016e-06,
"max": 0.0008439588186804001,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10147380000000004,
"min": 0.10147380000000004,
"max": 0.198447125,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.2029476000000001,
"min": 0.2029476000000001,
"max": 0.5813196,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.354262000000016e-05,
"min": 8.354262000000016e-05,
"max": 0.0049225115375,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00016708524000000032,
"min": 0.00016708524000000032,
"max": 0.01406784804,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701283092",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1701285569"
},
"total": 2476.63465734,
"count": 1,
"self": 0.8785393960001784,
"children": {
"run_training.setup": {
"total": 0.09007066200001645,
"count": 1,
"self": 0.09007066200001645
},
"TrainerController.start_learning": {
"total": 2475.666047282,
"count": 1,
"self": 4.599156041059359,
"children": {
"TrainerController._reset_env": {
"total": 4.371591767999973,
"count": 1,
"self": 4.371591767999973
},
"TrainerController.advance": {
"total": 2466.53798254294,
"count": 230189,
"self": 4.643235431040466,
"children": {
"env_step": {
"total": 1964.0754330539326,
"count": 230189,
"self": 1621.788007817959,
"children": {
"SubprocessEnvManager._take_step": {
"total": 339.38130977899465,
"count": 230189,
"self": 17.735043936953957,
"children": {
"TorchPolicy.evaluate": {
"total": 321.6462658420407,
"count": 223018,
"self": 321.6462658420407
}
}
},
"workers": {
"total": 2.9061154569789664,
"count": 230189,
"self": 0.0,
"children": {
"worker_root": {
"total": 2467.851145623044,
"count": 230189,
"is_parallel": true,
"self": 1150.8946067959428,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009758429999919827,
"count": 1,
"is_parallel": true,
"self": 0.000300678999963111,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006751640000288717,
"count": 2,
"is_parallel": true,
"self": 0.0006751640000288717
}
}
},
"UnityEnvironment.step": {
"total": 0.030973086000017247,
"count": 1,
"is_parallel": true,
"self": 0.00033174200001440113,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002114580000238675,
"count": 1,
"is_parallel": true,
"self": 0.0002114580000238675
},
"communicator.exchange": {
"total": 0.02963516699998081,
"count": 1,
"is_parallel": true,
"self": 0.02963516699998081
},
"steps_from_proto": {
"total": 0.0007947189999981674,
"count": 1,
"is_parallel": true,
"self": 0.0002054879999491277,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005892310000490397,
"count": 2,
"is_parallel": true,
"self": 0.0005892310000490397
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1316.9565388271012,
"count": 230188,
"is_parallel": true,
"self": 40.77407940011449,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 84.27807187301158,
"count": 230188,
"is_parallel": true,
"self": 84.27807187301158
},
"communicator.exchange": {
"total": 1101.0337415740107,
"count": 230188,
"is_parallel": true,
"self": 1101.0337415740107
},
"steps_from_proto": {
"total": 90.87064597996431,
"count": 230188,
"is_parallel": true,
"self": 31.79608686889759,
"children": {
"_process_rank_one_or_two_observation": {
"total": 59.07455911106672,
"count": 460376,
"is_parallel": true,
"self": 59.07455911106672
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 497.81931405796695,
"count": 230189,
"self": 7.272782934953739,
"children": {
"process_trajectory": {
"total": 146.4029708050117,
"count": 230189,
"self": 145.1180132810116,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2849575240001059,
"count": 10,
"self": 1.2849575240001059
}
}
},
"_update_policy": {
"total": 344.1435603180015,
"count": 96,
"self": 279.5619094100063,
"children": {
"TorchPPOOptimizer.update": {
"total": 64.5816509079952,
"count": 2880,
"self": 64.5816509079952
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.187000179925235e-06,
"count": 1,
"self": 1.187000179925235e-06
},
"TrainerController._save_models": {
"total": 0.15731574300025386,
"count": 1,
"self": 0.0025766610006030533,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1547390819996508,
"count": 1,
"self": 0.1547390819996508
}
}
}
}
}
}
}