{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.40592622756958,
"min": 1.40592622756958,
"max": 1.4275189638137817,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70012.3125,
"min": 69264.71875,
"max": 76371.3515625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 74.78549848942598,
"min": 68.5891364902507,
"max": 423.635593220339,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49508.0,
"min": 48996.0,
"max": 50049.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999481.0,
"min": 49626.0,
"max": 1999481.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999481.0,
"min": 49626.0,
"max": 1999481.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.543604612350464,
"min": 0.07354264706373215,
"max": 2.543604612350464,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1683.8662109375,
"min": 8.60448932647705,
"max": 1783.2874755859375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.962858159736564,
"min": 1.7300419752669132,
"max": 3.993906033791577,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2623.4121017456055,
"min": 202.41491110622883,
"max": 2793.7950977683067,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.962858159736564,
"min": 1.7300419752669132,
"max": 3.993906033791577,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2623.4121017456055,
"min": 202.41491110622883,
"max": 2793.7950977683067,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017122344099425393,
"min": 0.013811894195451814,
"max": 0.019511458782168728,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.051367032298276175,
"min": 0.028364653515048605,
"max": 0.053912733613591016,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.059176449601848924,
"min": 0.021665290277451277,
"max": 0.06487202799568574,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.17752934880554677,
"min": 0.043330580554902555,
"max": 0.19366912407179676,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.917148694316655e-06,
"min": 3.917148694316655e-06,
"max": 0.0002953413015529,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1751446082949965e-05,
"min": 1.1751446082949965e-05,
"max": 0.0008441160186279999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10130568333333335,
"min": 0.10130568333333335,
"max": 0.19844709999999996,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3039170500000001,
"min": 0.20774589999999998,
"max": 0.5813719999999998,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.515359833333315e-05,
"min": 7.515359833333315e-05,
"max": 0.0049225102899999994,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00022546079499999943,
"min": 0.00022546079499999943,
"max": 0.0140704628,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1698662138",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1698664446"
},
"total": 2307.484067522,
"count": 1,
"self": 0.4271244119995572,
"children": {
"run_training.setup": {
"total": 0.04492936899998767,
"count": 1,
"self": 0.04492936899998767
},
"TrainerController.start_learning": {
"total": 2307.0120137410004,
"count": 1,
"self": 4.1806870039813475,
"children": {
"TrainerController._reset_env": {
"total": 8.071175019000066,
"count": 1,
"self": 8.071175019000066
},
"TrainerController.advance": {
"total": 2294.630348294019,
"count": 233580,
"self": 4.4498103529131185,
"children": {
"env_step": {
"total": 1809.9351272420117,
"count": 233580,
"self": 1489.9958893449061,
"children": {
"SubprocessEnvManager._take_step": {
"total": 317.26082393408274,
"count": 233580,
"self": 16.05071460311433,
"children": {
"TorchPolicy.evaluate": {
"total": 301.2101093309684,
"count": 222929,
"self": 301.2101093309684
}
}
},
"workers": {
"total": 2.678413963022763,
"count": 233580,
"self": 0.0,
"children": {
"worker_root": {
"total": 2299.5239152479885,
"count": 233580,
"is_parallel": true,
"self": 1086.226466327967,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008515239999269397,
"count": 1,
"is_parallel": true,
"self": 0.00021884499994939688,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006326789999775428,
"count": 2,
"is_parallel": true,
"self": 0.0006326789999775428
}
}
},
"UnityEnvironment.step": {
"total": 0.04202124100004312,
"count": 1,
"is_parallel": true,
"self": 0.0003330580000238115,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023081000006186514,
"count": 1,
"is_parallel": true,
"self": 0.00023081000006186514
},
"communicator.exchange": {
"total": 0.04072863999999754,
"count": 1,
"is_parallel": true,
"self": 0.04072863999999754
},
"steps_from_proto": {
"total": 0.0007287329999599024,
"count": 1,
"is_parallel": true,
"self": 0.0002148879998458142,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005138450001140882,
"count": 2,
"is_parallel": true,
"self": 0.0005138450001140882
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1213.2974489200215,
"count": 233579,
"is_parallel": true,
"self": 39.4324367579643,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.66037477098996,
"count": 233579,
"is_parallel": true,
"self": 81.66037477098996
},
"communicator.exchange": {
"total": 1004.7452600910036,
"count": 233579,
"is_parallel": true,
"self": 1004.7452600910036
},
"steps_from_proto": {
"total": 87.45937730006358,
"count": 233579,
"is_parallel": true,
"self": 31.122486050066527,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.33689124999705,
"count": 467158,
"is_parallel": true,
"self": 56.33689124999705
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 480.2454106990941,
"count": 233580,
"self": 6.094236470022906,
"children": {
"process_trajectory": {
"total": 151.68467662907244,
"count": 233580,
"self": 150.51944123107273,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1652353979997088,
"count": 10,
"self": 1.1652353979997088
}
}
},
"_update_policy": {
"total": 322.4664975999988,
"count": 97,
"self": 262.23304381300375,
"children": {
"TorchPPOOptimizer.update": {
"total": 60.23345378699503,
"count": 2910,
"self": 60.23345378699503
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3690000741917174e-06,
"count": 1,
"self": 1.3690000741917174e-06
},
"TrainerController._save_models": {
"total": 0.12980205500025477,
"count": 1,
"self": 0.0020046860004185874,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12779736899983618,
"count": 1,
"self": 0.12779736899983618
}
}
}
}
}
}
}