{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4054142236709595,
"min": 1.4054142236709595,
"max": 1.4257813692092896,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70730.28125,
"min": 68409.9765625,
"max": 77080.2734375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 105.92521367521367,
"min": 85.81818181818181,
"max": 407.0731707317073,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49573.0,
"min": 49088.0,
"max": 50070.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999990.0,
"min": 49948.0,
"max": 1999990.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999990.0,
"min": 49948.0,
"max": 1999990.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3639047145843506,
"min": 0.0655822902917862,
"max": 2.418222427368164,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1106.307373046875,
"min": 8.066621780395508,
"max": 1358.427734375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7115710114057245,
"min": 1.8597078354862648,
"max": 3.899992068925227,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1737.0152333378792,
"min": 228.74406376481056,
"max": 2098.3632075190544,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7115710114057245,
"min": 1.8597078354862648,
"max": 3.899992068925227,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1737.0152333378792,
"min": 228.74406376481056,
"max": 2098.3632075190544,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.0178321598127089,
"min": 0.012664918708226953,
"max": 0.02023060648288164,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0356643196254178,
"min": 0.025329837416453907,
"max": 0.06069181944864492,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04966866156707207,
"min": 0.02149637903397282,
"max": 0.059072498646047376,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.09933732313414415,
"min": 0.04299275806794564,
"max": 0.17721749593814212,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.597598467499995e-06,
"min": 4.597598467499995e-06,
"max": 0.00029536537654487494,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.19519693499999e-06,
"min": 9.19519693499999e-06,
"max": 0.0008440651686449497,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10153250000000001,
"min": 0.10153250000000001,
"max": 0.19845512500000007,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20306500000000002,
"min": 0.20306500000000002,
"max": 0.58135505,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.647174999999992e-05,
"min": 8.647174999999992e-05,
"max": 0.004922910737500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00017294349999999985,
"min": 0.00017294349999999985,
"max": 0.014069616994999999,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1729500403",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1729504347"
},
"total": 3944.6039693479997,
"count": 1,
"self": 0.7102142819999244,
"children": {
"run_training.setup": {
"total": 0.11042439900001,
"count": 1,
"self": 0.11042439900001
},
"TrainerController.start_learning": {
"total": 3943.783330667,
"count": 1,
"self": 8.759123765019012,
"children": {
"TrainerController._reset_env": {
"total": 5.125281860999962,
"count": 1,
"self": 5.125281860999962
},
"TrainerController.advance": {
"total": 3929.730205859981,
"count": 232193,
"self": 8.55930653896985,
"children": {
"env_step": {
"total": 3265.790126001982,
"count": 232193,
"self": 2569.3039464049525,
"children": {
"SubprocessEnvManager._take_step": {
"total": 691.3640417081156,
"count": 232193,
"self": 32.42662416814983,
"children": {
"TorchPolicy.evaluate": {
"total": 658.9374175399657,
"count": 223022,
"self": 658.9374175399657
}
}
},
"workers": {
"total": 5.122137888914267,
"count": 232193,
"self": 0.0,
"children": {
"worker_root": {
"total": 3929.317108145953,
"count": 232193,
"is_parallel": true,
"self": 1872.7630294389228,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010830049999981384,
"count": 1,
"is_parallel": true,
"self": 0.0003413069999851359,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007416980000130025,
"count": 2,
"is_parallel": true,
"self": 0.0007416980000130025
}
}
},
"UnityEnvironment.step": {
"total": 0.0367936779999809,
"count": 1,
"is_parallel": true,
"self": 0.00047674099999994723,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022344199999224656,
"count": 1,
"is_parallel": true,
"self": 0.00022344199999224656
},
"communicator.exchange": {
"total": 0.03525428800003283,
"count": 1,
"is_parallel": true,
"self": 0.03525428800003283
},
"steps_from_proto": {
"total": 0.0008392069999558771,
"count": 1,
"is_parallel": true,
"self": 0.0002597509999873182,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005794559999685589,
"count": 2,
"is_parallel": true,
"self": 0.0005794559999685589
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2056.5540787070304,
"count": 232192,
"is_parallel": true,
"self": 64.83056385213854,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 121.69491896701959,
"count": 232192,
"is_parallel": true,
"self": 121.69491896701959
},
"communicator.exchange": {
"total": 1729.2669825319017,
"count": 232192,
"is_parallel": true,
"self": 1729.2669825319017
},
"steps_from_proto": {
"total": 140.76161335597067,
"count": 232192,
"is_parallel": true,
"self": 48.862977490840535,
"children": {
"_process_rank_one_or_two_observation": {
"total": 91.89863586513013,
"count": 464384,
"is_parallel": true,
"self": 91.89863586513013
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 655.3807733190293,
"count": 232193,
"self": 12.927968748982153,
"children": {
"process_trajectory": {
"total": 225.44806973304776,
"count": 232193,
"self": 223.7871647430468,
"children": {
"RLTrainer._checkpoint": {
"total": 1.6609049900009722,
"count": 10,
"self": 1.6609049900009722
}
}
},
"_update_policy": {
"total": 417.00473483699943,
"count": 96,
"self": 332.6667140020023,
"children": {
"TorchPPOOptimizer.update": {
"total": 84.33802083499711,
"count": 2880,
"self": 84.33802083499711
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4469997040578164e-06,
"count": 1,
"self": 1.4469997040578164e-06
},
"TrainerController._save_models": {
"total": 0.16871773399998347,
"count": 1,
"self": 0.004062254999553261,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1646554790004302,
"count": 1,
"self": 0.1646554790004302
}
}
}
}
}
}
}