{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4109584093093872,
"min": 1.4109524488449097,
"max": 1.430421233177185,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69532.03125,
"min": 68999.265625,
"max": 76575.671875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 93.2811320754717,
"min": 88.78494623655914,
"max": 407.6910569105691,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49439.0,
"min": 48884.0,
"max": 50146.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999981.0,
"min": 49519.0,
"max": 1999981.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999981.0,
"min": 49519.0,
"max": 1999981.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3295438289642334,
"min": 0.023999741300940514,
"max": 2.429175615310669,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1236.98779296875,
"min": 2.9279685020446777,
"max": 1302.7757568359375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.6713607834332858,
"min": 1.8121163608109365,
"max": 3.8390875656173065,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1949.4925760030746,
"min": 221.07819601893425,
"max": 2043.8091655373573,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.6713607834332858,
"min": 1.8121163608109365,
"max": 3.8390875656173065,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1949.4925760030746,
"min": 221.07819601893425,
"max": 2043.8091655373573,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01620242766754624,
"min": 0.013316165359477357,
"max": 0.018963276022016848,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03240485533509248,
"min": 0.026632330718954714,
"max": 0.0550712417869363,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05448947008699179,
"min": 0.0234168722294271,
"max": 0.0594063775613904,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.10897894017398357,
"min": 0.0468337444588542,
"max": 0.17378326406081518,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.489298503599992e-06,
"min": 4.489298503599992e-06,
"max": 0.0002953257015581,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 8.978597007199984e-06,
"min": 8.978597007199984e-06,
"max": 0.0008437705687431499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10149640000000001,
"min": 0.10149640000000001,
"max": 0.19844189999999995,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20299280000000003,
"min": 0.20299280000000003,
"max": 0.58125685,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.467035999999989e-05,
"min": 8.467035999999989e-05,
"max": 0.004922250809999998,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00016934071999999978,
"min": 0.00016934071999999978,
"max": 0.014064716815,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1741678950",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1741681344"
},
"total": 2394.0393705779998,
"count": 1,
"self": 0.442030856999736,
"children": {
"run_training.setup": {
"total": 0.022936631999982637,
"count": 1,
"self": 0.022936631999982637
},
"TrainerController.start_learning": {
"total": 2393.574403089,
"count": 1,
"self": 4.102576235877677,
"children": {
"TrainerController._reset_env": {
"total": 3.282786576000035,
"count": 1,
"self": 3.282786576000035
},
"TrainerController.advance": {
"total": 2386.0823519011224,
"count": 231629,
"self": 4.323116402176765,
"children": {
"env_step": {
"total": 1891.03314429899,
"count": 231629,
"self": 1480.0646121958985,
"children": {
"SubprocessEnvManager._take_step": {
"total": 408.3976098231037,
"count": 231629,
"self": 15.306342538004742,
"children": {
"TorchPolicy.evaluate": {
"total": 393.091267285099,
"count": 222931,
"self": 393.091267285099
}
}
},
"workers": {
"total": 2.5709222799877125,
"count": 231629,
"self": 0.0,
"children": {
"worker_root": {
"total": 2386.424671478069,
"count": 231629,
"is_parallel": true,
"self": 1174.9959840660756,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0012201499999946464,
"count": 1,
"is_parallel": true,
"self": 0.0003820649999397574,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000838085000054889,
"count": 2,
"is_parallel": true,
"self": 0.000838085000054889
}
}
},
"UnityEnvironment.step": {
"total": 0.029749161000040658,
"count": 1,
"is_parallel": true,
"self": 0.0003686130000915,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018794699997215503,
"count": 1,
"is_parallel": true,
"self": 0.00018794699997215503
},
"communicator.exchange": {
"total": 0.028498475999981565,
"count": 1,
"is_parallel": true,
"self": 0.028498475999981565
},
"steps_from_proto": {
"total": 0.000694124999995438,
"count": 1,
"is_parallel": true,
"self": 0.00019343200006005645,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005006929999353815,
"count": 2,
"is_parallel": true,
"self": 0.0005006929999353815
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1211.4286874119932,
"count": 231628,
"is_parallel": true,
"self": 36.50284377392609,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 78.90569744896857,
"count": 231628,
"is_parallel": true,
"self": 78.90569744896857
},
"communicator.exchange": {
"total": 1011.9689620100206,
"count": 231628,
"is_parallel": true,
"self": 1011.9689620100206
},
"steps_from_proto": {
"total": 84.05118417907784,
"count": 231628,
"is_parallel": true,
"self": 29.140188429034424,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.91099575004341,
"count": 463256,
"is_parallel": true,
"self": 54.91099575004341
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 490.7260911999558,
"count": 231629,
"self": 6.18881414007592,
"children": {
"process_trajectory": {
"total": 151.6315547928806,
"count": 231629,
"self": 150.37891134788129,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2526434449993076,
"count": 10,
"self": 1.2526434449993076
}
}
},
"_update_policy": {
"total": 332.9057222669993,
"count": 96,
"self": 266.2135722820076,
"children": {
"TorchPPOOptimizer.update": {
"total": 66.69214998499172,
"count": 2880,
"self": 66.69214998499172
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.629999683762435e-07,
"count": 1,
"self": 8.629999683762435e-07
},
"TrainerController._save_models": {
"total": 0.10668751299999713,
"count": 1,
"self": 0.0018082169999615871,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10487929600003554,
"count": 1,
"self": 0.10487929600003554
}
}
}
}
}
}
}