{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6857642531394958,
"min": 0.6857642531394958,
"max": 1.4782092571258545,
"count": 10
},
"Pyramids.Policy.Entropy.sum": {
"value": 20452.232421875,
"min": 20452.232421875,
"max": 44842.95703125,
"count": 10
},
"Pyramids.Step.mean": {
"value": 299875.0,
"min": 29952.0,
"max": 299875.0,
"count": 10
},
"Pyramids.Step.sum": {
"value": 299875.0,
"min": 29952.0,
"max": 299875.0,
"count": 10
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.08650875091552734,
"min": -0.16226035356521606,
"max": -0.05395248904824257,
"count": 10
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -20.762100219726562,
"min": -38.45570373535156,
"max": -13.110454559326172,
"count": 10
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.038227494806051254,
"min": 0.038227494806051254,
"max": 0.25249892473220825,
"count": 10
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 9.174598693847656,
"min": 9.174598693847656,
"max": 60.5997428894043,
"count": 10
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06621371803462482,
"min": 0.06621371803462482,
"max": 0.07140908086001309,
"count": 10
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.8607783344501226,
"min": 0.4946522531454216,
"max": 0.9805612934831671,
"count": 10
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0010210787272243063,
"min": 0.0010210787272243063,
"max": 0.006060343406106811,
"count": 10
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.01327402345391598,
"min": 0.01327402345391598,
"max": 0.042422403842747675,
"count": 10
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5054018058948717e-05,
"min": 1.5054018058948717e-05,
"max": 0.0002838354339596191,
"count": 10
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00019570223476633332,
"min": 0.00019570223476633332,
"max": 0.0024611702796100005,
"count": 10
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10501797435897438,
"min": 0.10501797435897438,
"max": 0.19461180952380958,
"count": 10
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.365233666666667,
"min": 1.362282666666667,
"max": 2.032080666666667,
"count": 10
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0005112956384615383,
"min": 0.0005112956384615383,
"max": 0.00946171977142857,
"count": 10
},
"Pyramids.Policy.Beta.sum": {
"value": 0.006646843299999998,
"min": 0.006646843299999998,
"max": 0.082066961,
"count": 10
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.03791473060846329,
"min": 0.03791473060846329,
"max": 0.4183076322078705,
"count": 10
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.49289149045944214,
"min": 0.49289149045944214,
"max": 2.9281535148620605,
"count": 10
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 985.9677419354839,
"min": 927.3030303030303,
"max": 999.0,
"count": 10
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30565.0,
"min": 15984.0,
"max": 32356.0,
"count": 10
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.9221355367091394,
"min": -1.0000000521540642,
"max": -0.6249030781063166,
"count": 10
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -28.586201637983322,
"min": -29.99300166219473,
"max": -16.000000834465027,
"count": 10
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.9221355367091394,
"min": -1.0000000521540642,
"max": -0.6249030781063166,
"count": 10
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -28.586201637983322,
"min": -29.99300166219473,
"max": -16.000000834465027,
"count": 10
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.3934369310135803,
"min": 0.3934369310135803,
"max": 8.543423502705991,
"count": 10
},
"Pyramids.Policy.RndReward.sum": {
"value": 12.196544861420989,
"min": 10.666800018399954,
"max": 136.69477604329586,
"count": 10
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1718358598",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1718359186"
},
"total": 587.3496648420002,
"count": 1,
"self": 0.5282119700001431,
"children": {
"run_training.setup": {
"total": 0.0785807479999221,
"count": 1,
"self": 0.0785807479999221
},
"TrainerController.start_learning": {
"total": 586.7428721240001,
"count": 1,
"self": 0.37381806600171785,
"children": {
"TrainerController._reset_env": {
"total": 2.4277736110000205,
"count": 1,
"self": 2.4277736110000205
},
"TrainerController.advance": {
"total": 583.8317824519983,
"count": 18905,
"self": 0.4029481010067002,
"children": {
"env_step": {
"total": 393.4856471010082,
"count": 18905,
"self": 354.1138093499974,
"children": {
"SubprocessEnvManager._take_step": {
"total": 39.146488250014954,
"count": 18905,
"self": 1.3435121500174318,
"children": {
"TorchPolicy.evaluate": {
"total": 37.80297609999752,
"count": 18801,
"self": 37.80297609999752
}
}
},
"workers": {
"total": 0.22534950099588968,
"count": 18905,
"self": 0.0,
"children": {
"worker_root": {
"total": 585.3678140970013,
"count": 18905,
"is_parallel": true,
"self": 265.658549171,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019624290000592737,
"count": 1,
"is_parallel": true,
"self": 0.000582765000103791,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013796639999554827,
"count": 8,
"is_parallel": true,
"self": 0.0013796639999554827
}
}
},
"UnityEnvironment.step": {
"total": 0.04633306799996717,
"count": 1,
"is_parallel": true,
"self": 0.0006468049999739378,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004530630000090241,
"count": 1,
"is_parallel": true,
"self": 0.0004530630000090241
},
"communicator.exchange": {
"total": 0.0436490810000123,
"count": 1,
"is_parallel": true,
"self": 0.0436490810000123
},
"steps_from_proto": {
"total": 0.0015841189999719063,
"count": 1,
"is_parallel": true,
"self": 0.000339096999823596,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012450220001483103,
"count": 8,
"is_parallel": true,
"self": 0.0012450220001483103
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 319.7092649260013,
"count": 18904,
"is_parallel": true,
"self": 9.844938205995845,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.673449753999989,
"count": 18904,
"is_parallel": true,
"self": 6.673449753999989
},
"communicator.exchange": {
"total": 275.0330481049888,
"count": 18904,
"is_parallel": true,
"self": 275.0330481049888
},
"steps_from_proto": {
"total": 28.157828861016696,
"count": 18904,
"is_parallel": true,
"self": 5.544785575991568,
"children": {
"_process_rank_one_or_two_observation": {
"total": 22.613043285025128,
"count": 151232,
"is_parallel": true,
"self": 22.613043285025128
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 189.94318724998334,
"count": 18905,
"self": 0.5889082929655842,
"children": {
"process_trajectory": {
"total": 36.676568237016,
"count": 18905,
"self": 36.676568237016
},
"_update_policy": {
"total": 152.67771072000176,
"count": 120,
"self": 90.09042442700434,
"children": {
"TorchPPOOptimizer.update": {
"total": 62.58728629299742,
"count": 6858,
"self": 62.58728629299742
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.769999683659989e-07,
"count": 1,
"self": 7.769999683659989e-07
},
"TrainerController._save_models": {
"total": 0.10949721800011503,
"count": 1,
"self": 0.001699325000117824,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1077978929999972,
"count": 1,
"self": 0.1077978929999972
}
}
}
}
}
}
}