ppo-Pyramids / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.24936416745185852,
"min": 0.24936416745185852,
"max": 1.4051644802093506,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 7464.9658203125,
"min": 7464.9658203125,
"max": 42627.0703125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989881.0,
"min": 29952.0,
"max": 989881.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989881.0,
"min": 29952.0,
"max": 989881.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7536392211914062,
"min": -0.09098625928163528,
"max": 0.7536392211914062,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 217.80172729492188,
"min": -21.927688598632812,
"max": 220.515625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02987665869295597,
"min": 0.009689689613878727,
"max": 0.4815498888492584,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 8.634354591369629,
"min": 2.625905990600586,
"max": 114.12732696533203,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06585097097701566,
"min": 0.0636963303458433,
"max": 0.0726176526505868,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9219135936782192,
"min": 0.4897592646433121,
"max": 1.0370636624332596,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014575587145342421,
"min": 0.0008916798511689057,
"max": 0.01626113687014352,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2040582200347939,
"min": 0.012079702417571168,
"max": 0.23911999024373165,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.575526046285715e-06,
"min": 7.575526046285715e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.000106057364648,
"min": 0.000106057364648,
"max": 0.0037609462463512994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252514285714286,
"min": 0.10252514285714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.435352,
"min": 1.3886848,
"max": 2.6536486999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026226177142857143,
"min": 0.00026226177142857143,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036716648,
"min": 0.0036716648,
"max": 0.12537950513,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013514531776309013,
"min": 0.013514531776309013,
"max": 0.3805174231529236,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1892034411430359,
"min": 0.1892034411430359,
"max": 2.6636219024658203,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 255.78947368421052,
"min": 244.1496062992126,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29160.0,
"min": 15984.0,
"max": 32490.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.726659633219242,
"min": -1.0000000521540642,
"max": 1.7400960483419614,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 196.8391981869936,
"min": -28.52100171893835,
"max": 220.9921981394291,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.726659633219242,
"min": -1.0000000521540642,
"max": 1.7400960483419614,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 196.8391981869936,
"min": -28.52100171893835,
"max": 220.9921981394291,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03599115809986854,
"min": 0.033900316211112634,
"max": 7.425204239785671,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.1029920233850135,
"min": 4.0881626792106545,
"max": 118.80326783657074,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693390350",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693393324"
},
"total": 2974.1801832939996,
"count": 1,
"self": 0.6777648669999508,
"children": {
"run_training.setup": {
"total": 0.05686271800004761,
"count": 1,
"self": 0.05686271800004761
},
"TrainerController.start_learning": {
"total": 2973.4455557089996,
"count": 1,
"self": 1.9235907588877126,
"children": {
"TrainerController._reset_env": {
"total": 1.372548924000057,
"count": 1,
"self": 1.372548924000057
},
"TrainerController.advance": {
"total": 2970.042195750112,
"count": 64211,
"self": 1.8871579190017655,
"children": {
"env_step": {
"total": 1913.17172440614,
"count": 64211,
"self": 1797.9218683101192,
"children": {
"SubprocessEnvManager._take_step": {
"total": 114.09509390504218,
"count": 64211,
"self": 5.727200922096017,
"children": {
"TorchPolicy.evaluate": {
"total": 108.36789298294616,
"count": 62562,
"self": 108.36789298294616
}
}
},
"workers": {
"total": 1.1547621909785448,
"count": 64211,
"self": 0.0,
"children": {
"worker_root": {
"total": 2967.0952982510935,
"count": 64211,
"is_parallel": true,
"self": 1313.792350405075,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0029483349999281927,
"count": 1,
"is_parallel": true,
"self": 0.00096883300011541,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019795019998127827,
"count": 8,
"is_parallel": true,
"self": 0.0019795019998127827
}
}
},
"UnityEnvironment.step": {
"total": 0.05716452699994079,
"count": 1,
"is_parallel": true,
"self": 0.0006365139997797087,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041577999991204706,
"count": 1,
"is_parallel": true,
"self": 0.00041577999991204706
},
"communicator.exchange": {
"total": 0.05368748800015055,
"count": 1,
"is_parallel": true,
"self": 0.05368748800015055
},
"steps_from_proto": {
"total": 0.0024247450000984827,
"count": 1,
"is_parallel": true,
"self": 0.00047059900020940404,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019541459998890787,
"count": 8,
"is_parallel": true,
"self": 0.0019541459998890787
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1653.3029478460185,
"count": 64210,
"is_parallel": true,
"self": 40.84065656018629,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.603716941963285,
"count": 64210,
"is_parallel": true,
"self": 24.603716941963285
},
"communicator.exchange": {
"total": 1456.0326902619142,
"count": 64210,
"is_parallel": true,
"self": 1456.0326902619142
},
"steps_from_proto": {
"total": 131.82588408195465,
"count": 64210,
"is_parallel": true,
"self": 27.1126979592309,
"children": {
"_process_rank_one_or_two_observation": {
"total": 104.71318612272376,
"count": 513680,
"is_parallel": true,
"self": 104.71318612272376
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1054.9833134249707,
"count": 64211,
"self": 3.543834120950123,
"children": {
"process_trajectory": {
"total": 137.48813881101614,
"count": 64211,
"self": 137.248437354016,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23970145700013745,
"count": 2,
"self": 0.23970145700013745
}
}
},
"_update_policy": {
"total": 913.9513404930044,
"count": 454,
"self": 424.4370314940063,
"children": {
"TorchPPOOptimizer.update": {
"total": 489.5143089989981,
"count": 22761,
"self": 489.5143089989981
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1459997040219605e-06,
"count": 1,
"self": 1.1459997040219605e-06
},
"TrainerController._save_models": {
"total": 0.10721912999997585,
"count": 1,
"self": 0.0014496280000457773,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10576950199993007,
"count": 1,
"self": 0.10576950199993007
}
}
}
}
}
}
}
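This file follows the ML-Agents timer format declared in its own metadata (timer_format_version 0.1.0): "gauges" holds value/min/max/count for each tracked statistic, and the timer tree nests nodes with total/count/self seconds under "children". A minimal Python sketch for inspecting such a file is below; the filename argument and the 1-second reporting threshold are illustrative assumptions, not part of the log.

```python
import json

def walk(node, name="root", depth=0, min_seconds=1.0):
    """Recursively print timer nodes whose total exceeds min_seconds."""
    total = node.get("total", 0.0)
    if total >= min_seconds:
        print(f"{'  ' * depth}{name}: {total:.1f}s over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1, min_seconds)

# Path assumed for illustration; matches this repo's run_logs layout.
with open("timers.json") as f:
    timers = json.load(f)

# Each gauge records the last value plus min/max/count over the run.
for key, g in timers["gauges"].items():
    print(f"{key}: last={g['value']:.4g} min={g['min']:.4g} "
          f"max={g['max']:.4g} (n={g['count']})")

walk(timers)
```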