{
  "name": "root",
  "gauges": {
    "Pyramids.Policy.Entropy.mean": {
      "value": 0.34050318598747253,
      "min": 0.34050318598747253,
      "max": 1.3917882442474365,
      "count": 33
    },
    "Pyramids.Policy.Entropy.sum": {
      "value": 10231.439453125,
      "min": 10231.439453125,
      "max": 42221.2890625,
      "count": 33
    },
    "Pyramids.Step.mean": {
      "value": 989943.0,
      "min": 29950.0,
      "max": 989943.0,
      "count": 33
    },
    "Pyramids.Step.sum": {
      "value": 989943.0,
      "min": 29950.0,
      "max": 989943.0,
      "count": 33
    },
    "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
      "value": 0.6623512506484985,
      "min": -0.08478499948978424,
      "max": 0.6809508204460144,
      "count": 33
    },
    "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
      "value": 189.43246459960938,
      "min": -20.433185577392578,
      "max": 199.51858520507812,
      "count": 33
    },
    "Pyramids.Policy.RndValueEstimate.mean": {
      "value": 0.011695773340761662,
      "min": 0.0017226093914359808,
      "max": 0.40394869446754456,
      "count": 33
    },
    "Pyramids.Policy.RndValueEstimate.sum": {
      "value": 3.3449912071228027,
      "min": 0.5047245621681213,
      "max": 96.13978576660156,
      "count": 33
    },
    "Pyramids.Losses.PolicyLoss.mean": {
      "value": 0.0647931088556807,
      "min": 0.0646489717132974,
      "max": 0.073572663222957,
      "count": 33
    },
    "Pyramids.Losses.PolicyLoss.sum": {
      "value": 0.9071035239795296,
      "min": 0.6083178795591048,
      "max": 1.103589948344355,
      "count": 33
    },
    "Pyramids.Losses.ValueLoss.mean": {
      "value": 0.01532207807905565,
      "min": 0.0010567516142613803,
      "max": 0.017456777729857088,
      "count": 33
    },
    "Pyramids.Losses.ValueLoss.sum": {
      "value": 0.2145090931067791,
      "min": 0.013737770985397945,
      "max": 0.2587317506161829,
      "count": 33
    },
    "Pyramids.Policy.LearningRate.mean": {
      "value": 7.574947475050001e-06,
      "min": 7.574947475050001e-06,
      "max": 0.00029486920171026667,
      "count": 33
    },
    "Pyramids.Policy.LearningRate.sum": {
      "value": 0.00010604926465070001,
      "min": 0.00010604926465070001,
      "max": 0.003635940188019999,
      "count": 33
    },
    "Pyramids.Policy.Epsilon.mean": {
      "value": 0.10252494999999999,
      "min": 0.10252494999999999,
      "max": 0.19828973333333333,
      "count": 33
    },
    "Pyramids.Policy.Epsilon.sum": {
      "value": 1.4353493,
      "min": 1.4353493,
      "max": 2.6119800000000004,
      "count": 33
    },
    "Pyramids.Policy.Beta.mean": {
      "value": 0.0002622425050000001,
      "min": 0.0002622425050000001,
      "max": 0.00982914436,
      "count": 33
    },
    "Pyramids.Policy.Beta.sum": {
      "value": 0.003671395070000001,
      "min": 0.003671395070000001,
      "max": 0.12121680200000001,
      "count": 33
    },
    "Pyramids.Losses.RNDLoss.mean": {
      "value": 0.011863932013511658,
      "min": 0.01173480600118637,
      "max": 0.4767794609069824,
      "count": 33
    },
    "Pyramids.Losses.RNDLoss.sum": {
      "value": 0.1660950481891632,
      "min": 0.1642872840166092,
      "max": 4.291015148162842,
      "count": 33
    },
    "Pyramids.Environment.EpisodeLength.mean": {
      "value": 300.2718446601942,
      "min": 277.96190476190475,
      "max": 991.7741935483871,
      "count": 33
    },
    "Pyramids.Environment.EpisodeLength.sum": {
      "value": 30928.0,
      "min": 17260.0,
      "max": 32819.0,
      "count": 33
    },
    "Pyramids.Environment.CumulativeReward.mean": {
      "value": 1.6803087210192265,
      "min": -0.9280258561334302,
      "max": 1.7029828373874938,
      "count": 33
    },
    "Pyramids.Environment.CumulativeReward.sum": {
      "value": 173.07179826498032,
      "min": -28.768801540136337,
      "max": 182.74899853020906,
      "count": 33
    },
    "Pyramids.Policy.ExtrinsicReward.mean": {
      "value": 1.6803087210192265,
      "min": -0.9280258561334302,
      "max": 1.7029828373874938,
      "count": 33
    },
    "Pyramids.Policy.ExtrinsicReward.sum": {
      "value": 173.07179826498032,
      "min": -28.768801540136337,
      "max": 182.74899853020906,
      "count": 33
    },
    "Pyramids.Policy.RndReward.mean": {
      "value": 0.03655006798454425,
      "min": 0.03368786300222079,
      "max": 9.56843827995989,
      "count": 33
    },
    "Pyramids.Policy.RndReward.sum": {
      "value": 3.7646570024080575,
      "min": 3.537225615233183,
      "max": 172.23188903927803,
      "count": 33
    },
    "Pyramids.IsTraining.mean": {
      "value": 1.0,
      "min": 1.0,
      "max": 1.0,
      "count": 33
    },
    "Pyramids.IsTraining.sum": {
      "value": 1.0,
      "min": 1.0,
      "max": 1.0,
      "count": 33
    }
  },
  "metadata": {
    "timer_format_version": "0.1.0",
    "start_time_seconds": "1684059377",
    "python_version": "3.10.11 (main, Apr  5 2023, 14:15:10) [GCC 9.4.0]",
    "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
    "mlagents_version": "0.31.0.dev0",
    "mlagents_envs_version": "0.31.0.dev0",
    "communication_protocol_version": "1.5.0",
    "pytorch_version": "1.11.0+cu102",
    "numpy_version": "1.21.2",
    "end_time_seconds": "1684061596"
  },
  "total": 2218.33576221,
  "count": 1,
  "self": 0.4883241220004493,
  "children": {
    "run_training.setup": {
      "total": 0.04326836000001322,
      "count": 1,
      "self": 0.04326836000001322
    },
    "TrainerController.start_learning": {
      "total": 2217.8041697279996,
      "count": 1,
      "self": 1.2502792679815684,
      "children": {
        "TrainerController._reset_env": {
          "total": 4.033931236000001,
          "count": 1,
          "self": 4.033931236000001
        },
        "TrainerController.advance": {
          "total": 2212.4295656810177,
          "count": 64103,
          "self": 1.2998243560418814,
          "children": {
            "env_step": {
              "total": 1577.8959774309733,
              "count": 64103,
              "self": 1474.4990288549027,
              "children": {
                "SubprocessEnvManager._take_step": {
                  "total": 102.64203911303872,
                  "count": 64103,
                  "self": 4.601023134035927,
                  "children": {
                    "TorchPolicy.evaluate": {
                      "total": 98.0410159790028,
                      "count": 62551,
                      "self": 98.0410159790028
                    }
                  }
                },
                "workers": {
                  "total": 0.7549094630318223,
                  "count": 64103,
                  "self": 0.0,
                  "children": {
                    "worker_root": {
                      "total": 2213.104311142039,
                      "count": 64103,
                      "is_parallel": true,
                      "self": 847.656387979993,
                      "children": {
                        "run_training.setup": {
                          "total": 0.0,
                          "count": 0,
                          "is_parallel": true,
                          "self": 0.0,
                          "children": {
                            "steps_from_proto": {
                              "total": 0.005474859000003107,
                              "count": 1,
                              "is_parallel": true,
                              "self": 0.004050574000018514,
                              "children": {
                                "_process_rank_one_or_two_observation": {
                                  "total": 0.001424284999984593,
                                  "count": 8,
                                  "is_parallel": true,
                                  "self": 0.001424284999984593
                                }
                              }
                            },
                            "UnityEnvironment.step": {
                              "total": 0.04817116200001692,
                              "count": 1,
                              "is_parallel": true,
                              "self": 0.000585257000011552,
                              "children": {
                                "UnityEnvironment._generate_step_input": {
                                  "total": 0.0004970240000261583,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.0004970240000261583
                                },
                                "communicator.exchange": {
                                  "total": 0.04529256899996881,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.04529256899996881
                                },
                                "steps_from_proto": {
                                  "total": 0.0017963120000104027,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.00036980499999117455,
                                  "children": {
                                    "_process_rank_one_or_two_observation": {
                                      "total": 0.0014265070000192281,
                                      "count": 8,
                                      "is_parallel": true,
                                      "self": 0.0014265070000192281
                                    }
                                  }
                                }
                              }
                            }
                          }
                        },
                        "UnityEnvironment.step": {
                          "total": 1365.447923162046,
                          "count": 64102,
                          "is_parallel": true,
                          "self": 32.201219808004225,
                          "children": {
                            "UnityEnvironment._generate_step_input": {
                              "total": 22.344287013000155,
                              "count": 64102,
                              "is_parallel": true,
                              "self": 22.344287013000155
                            },
                            "communicator.exchange": {
                              "total": 1213.8117447110476,
                              "count": 64102,
                              "is_parallel": true,
                              "self": 1213.8117447110476
                            },
                            "steps_from_proto": {
                              "total": 97.090671629994,
                              "count": 64102,
                              "is_parallel": true,
                              "self": 19.627042863029374,
                              "children": {
                                "_process_rank_one_or_two_observation": {
                                  "total": 77.46362876696463,
                                  "count": 512816,
                                  "is_parallel": true,
                                  "self": 77.46362876696463
                                }
                              }
                            }
                          }
                        }
                      }
                    }
                  }
                }
              }
            },
            "trainer_advance": {
              "total": 633.2337638940024,
              "count": 64103,
              "self": 2.528721767988145,
              "children": {
                "process_trajectory": {
                  "total": 107.09841935801421,
                  "count": 64103,
                  "self": 106.82166575301392,
                  "children": {
                    "RLTrainer._checkpoint": {
                      "total": 0.27675360500029456,
                      "count": 2,
                      "self": 0.27675360500029456
                    }
                  }
                },
                "_update_policy": {
                  "total": 523.6066227680001,
                  "count": 462,
                  "self": 339.81862560500326,
                  "children": {
                    "TorchPPOOptimizer.update": {
                      "total": 183.78799716299682,
                      "count": 22782,
                      "self": 183.78799716299682
                    }
                  }
                }
              }
            }
          }
        },
        "trainer_threads": {
          "total": 1.1699999049596954e-06,
          "count": 1,
          "self": 1.1699999049596954e-06
        },
        "TrainerController._save_models": {
          "total": 0.09039237300021341,
          "count": 1,
          "self": 0.0014130830004432937,
          "children": {
            "RLTrainer._checkpoint": {
              "total": 0.08897928999977012,
              "count": 1,
              "self": 0.08897928999977012
            }
          }
        }
      }
    }
  }
}