{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3204606771469116,
"min": 0.3204606771469116,
"max": 1.4380348920822144,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9695.8583984375,
"min": 9663.740234375,
"max": 43624.2265625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989885.0,
"min": 29952.0,
"max": 989885.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989885.0,
"min": 29952.0,
"max": 989885.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.466636598110199,
"min": -0.1002633273601532,
"max": 0.466636598110199,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 127.39179229736328,
"min": -24.26372528076172,
"max": 127.39179229736328,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.018276957795023918,
"min": -0.0992346853017807,
"max": 0.34687748551368713,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.989609718322754,
"min": -25.503314971923828,
"max": 82.2099609375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06738492087641221,
"min": 0.06464911185455322,
"max": 0.07427107470427348,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9433888922697709,
"min": 0.4959740371720319,
"max": 1.1140661205641023,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015650139081168184,
"min": 0.00015737815284343418,
"max": 0.016435512301082596,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21910194713635459,
"min": 0.00188853783412121,
"max": 0.23009717221515635,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.52579034857857e-06,
"min": 7.52579034857857e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010536106488009999,
"min": 0.00010536106488009999,
"max": 0.003382467272511,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250856428571431,
"min": 0.10250856428571431,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4351199000000003,
"min": 1.3886848,
"max": 2.5274889999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002606055721428572,
"min": 0.0002606055721428572,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003648478010000001,
"min": 0.003648478010000001,
"max": 0.1127761511,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012823781929910183,
"min": 0.012823781929910183,
"max": 0.48417872190475464,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1795329451560974,
"min": 0.1795329451560974,
"max": 3.3892509937286377,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 385.4512195121951,
"min": 385.4512195121951,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31607.0,
"min": 15984.0,
"max": 32437.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5169707185974934,
"min": -1.0000000521540642,
"max": 1.5169707185974934,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 124.39159892499447,
"min": -31.998801663517952,
"max": 124.39159892499447,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5169707185974934,
"min": -1.0000000521540642,
"max": 1.5169707185974934,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 124.39159892499447,
"min": -31.998801663517952,
"max": 124.39159892499447,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05219911911351629,
"min": 0.05219911911351629,
"max": 9.59180692769587,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.280327767308336,
"min": 3.860951673996169,
"max": 153.46891084313393,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1747984006",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1747986206"
},
"total": 2200.400611159,
"count": 1,
"self": 0.48449555200022587,
"children": {
"run_training.setup": {
"total": 0.019645743000182847,
"count": 1,
"self": 0.019645743000182847
},
"TrainerController.start_learning": {
"total": 2199.8964698639998,
"count": 1,
"self": 1.178565344992876,
"children": {
"TrainerController._reset_env": {
"total": 2.983486834999894,
"count": 1,
"self": 2.983486834999894
},
"TrainerController.advance": {
"total": 2195.6548800390074,
"count": 63694,
"self": 1.3125825480692583,
"children": {
"env_step": {
"total": 1513.1208656649105,
"count": 63694,
"self": 1365.2754716608806,
"children": {
"SubprocessEnvManager._take_step": {
"total": 147.13138540899308,
"count": 63694,
"self": 4.510528901964335,
"children": {
"TorchPolicy.evaluate": {
"total": 142.62085650702875,
"count": 62562,
"self": 142.62085650702875
}
}
},
"workers": {
"total": 0.7140085950368302,
"count": 63694,
"self": 0.0,
"children": {
"worker_root": {
"total": 2195.1157754880282,
"count": 63694,
"is_parallel": true,
"self": 938.015990575957,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0024426180002592446,
"count": 1,
"is_parallel": true,
"self": 0.0006919320007909846,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00175068599946826,
"count": 8,
"is_parallel": true,
"self": 0.00175068599946826
}
}
},
"UnityEnvironment.step": {
"total": 0.10217435699996713,
"count": 1,
"is_parallel": true,
"self": 0.000564627000130713,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000456632999885187,
"count": 1,
"is_parallel": true,
"self": 0.000456632999885187
},
"communicator.exchange": {
"total": 0.09943775700003243,
"count": 1,
"is_parallel": true,
"self": 0.09943775700003243
},
"steps_from_proto": {
"total": 0.0017153399999187968,
"count": 1,
"is_parallel": true,
"self": 0.00037462999989656964,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013407100000222272,
"count": 8,
"is_parallel": true,
"self": 0.0013407100000222272
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1257.0997849120713,
"count": 63693,
"is_parallel": true,
"self": 31.24973714223779,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.99130015084438,
"count": 63693,
"is_parallel": true,
"self": 22.99130015084438
},
"communicator.exchange": {
"total": 1110.690419899061,
"count": 63693,
"is_parallel": true,
"self": 1110.690419899061
},
"steps_from_proto": {
"total": 92.16832771992813,
"count": 63693,
"is_parallel": true,
"self": 18.145130910162152,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.02319680976598,
"count": 509544,
"is_parallel": true,
"self": 74.02319680976598
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 681.2214318260276,
"count": 63694,
"self": 2.3485153890474066,
"children": {
"process_trajectory": {
"total": 125.60545207797804,
"count": 63694,
"self": 125.41616325597761,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18928882200043518,
"count": 2,
"self": 0.18928882200043518
}
}
},
"_update_policy": {
"total": 553.2674643590021,
"count": 451,
"self": 306.6107415170127,
"children": {
"TorchPPOOptimizer.update": {
"total": 246.65672284198945,
"count": 22794,
"self": 246.65672284198945
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.349996616947465e-07,
"count": 1,
"self": 9.349996616947465e-07
},
"TrainerController._save_models": {
"total": 0.07953670999995666,
"count": 1,
"self": 0.0013343699993129121,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07820234000064374,
"count": 1,
"self": 0.07820234000064374
}
}
}
}
}
}
}