{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.304462730884552,
"min": 0.299028217792511,
"max": 1.4163148403167725,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9197.2099609375,
"min": 8875.1572265625,
"max": 42965.328125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989980.0,
"min": 29952.0,
"max": 989980.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989980.0,
"min": 29952.0,
"max": 989980.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6477260589599609,
"min": -0.12461249530315399,
"max": 0.6683183908462524,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 190.43145751953125,
"min": -29.533161163330078,
"max": 190.43145751953125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02307054214179516,
"min": -0.0350244976580143,
"max": 0.5136933326721191,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.782739639282227,
"min": -9.701786041259766,
"max": 121.74532318115234,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06957311976131879,
"min": 0.06429952175434057,
"max": 0.07408421210191872,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0435967964197819,
"min": 0.518589484713431,
"max": 1.061734863081559,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014004665177910486,
"min": 0.002067388477669017,
"max": 0.01574181279055683,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2100699776686573,
"min": 0.020967757159698647,
"max": 0.22038537906779562,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.513257495613334e-06,
"min": 7.513257495613334e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011269886243420001,
"min": 0.00011269886243420001,
"max": 0.0036307939897354006,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250438666666666,
"min": 0.10250438666666666,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5375657999999999,
"min": 1.3886848,
"max": 2.6102646000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002601882280000001,
"min": 0.0002601882280000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003902823420000001,
"min": 0.003902823420000001,
"max": 0.12104543354,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.016299718990921974,
"min": 0.016299718990921974,
"max": 0.6300429701805115,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.24449579417705536,
"min": 0.23717160522937775,
"max": 4.4103007316589355,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 293.4770642201835,
"min": 293.4770642201835,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31989.0,
"min": 15984.0,
"max": 33194.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6331009004914432,
"min": -1.0000000521540642,
"max": 1.6965318981637345,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 178.0079981535673,
"min": -27.225001640617847,
"max": 178.4133977741003,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6331009004914432,
"min": -1.0000000521540642,
"max": 1.6965318981637345,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 178.0079981535673,
"min": -27.225001640617847,
"max": 178.4133977741003,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04896778064174364,
"min": 0.04896778064174364,
"max": 12.931424146518111,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.337488089950057,
"min": 4.96845352841774,
"max": 206.90278634428978,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1769053821",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training3 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1769057449"
},
"total": 3628.638995685,
"count": 1,
"self": 1.1980585639994388,
"children": {
"run_training.setup": {
"total": 0.05302349399994455,
"count": 1,
"self": 0.05302349399994455
},
"TrainerController.start_learning": {
"total": 3627.3879136270007,
"count": 1,
"self": 2.171162664975782,
"children": {
"TrainerController._reset_env": {
"total": 2.935486904000072,
"count": 1,
"self": 2.935486904000072
},
"TrainerController.advance": {
"total": 3622.1600245130244,
"count": 64165,
"self": 2.2672469220037783,
"children": {
"env_step": {
"total": 2598.264390323081,
"count": 64165,
"self": 2371.8259663991166,
"children": {
"SubprocessEnvManager._take_step": {
"total": 225.16596518994947,
"count": 64165,
"self": 6.555980498946383,
"children": {
"TorchPolicy.evaluate": {
"total": 218.6099846910031,
"count": 62574,
"self": 218.6099846910031
}
}
},
"workers": {
"total": 1.2724587340151174,
"count": 64165,
"self": 0.0,
"children": {
"worker_root": {
"total": 3598.1343443691408,
"count": 64165,
"is_parallel": true,
"self": 1452.5604791422038,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002380784000024505,
"count": 1,
"is_parallel": true,
"self": 0.0006512020004265651,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00172958199959794,
"count": 8,
"is_parallel": true,
"self": 0.00172958199959794
}
}
},
"UnityEnvironment.step": {
"total": 0.08439524300001722,
"count": 1,
"is_parallel": true,
"self": 0.0005523690001609793,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043442299988782906,
"count": 1,
"is_parallel": true,
"self": 0.00043442299988782906
},
"communicator.exchange": {
"total": 0.07972791900010634,
"count": 1,
"is_parallel": true,
"self": 0.07972791900010634
},
"steps_from_proto": {
"total": 0.003680531999862069,
"count": 1,
"is_parallel": true,
"self": 0.002365908000001582,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001314623999860487,
"count": 8,
"is_parallel": true,
"self": 0.001314623999860487
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2145.573865226937,
"count": 64164,
"is_parallel": true,
"self": 48.40962821090079,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.414565838018007,
"count": 64164,
"is_parallel": true,
"self": 24.414565838018007
},
"communicator.exchange": {
"total": 1876.1557742769812,
"count": 64164,
"is_parallel": true,
"self": 1876.1557742769812
},
"steps_from_proto": {
"total": 196.59389690103694,
"count": 64164,
"is_parallel": true,
"self": 53.527349632193136,
"children": {
"_process_rank_one_or_two_observation": {
"total": 143.0665472688438,
"count": 513312,
"is_parallel": true,
"self": 143.0665472688438
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1021.6283872679396,
"count": 64165,
"self": 4.172536632953779,
"children": {
"process_trajectory": {
"total": 195.83578177498566,
"count": 64165,
"self": 195.5771347039854,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2586470710002686,
"count": 2,
"self": 0.2586470710002686
}
}
},
"_update_policy": {
"total": 821.6200688600002,
"count": 457,
"self": 498.33796606796136,
"children": {
"TorchPPOOptimizer.update": {
"total": 323.2821027920388,
"count": 22773,
"self": 323.2821027920388
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3170001693652011e-06,
"count": 1,
"self": 1.3170001693652011e-06
},
"TrainerController._save_models": {
"total": 0.12123822800003836,
"count": 1,
"self": 0.0013147399995432352,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11992348800049513,
"count": 1,
"self": 0.11992348800049513
}
}
}
}
}
}
}