{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5914678573608398,
"min": 0.5914678573608398,
"max": 1.5498406887054443,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17573.693359375,
"min": 17573.693359375,
"max": 47015.96875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989907.0,
"min": 29952.0,
"max": 989907.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989907.0,
"min": 29952.0,
"max": 989907.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3972925543785095,
"min": -0.10856856405735016,
"max": 0.4440023601055145,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 104.4879379272461,
"min": -25.730749130249023,
"max": 118.54862976074219,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.025385025888681412,
"min": 0.013125641271471977,
"max": 0.36257070302963257,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.676261901855469,
"min": 3.386415481567383,
"max": 87.0169677734375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.069203066554716,
"min": 0.06703047357552264,
"max": 0.07450143988298132,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.553624532437728,
"min": 0.29800575953192526,
"max": 0.5874809332647905,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.010912567718757296,
"min": 0.00012190790923092381,
"max": 0.01723728170977497,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.08730054175005837,
"min": 0.0008533553646164667,
"max": 0.08730054175005837,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.482360005912504e-06,
"min": 7.482360005912504e-06,
"max": 0.00029544960151679995,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 5.985888004730003e-05,
"min": 5.985888004730003e-05,
"max": 0.0018811671729443,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024940875,
"min": 0.1024940875,
"max": 0.1984832,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.8199527,
"min": 0.7393101999999999,
"max": 1.3963352,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025915934125000017,
"min": 0.00025915934125000017,
"max": 0.009848471680000002,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0020732747300000014,
"min": 0.0020732747300000014,
"max": 0.06271286443,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009718617424368858,
"min": 0.009718617424368858,
"max": 0.7092474699020386,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.07774893939495087,
"min": 0.07179825752973557,
"max": 2.8369898796081543,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 450.984375,
"min": 438.59375,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28863.0,
"min": 15984.0,
"max": 33376.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.2364249657839537,
"min": -1.0000000521540642,
"max": 1.462244044547364,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 79.13119781017303,
"min": -32.000001668930054,
"max": 94.61919783055782,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.2364249657839537,
"min": -1.0000000521540642,
"max": 1.462244044547364,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 79.13119781017303,
"min": -32.000001668930054,
"max": 94.61919783055782,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04542188627829091,
"min": 0.04542188627829091,
"max": 15.732642381452024,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.9070007218106184,
"min": 2.9070007218106184,
"max": 251.72227810323238,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1758030659",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1758032699"
},
"total": 2040.3091723469997,
"count": 1,
"self": 0.5379893070003163,
"children": {
"run_training.setup": {
"total": 0.022877733999848715,
"count": 1,
"self": 0.022877733999848715
},
"TrainerController.start_learning": {
"total": 2039.7483053059996,
"count": 1,
"self": 1.4017741990401191,
"children": {
"TrainerController._reset_env": {
"total": 2.045414545999847,
"count": 1,
"self": 2.045414545999847
},
"TrainerController.advance": {
"total": 2036.2170870749587,
"count": 63537,
"self": 1.3925721941905067,
"children": {
"env_step": {
"total": 1372.1432063828852,
"count": 63537,
"self": 1224.2531805109265,
"children": {
"SubprocessEnvManager._take_step": {
"total": 147.0683539329575,
"count": 63537,
"self": 4.6730461769338945,
"children": {
"TorchPolicy.evaluate": {
"total": 142.3953077560236,
"count": 62554,
"self": 142.3953077560236
}
}
},
"workers": {
"total": 0.8216719390011349,
"count": 63537,
"self": 0.0,
"children": {
"worker_root": {
"total": 2035.169259918729,
"count": 63537,
"is_parallel": true,
"self": 921.9925240726843,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017861339993032743,
"count": 1,
"is_parallel": true,
"self": 0.0005718180000258144,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00121431599927746,
"count": 8,
"is_parallel": true,
"self": 0.00121431599927746
}
}
},
"UnityEnvironment.step": {
"total": 0.045233213999381405,
"count": 1,
"is_parallel": true,
"self": 0.00052212499849702,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004426810000950354,
"count": 1,
"is_parallel": true,
"self": 0.0004426810000950354
},
"communicator.exchange": {
"total": 0.04256386300039594,
"count": 1,
"is_parallel": true,
"self": 0.04256386300039594
},
"steps_from_proto": {
"total": 0.0017045450003934093,
"count": 1,
"is_parallel": true,
"self": 0.00039083099909476005,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013137140012986492,
"count": 8,
"is_parallel": true,
"self": 0.0013137140012986492
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1113.1767358460447,
"count": 63536,
"is_parallel": true,
"self": 31.22656364215436,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.386675785029183,
"count": 63536,
"is_parallel": true,
"self": 22.386675785029183
},
"communicator.exchange": {
"total": 964.8668360168695,
"count": 63536,
"is_parallel": true,
"self": 964.8668360168695
},
"steps_from_proto": {
"total": 94.69666040199172,
"count": 63536,
"is_parallel": true,
"self": 19.03215456144426,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.66450584054746,
"count": 508288,
"is_parallel": true,
"self": 75.66450584054746
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 662.681308497883,
"count": 63537,
"self": 2.718562690884937,
"children": {
"process_trajectory": {
"total": 125.83051049999631,
"count": 63537,
"self": 125.57501935999608,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2554911400002311,
"count": 2,
"self": 0.2554911400002311
}
}
},
"_update_policy": {
"total": 534.1322353070018,
"count": 233,
"self": 299.8061782099394,
"children": {
"TorchPPOOptimizer.update": {
"total": 234.3260570970624,
"count": 23076,
"self": 234.3260570970624
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.260002116207033e-07,
"count": 1,
"self": 9.260002116207033e-07
},
"TrainerController._save_models": {
"total": 0.08402856000066095,
"count": 1,
"self": 0.0013348200009204447,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0826937399997405,
"count": 1,
"self": 0.0826937399997405
}
}
}
}
}
}
}