{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3530730605125427,
"min": 0.3530730605125427,
"max": 0.46950677037239075,
"count": 9
},
"Pyramids.Policy.Entropy.sum": {
"value": 10659.9814453125,
"min": 4740.1953125,
"max": 14084.595703125,
"count": 9
},
"Pyramids.Step.mean": {
"value": 989946.0,
"min": 749959.0,
"max": 989946.0,
"count": 9
},
"Pyramids.Step.sum": {
"value": 989946.0,
"min": 749959.0,
"max": 989946.0,
"count": 9
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5460996031761169,
"min": 0.40090814232826233,
"max": 0.5610513687133789,
"count": 9
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 149.63128662109375,
"min": 33.67628479003906,
"max": 156.5333251953125,
"count": 9
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.045362599194049835,
"min": 0.012379592284560204,
"max": 0.07051504403352737,
"count": 9
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 12.429351806640625,
"min": 1.9520580768585205,
"max": 19.109577178955078,
"count": 9
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 345.45348837209303,
"min": 307.25,
"max": 409.0945945945946,
"count": 9
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29709.0,
"min": 4916.0,
"max": 31830.0,
"count": 9
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5875310096247444,
"min": 1.4286972741420205,
"max": 1.6927499761804938,
"count": 9
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 138.11519783735275,
"min": 27.0839996188879,
"max": 138.11519783735275,
"count": 9
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5875310096247444,
"min": 1.4286972741420205,
"max": 1.6927499761804938,
"count": 9
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 138.11519783735275,
"min": 27.0839996188879,
"max": 138.11519783735275,
"count": 9
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.14356546166176298,
"min": 0.13462204318784643,
"max": 0.18721644777393429,
"count": 9
},
"Pyramids.Policy.RndReward.sum": {
"value": 12.490195164573379,
"min": 2.153952691005543,
"max": 14.602882926366874,
"count": 9
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07097199595903482,
"min": 0.06525425311701837,
"max": 0.0711059946500297,
"count": 9
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0645799393855222,
"min": 0.2808688572840765,
"max": 1.0645799393855222,
"count": 9
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015733428199389752,
"min": 0.01045725111180218,
"max": 0.01587640412894654,
"count": 9
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23600142299084625,
"min": 0.04182900444720872,
"max": 0.23600142299084625,
"count": 9
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.504177498640001e-06,
"min": 7.504177498640001e-06,
"max": 7.635472454845e-05,
"count": 9
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011256266247960001,
"min": 0.00011256266247960001,
"max": 0.0009891802702735996,
"count": 9
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250136000000001,
"min": 0.10250136000000001,
"max": 0.12545155,
"count": 9
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5375204000000002,
"min": 0.5018062,
"max": 1.8071838,
"count": 9
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000259885864,
"min": 0.000259885864,
"max": 0.0025526098450000005,
"count": 9
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038982879600000008,
"min": 0.0038982879600000008,
"max": 0.033079667359999995,
"count": 9
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.04081549867987633,
"min": 0.037990253418684006,
"max": 0.04739264398813248,
"count": 9
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.612232506275177,
"min": 0.1895705759525299,
"max": 0.6411277651786804,
"count": 9
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 9
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 9
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1710504483",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1710505447"
},
"total": 964.5452561530001,
"count": 1,
"self": 0.635988392001309,
"children": {
"run_training.setup": {
"total": 0.06488856899886741,
"count": 1,
"self": 0.06488856899886741
},
"TrainerController.start_learning": {
"total": 963.8443791919999,
"count": 1,
"self": 0.6310442308713391,
"children": {
"TrainerController._reset_env": {
"total": 2.531344381000963,
"count": 1,
"self": 2.531344381000963
},
"TrainerController.advance": {
"total": 960.5842301901284,
"count": 16864,
"self": 0.7213993190562178,
"children": {
"env_step": {
"total": 677.6088358761117,
"count": 16864,
"self": 634.639690495027,
"children": {
"SubprocessEnvManager._take_step": {
"total": 42.58553765205943,
"count": 16864,
"self": 1.8989050834279624,
"children": {
"TorchPolicy.evaluate": {
"total": 40.68663256863147,
"count": 16296,
"self": 40.68663256863147
}
}
},
"workers": {
"total": 0.3836077290252433,
"count": 16864,
"self": 0.0,
"children": {
"worker_root": {
"total": 961.8012726820816,
"count": 16864,
"is_parallel": true,
"self": 377.5782632592345,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002364474999922095,
"count": 1,
"is_parallel": true,
"self": 0.0007095239961927291,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016549510037293658,
"count": 8,
"is_parallel": true,
"self": 0.0016549510037293658
}
}
},
"UnityEnvironment.step": {
"total": 0.06431216400233097,
"count": 1,
"is_parallel": true,
"self": 0.0008240879978984594,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005248840025160462,
"count": 1,
"is_parallel": true,
"self": 0.0005248840025160462
},
"communicator.exchange": {
"total": 0.06087844099965878,
"count": 1,
"is_parallel": true,
"self": 0.06087844099965878
},
"steps_from_proto": {
"total": 0.00208475100225769,
"count": 1,
"is_parallel": true,
"self": 0.0005133920058142394,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015713589964434505,
"count": 8,
"is_parallel": true,
"self": 0.0015713589964434505
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 584.2230094228471,
"count": 16863,
"is_parallel": true,
"self": 14.070101552333654,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.60006464601247,
"count": 16863,
"is_parallel": true,
"self": 7.60006464601247
},
"communicator.exchange": {
"total": 526.3479403123783,
"count": 16863,
"is_parallel": true,
"self": 526.3479403123783
},
"steps_from_proto": {
"total": 36.204902912122634,
"count": 16863,
"is_parallel": true,
"self": 7.864703232127795,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.34019967999484,
"count": 134904,
"is_parallel": true,
"self": 28.34019967999484
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 282.2539949949605,
"count": 16864,
"self": 1.3100764917071501,
"children": {
"process_trajectory": {
"total": 44.93210952625668,
"count": 16864,
"self": 44.81748620525468,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11462332100200001,
"count": 1,
"self": 0.11462332100200001
}
}
},
"_update_policy": {
"total": 236.0118089769967,
"count": 122,
"self": 95.06718214407738,
"children": {
"TorchPPOOptimizer.update": {
"total": 140.9446268329193,
"count": 5871,
"self": 140.9446268329193
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.469986252952367e-07,
"count": 1,
"self": 9.469986252952367e-07
},
"TrainerController._save_models": {
"total": 0.09775944300054107,
"count": 1,
"self": 0.002824500999849988,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09493494200069108,
"count": 1,
"self": 0.09493494200069108
}
}
}
}
}
}
}