{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.37409350275993347,
"min": 0.370309054851532,
"max": 1.5059510469436646,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11270.689453125,
"min": 11127.046875,
"max": 45684.53125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989994.0,
"min": 29952.0,
"max": 989994.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989994.0,
"min": 29952.0,
"max": 989994.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.573559045791626,
"min": -0.09332586079835892,
"max": 0.6237437129020691,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 161.74365234375,
"min": -22.584857940673828,
"max": 176.51947021484375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.028428923338651657,
"min": 0.00021052270312793553,
"max": 0.21615830063819885,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 8.016956329345703,
"min": 0.055577993392944336,
"max": 51.87799072265625,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06688897711518664,
"min": 0.06267397501505913,
"max": 0.07372745791194732,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9364456796126129,
"min": 0.4988866531577473,
"max": 1.0612269119483955,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01355622970974461,
"min": 0.0017014080565630415,
"max": 0.015489067673018152,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18978721593642453,
"min": 0.012078738498436499,
"max": 0.23233601509527227,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.50324749895e-06,
"min": 7.50324749895e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001050454649853,
"min": 0.0001050454649853,
"max": 0.0036335290888237,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250105,
"min": 0.10250105,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4350147,
"min": 1.3691136000000002,
"max": 2.6172977000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025985489500000007,
"min": 0.00025985489500000007,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036379685300000006,
"min": 0.0036379685300000006,
"max": 0.12113651237000003,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011345063336193562,
"min": 0.011345063336193562,
"max": 0.32017460465431213,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15883088111877441,
"min": 0.15883088111877441,
"max": 2.2412221431732178,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 321.6629213483146,
"min": 299.24,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28628.0,
"min": 15984.0,
"max": 32736.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6323408953506837,
"min": -1.0000000521540642,
"max": 1.6407319839298724,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 143.64599879086018,
"min": -32.000001668930054,
"max": 164.07319839298725,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6323408953506837,
"min": -1.0000000521540642,
"max": 1.6407319839298724,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 143.64599879086018,
"min": -32.000001668930054,
"max": 164.07319839298725,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03771270953835814,
"min": 0.03706527712653042,
"max": 6.781306769698858,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.318718439375516,
"min": 3.318718439375516,
"max": 108.50090831518173,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1719041091",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1719043284"
},
"total": 2193.950944776,
"count": 1,
"self": 0.42701143100021,
"children": {
"run_training.setup": {
"total": 0.0565887110001313,
"count": 1,
"self": 0.0565887110001313
},
"TrainerController.start_learning": {
"total": 2193.467344634,
"count": 1,
"self": 1.4388411920367616,
"children": {
"TrainerController._reset_env": {
"total": 2.3235539919999155,
"count": 1,
"self": 2.3235539919999155
},
"TrainerController.advance": {
"total": 2189.619546709963,
"count": 64043,
"self": 1.5378202769693416,
"children": {
"env_step": {
"total": 1551.7077866710083,
"count": 64043,
"self": 1418.1833083099725,
"children": {
"SubprocessEnvManager._take_step": {
"total": 132.6541122490048,
"count": 64043,
"self": 4.953292605087427,
"children": {
"TorchPolicy.evaluate": {
"total": 127.70081964391738,
"count": 62556,
"self": 127.70081964391738
}
}
},
"workers": {
"total": 0.8703661120309789,
"count": 64043,
"self": 0.0,
"children": {
"worker_root": {
"total": 2188.342963169965,
"count": 64043,
"is_parallel": true,
"self": 894.1361921808841,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019324759998653462,
"count": 1,
"is_parallel": true,
"self": 0.0005659300002207601,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013665459996445861,
"count": 8,
"is_parallel": true,
"self": 0.0013665459996445861
}
}
},
"UnityEnvironment.step": {
"total": 0.04835347599987472,
"count": 1,
"is_parallel": true,
"self": 0.0006186400000842696,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005218299997977738,
"count": 1,
"is_parallel": true,
"self": 0.0005218299997977738
},
"communicator.exchange": {
"total": 0.04554389899999478,
"count": 1,
"is_parallel": true,
"self": 0.04554389899999478
},
"steps_from_proto": {
"total": 0.0016691069999978936,
"count": 1,
"is_parallel": true,
"self": 0.00040812400015965977,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012609829998382338,
"count": 8,
"is_parallel": true,
"self": 0.0012609829998382338
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1294.206770989081,
"count": 64042,
"is_parallel": true,
"self": 34.09298782209885,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.91408143796434,
"count": 64042,
"is_parallel": true,
"self": 22.91408143796434
},
"communicator.exchange": {
"total": 1140.0932194509592,
"count": 64042,
"is_parallel": true,
"self": 1140.0932194509592
},
"steps_from_proto": {
"total": 97.10648227805859,
"count": 64042,
"is_parallel": true,
"self": 19.834931232934423,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.27155104512417,
"count": 512336,
"is_parallel": true,
"self": 77.27155104512417
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 636.373939761985,
"count": 64043,
"self": 2.7410948529670804,
"children": {
"process_trajectory": {
"total": 128.04040358501243,
"count": 64043,
"self": 127.8473422880129,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1930612969995309,
"count": 2,
"self": 0.1930612969995309
}
}
},
"_update_policy": {
"total": 505.59244132400545,
"count": 451,
"self": 298.1579006319914,
"children": {
"TorchPPOOptimizer.update": {
"total": 207.43454069201402,
"count": 22809,
"self": 207.43454069201402
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3700000636163168e-06,
"count": 1,
"self": 1.3700000636163168e-06
},
"TrainerController._save_models": {
"total": 0.08540136999999959,
"count": 1,
"self": 0.0015271219999704044,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08387424800002918,
"count": 1,
"self": 0.08387424800002918
}
}
}
}
}
}
}