{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.23551686108112335,
"min": 0.23551686108112335,
"max": 1.428954005241394,
"count": 50
},
"Pyramids.Policy.Entropy.sum": {
"value": 7065.505859375,
"min": 7065.505859375,
"max": 43348.75,
"count": 50
},
"Pyramids.Step.mean": {
"value": 1499985.0,
"min": 29889.0,
"max": 1499985.0,
"count": 50
},
"Pyramids.Step.sum": {
"value": 1499985.0,
"min": 29889.0,
"max": 1499985.0,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7679235339164734,
"min": -0.10165933519601822,
"max": 0.7862588763237,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 227.30535888671875,
"min": -24.49989891052246,
"max": 234.30514526367188,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.004572571255266666,
"min": -0.011763223446905613,
"max": 0.7160788774490356,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1.3534810543060303,
"min": -3.434861183166504,
"max": 169.710693359375,
"count": 50
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06972586715901928,
"min": 0.06600348931847382,
"max": 0.07494587959867538,
"count": 50
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9761621402262699,
"min": 0.5246211571907277,
"max": 1.0909137852289075,
"count": 50
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01428520691971404,
"min": 0.0003765190338286428,
"max": 0.01898136554900434,
"count": 50
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19999289687599656,
"min": 0.0045182284059437135,
"max": 0.23602124210932138,
"count": 50
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.0001515544851961405,
"min": 0.0001515544851961405,
"max": 0.00029838354339596195,
"count": 50
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0021217627927459667,
"min": 0.0020886848037717336,
"max": 0.0038474494175169335,
"count": 50
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.15051814523809526,
"min": 0.15051814523809526,
"max": 0.19946118095238097,
"count": 50
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.1072540333333336,
"min": 1.3962282666666668,
"max": 2.7824830666666664,
"count": 50
},
"Pyramids.Policy.Beta.mean": {
"value": 0.005056762709285714,
"min": 0.005056762709285714,
"max": 0.009946171977142856,
"count": 50
},
"Pyramids.Policy.Beta.sum": {
"value": 0.07079467793,
"min": 0.06962320384,
"max": 0.12827005836,
"count": 50
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008640543557703495,
"min": 0.00850665383040905,
"max": 0.6313191652297974,
"count": 50
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12096761167049408,
"min": 0.119093157351017,
"max": 4.419234275817871,
"count": 50
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 261.1709401709402,
"min": 238.1639344262295,
"max": 999.0,
"count": 50
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30557.0,
"min": 16816.0,
"max": 32145.0,
"count": 50
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.70463075214981,
"min": -0.9999871489501768,
"max": 1.742260641983298,
"count": 50
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 199.4417980015278,
"min": -31.998801678419113,
"max": 212.55579832196236,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.70463075214981,
"min": -0.9999871489501768,
"max": 1.742260641983298,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 199.4417980015278,
"min": -31.998801678419113,
"max": 212.55579832196236,
"count": 50
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02336091256643741,
"min": 0.023257140563056635,
"max": 12.748844596831237,
"count": 50
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.733226770273177,
"min": 2.60449622804299,
"max": 216.73035814613104,
"count": 50
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1717295843",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1717300939"
},
"total": 5096.572073262,
"count": 1,
"self": 10.016513268999915,
"children": {
"run_training.setup": {
"total": 0.06835060299999896,
"count": 1,
"self": 0.06835060299999896
},
"TrainerController.start_learning": {
"total": 5086.48720939,
"count": 1,
"self": 3.6904761299338134,
"children": {
"TrainerController._reset_env": {
"total": 3.5695399789999556,
"count": 1,
"self": 3.5695399789999556
},
"TrainerController.advance": {
"total": 5079.135129145066,
"count": 96963,
"self": 3.8947078281671565,
"children": {
"env_step": {
"total": 3470.3674948899925,
"count": 96963,
"self": 3233.5374938249865,
"children": {
"SubprocessEnvManager._take_step": {
"total": 234.63855389190542,
"count": 96963,
"self": 10.72977770082855,
"children": {
"TorchPolicy.evaluate": {
"total": 223.90877619107687,
"count": 94445,
"self": 223.90877619107687
}
}
},
"workers": {
"total": 2.1914471731006415,
"count": 96962,
"self": 0.0,
"children": {
"worker_root": {
"total": 5073.441598668095,
"count": 96962,
"is_parallel": true,
"self": 2122.2772730609995,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006185402000028262,
"count": 1,
"is_parallel": true,
"self": 0.004523844000118515,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016615579999097463,
"count": 8,
"is_parallel": true,
"self": 0.0016615579999097463
}
}
},
"UnityEnvironment.step": {
"total": 0.06134193499997309,
"count": 1,
"is_parallel": true,
"self": 0.0007351730000664247,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005265849999886996,
"count": 1,
"is_parallel": true,
"self": 0.0005265849999886996
},
"communicator.exchange": {
"total": 0.058123232999946595,
"count": 1,
"is_parallel": true,
"self": 0.058123232999946595
},
"steps_from_proto": {
"total": 0.0019569439999713723,
"count": 1,
"is_parallel": true,
"self": 0.0004011329998547808,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015558110001165915,
"count": 8,
"is_parallel": true,
"self": 0.0015558110001165915
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2951.1643256070956,
"count": 96961,
"is_parallel": true,
"self": 76.67513662533247,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 47.331383175808014,
"count": 96961,
"is_parallel": true,
"self": 47.331383175808014
},
"communicator.exchange": {
"total": 2629.3576256059255,
"count": 96961,
"is_parallel": true,
"self": 2629.3576256059255
},
"steps_from_proto": {
"total": 197.80018020002933,
"count": 96961,
"is_parallel": true,
"self": 42.47767654322058,
"children": {
"_process_rank_one_or_two_observation": {
"total": 155.32250365680875,
"count": 775688,
"is_parallel": true,
"self": 155.32250365680875
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1604.872926426906,
"count": 96962,
"self": 6.674158850794356,
"children": {
"process_trajectory": {
"total": 249.86062613011893,
"count": 96962,
"self": 249.46366886611952,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3969572639994112,
"count": 3,
"self": 0.3969572639994112
}
}
},
"_update_policy": {
"total": 1348.3381414459927,
"count": 686,
"self": 552.8368815839934,
"children": {
"TorchPPOOptimizer.update": {
"total": 795.5012598619993,
"count": 34494,
"self": 795.5012598619993
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5260002328432165e-06,
"count": 1,
"self": 1.5260002328432165e-06
},
"TrainerController._save_models": {
"total": 0.09206261000053928,
"count": 1,
"self": 0.002072358000987151,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08999025199955213,
"count": 1,
"self": 0.08999025199955213
}
}
}
}
}
}
}