{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3324770927429199,
"min": 0.3324770927429199,
"max": 1.4548792839050293,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9953.0341796875,
"min": 9953.0341796875,
"max": 44135.21875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989918.0,
"min": 29952.0,
"max": 989918.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989918.0,
"min": 29952.0,
"max": 989918.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5550710558891296,
"min": -0.06832250952720642,
"max": 0.6014209985733032,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 153.7546844482422,
"min": -16.46572494506836,
"max": 170.80355834960938,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01205187477171421,
"min": -0.06452063471078873,
"max": 0.34229642152786255,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.338369369506836,
"min": -16.710844039916992,
"max": 81.12425231933594,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07036904126912955,
"min": 0.0650227955550331,
"max": 0.07307769231585587,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9851665777678137,
"min": 0.4959885253632526,
"max": 1.0382972253331293,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016767020660890485,
"min": 0.0017716287481575866,
"max": 0.016767020660890485,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23473828925246681,
"min": 0.017716287481575867,
"max": 0.23473828925246681,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.478304650121427e-06,
"min": 7.478304650121427e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010469626510169998,
"min": 0.00010469626510169998,
"max": 0.0036338002887333,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249273571428572,
"min": 0.10249273571428572,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4348983,
"min": 1.3886848,
"max": 2.6112667000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002590242978571428,
"min": 0.0002590242978571428,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00362634017,
"min": 0.00362634017,
"max": 0.12114554333000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010566731914877892,
"min": 0.010566731914877892,
"max": 0.48450082540512085,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14793424308300018,
"min": 0.14793424308300018,
"max": 3.391505718231201,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 337.4623655913978,
"min": 309.67021276595744,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31384.0,
"min": 15984.0,
"max": 32714.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.533470953344017,
"min": -1.0000000521540642,
"max": 1.6264680677113381,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 142.61279866099358,
"min": -29.90340167284012,
"max": 156.68439809978008,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.533470953344017,
"min": -1.0000000521540642,
"max": 1.6264680677113381,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 142.61279866099358,
"min": -29.90340167284012,
"max": 156.68439809978008,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03668960201508936,
"min": 0.03668960201508936,
"max": 10.423358355648816,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.412132987403311,
"min": 3.412132987403311,
"max": 166.77373369038105,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1742656950",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1742659395"
},
"total": 2444.5165743879998,
"count": 1,
"self": 0.47603199599961954,
"children": {
"run_training.setup": {
"total": 0.028031152999801634,
"count": 1,
"self": 0.028031152999801634
},
"TrainerController.start_learning": {
"total": 2444.012511239,
"count": 1,
"self": 1.5071535629986101,
"children": {
"TrainerController._reset_env": {
"total": 2.8056586739999148,
"count": 1,
"self": 2.8056586739999148
},
"TrainerController.advance": {
"total": 2439.603938704002,
"count": 64043,
"self": 1.5645116429850532,
"children": {
"env_step": {
"total": 1728.7329676030001,
"count": 64043,
"self": 1561.7729535029453,
"children": {
"SubprocessEnvManager._take_step": {
"total": 166.10913269306093,
"count": 64043,
"self": 4.947578988074838,
"children": {
"TorchPolicy.evaluate": {
"total": 161.1615537049861,
"count": 62553,
"self": 161.1615537049861
}
}
},
"workers": {
"total": 0.850881406993949,
"count": 64043,
"self": 0.0,
"children": {
"worker_root": {
"total": 2438.443322993041,
"count": 64043,
"is_parallel": true,
"self": 999.1339769820513,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004184065000117698,
"count": 1,
"is_parallel": true,
"self": 0.000808286999927077,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0033757780001906212,
"count": 8,
"is_parallel": true,
"self": 0.0033757780001906212
}
}
},
"UnityEnvironment.step": {
"total": 0.056165880999969886,
"count": 1,
"is_parallel": true,
"self": 0.0006252719999793044,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004908230000637559,
"count": 1,
"is_parallel": true,
"self": 0.0004908230000637559
},
"communicator.exchange": {
"total": 0.05333701299991844,
"count": 1,
"is_parallel": true,
"self": 0.05333701299991844
},
"steps_from_proto": {
"total": 0.001712773000008383,
"count": 1,
"is_parallel": true,
"self": 0.0003677239999433368,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013450490000650461,
"count": 8,
"is_parallel": true,
"self": 0.0013450490000650461
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1439.3093460109897,
"count": 64042,
"is_parallel": true,
"self": 33.327104415928034,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.594097405083858,
"count": 64042,
"is_parallel": true,
"self": 24.594097405083858
},
"communicator.exchange": {
"total": 1278.071701082994,
"count": 64042,
"is_parallel": true,
"self": 1278.071701082994
},
"steps_from_proto": {
"total": 103.3164431069838,
"count": 64042,
"is_parallel": true,
"self": 21.4077753642282,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.9086677427556,
"count": 512336,
"is_parallel": true,
"self": 81.9086677427556
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 709.3064594580167,
"count": 64043,
"self": 2.924165888109883,
"children": {
"process_trajectory": {
"total": 137.21255849590534,
"count": 64043,
"self": 136.99152616690526,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22103232900008152,
"count": 2,
"self": 0.22103232900008152
}
}
},
"_update_policy": {
"total": 569.1697350740014,
"count": 455,
"self": 312.6699589459936,
"children": {
"TorchPPOOptimizer.update": {
"total": 256.49977612800785,
"count": 22818,
"self": 256.49977612800785
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.619998309062794e-07,
"count": 1,
"self": 9.619998309062794e-07
},
"TrainerController._save_models": {
"total": 0.09575933599990094,
"count": 1,
"self": 0.0015845049997551541,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09417483100014579,
"count": 1,
"self": 0.09417483100014579
}
}
}
}
}
}
}