{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 1.3917039632797241,
"min": 1.3078267574310303,
"max": 1.5998151302337646,
"count": 20
},
"Pyramids.Policy.Entropy.sum": {
"value": 69652.0,
"min": 65454.11328125,
"max": 80067.546875,
"count": 20
},
"Pyramids.Step.mean": {
"value": 999936.0,
"min": 49920.0,
"max": 999936.0,
"count": 20
},
"Pyramids.Step.sum": {
"value": 999936.0,
"min": 49920.0,
"max": 999936.0,
"count": 20
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.09946495294570923,
"min": -0.18257516622543335,
"max": -0.09841363877058029,
"count": 20
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -39.785980224609375,
"min": -72.84748840332031,
"max": -39.365455627441406,
"count": 20
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.02219029761835547,
"min": 0.020788404085753352,
"max": 0.025556642267584914,
"count": 20
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.08876119047342187,
"min": 0.08315361634301341,
"max": 0.12379502745302902,
"count": 20
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 2.7575494878056827e-05,
"min": 2.7575494878056827e-05,
"max": 0.003511216849526108,
"count": 20
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.00011030197951222731,
"min": 0.00011030197951222731,
"max": 0.014044867398104432,
"count": 20
},
"Pyramids.Policy.LearningRate.mean": {
"value": 6.556897814400004e-06,
"min": 6.556897814400004e-06,
"max": 0.00029095680301439997,
"count": 20
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.6227591257600017e-05,
"min": 2.6227591257600017e-05,
"max": 0.0011638272120575999,
"count": 20
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1021856,
"min": 0.1021856,
"max": 0.19698560000000004,
"count": 20
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.4087424,
"min": 0.4087424,
"max": 0.8379136,
"count": 20
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00011906144000000008,
"min": 0.00011906144000000008,
"max": 0.0048495814399999995,
"count": 20
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0004762457600000003,
"min": 0.0004762457600000003,
"max": 0.019398325759999998,
"count": 20
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 999.0,
"max": 999.0,
"count": 20
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 47952.0,
"min": 47952.0,
"max": 63936.0,
"count": 20
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -1.0000000521540642,
"min": -1.0000000521540642,
"max": -1.0000000521540642,
"count": 20
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -48.00000250339508,
"min": -63.00000328570604,
"max": -48.00000250339508,
"count": 20
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -1.0000000521540642,
"min": -1.0000000521540642,
"max": -1.0000000521540642,
"count": 20
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -48.00000250339508,
"min": -63.00000328570604,
"max": -48.00000250339508,
"count": 20
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1750393761",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1750395803"
},
"total": 2042.8476111649998,
"count": 1,
"self": 0.6345528080000804,
"children": {
"run_training.setup": {
"total": 0.03607151699998212,
"count": 1,
"self": 0.03607151699998212
},
"TrainerController.start_learning": {
"total": 2042.1769868399997,
"count": 1,
"self": 2.232530673011752,
"children": {
"TrainerController._reset_env": {
"total": 3.868706767999811,
"count": 1,
"self": 3.868706767999811
},
"TrainerController.advance": {
"total": 2036.0319226079887,
"count": 62512,
"self": 2.543822371947499,
"children": {
"env_step": {
"total": 1589.56686540308,
"count": 62512,
"self": 1453.6718032770827,
"children": {
"SubprocessEnvManager._take_step": {
"total": 134.54576765804677,
"count": 62512,
"self": 7.402292411016333,
"children": {
"TorchPolicy.evaluate": {
"total": 127.14347524703044,
"count": 62512,
"self": 127.14347524703044
}
}
},
"workers": {
"total": 1.3492944679505854,
"count": 62512,
"self": 0.0,
"children": {
"worker_root": {
"total": 2033.883055801925,
"count": 62512,
"is_parallel": true,
"self": 757.7096026158738,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0034344640002927918,
"count": 1,
"is_parallel": true,
"self": 0.0013138289991729835,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021206350011198083,
"count": 8,
"is_parallel": true,
"self": 0.0021206350011198083
}
}
},
"UnityEnvironment.step": {
"total": 0.11360919899971123,
"count": 1,
"is_parallel": true,
"self": 0.0007060579996505112,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005186059997868142,
"count": 1,
"is_parallel": true,
"self": 0.0005186059997868142
},
"communicator.exchange": {
"total": 0.10615657300013481,
"count": 1,
"is_parallel": true,
"self": 0.10615657300013481
},
"steps_from_proto": {
"total": 0.006227962000139087,
"count": 1,
"is_parallel": true,
"self": 0.0047121830007199605,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015157789994191262,
"count": 8,
"is_parallel": true,
"self": 0.0015157789994191262
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1276.1734531860511,
"count": 62511,
"is_parallel": true,
"self": 44.529280016100074,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 31.84765811698071,
"count": 62511,
"is_parallel": true,
"self": 31.84765811698071
},
"communicator.exchange": {
"total": 1073.707808625902,
"count": 62511,
"is_parallel": true,
"self": 1073.707808625902
},
"steps_from_proto": {
"total": 126.08870642706825,
"count": 62511,
"is_parallel": true,
"self": 26.9193418209652,
"children": {
"_process_rank_one_or_two_observation": {
"total": 99.16936460610304,
"count": 500088,
"is_parallel": true,
"self": 99.16936460610304
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 443.9212348329611,
"count": 62512,
"self": 2.839592646929759,
"children": {
"process_trajectory": {
"total": 114.76662026802933,
"count": 62512,
"self": 114.61849077902934,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14812948899998446,
"count": 2,
"self": 0.14812948899998446
}
}
},
"_update_policy": {
"total": 326.315021918002,
"count": 83,
"self": 230.50830351902505,
"children": {
"TorchPPOOptimizer.update": {
"total": 95.80671839897695,
"count": 2802,
"self": 95.80671839897695
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.509994924883358e-07,
"count": 1,
"self": 9.509994924883358e-07
},
"TrainerController._save_models": {
"total": 0.04382583999995404,
"count": 1,
"self": 0.0007759050004096935,
"children": {
"RLTrainer._checkpoint": {
"total": 0.04304993499954435,
"count": 1,
"self": 0.04304993499954435
}
}
}
}
}
}
}