{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.43124398589134216,
"min": 0.3955431878566742,
"max": 1.4614990949630737,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12875.220703125,
"min": 11992.353515625,
"max": 44336.03515625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989880.0,
"min": 29952.0,
"max": 989880.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989880.0,
"min": 29952.0,
"max": 989880.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.22162096202373505,
"min": -0.0918092131614685,
"max": 0.22285932302474976,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 56.95658874511719,
"min": -21.942401885986328,
"max": 56.95658874511719,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0030780162196606398,
"min": -0.0030780162196606398,
"max": 0.4806993007659912,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.7910501956939697,
"min": -0.7910501956939697,
"max": 113.92573547363281,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06555234380565318,
"min": 0.06555234380565318,
"max": 0.07402957987652684,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9177328132791445,
"min": 0.5024545092860587,
"max": 1.094846398711168,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01231775626304928,
"min": 0.0005916320509431199,
"max": 0.01231775626304928,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.17244858768268992,
"min": 0.0067553742327361695,
"max": 0.17244858768268992,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.633340312728572e-06,
"min": 7.633340312728572e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010686676437820001,
"min": 0.00010686676437820001,
"max": 0.0035080322306559994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025444142857143,
"min": 0.1025444142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356218000000003,
"min": 1.3886848,
"max": 2.569344,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026418698714285717,
"min": 0.00026418698714285717,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036986178200000006,
"min": 0.0036986178200000006,
"max": 0.11695746560000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01625346578657627,
"min": 0.01625346578657627,
"max": 0.5988557934761047,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2275485247373581,
"min": 0.2275485247373581,
"max": 4.191990375518799,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 580.0566037735849,
"min": 580.0566037735849,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30743.0,
"min": 15984.0,
"max": 32485.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.8914678982406292,
"min": -1.0000000521540642,
"max": 0.921843963265419,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 47.24779860675335,
"min": -30.482801668345928,
"max": 47.24779860675335,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.8914678982406292,
"min": -1.0000000521540642,
"max": 0.921843963265419,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 47.24779860675335,
"min": -30.482801668345928,
"max": 47.24779860675335,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.09856407713850099,
"min": 0.09856407713850099,
"max": 12.231072887778282,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.2238960883405525,
"min": 4.975898523582146,
"max": 195.69716620445251,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1756194021",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=PyramidTraining --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1756196326"
},
"total": 2305.478575574,
"count": 1,
"self": 0.48331715999984226,
"children": {
"run_training.setup": {
"total": 0.023233363999906942,
"count": 1,
"self": 0.023233363999906942
},
"TrainerController.start_learning": {
"total": 2304.97202505,
"count": 1,
"self": 1.7317995159551174,
"children": {
"TrainerController._reset_env": {
"total": 3.78912264600001,
"count": 1,
"self": 3.78912264600001
},
"TrainerController.advance": {
"total": 2299.3663940350443,
"count": 63341,
"self": 1.7410600979910669,
"children": {
"env_step": {
"total": 1600.7298655310628,
"count": 63341,
"self": 1425.0371003070877,
"children": {
"SubprocessEnvManager._take_step": {
"total": 174.67237272498346,
"count": 63341,
"self": 5.381487478983445,
"children": {
"TorchPolicy.evaluate": {
"total": 169.29088524600002,
"count": 62560,
"self": 169.29088524600002
}
}
},
"workers": {
"total": 1.020392498991555,
"count": 63341,
"self": 0.0,
"children": {
"worker_root": {
"total": 2298.8639346379464,
"count": 63341,
"is_parallel": true,
"self": 1004.537562184928,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007566507000092315,
"count": 1,
"is_parallel": true,
"self": 0.005492876000175784,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020736309999165314,
"count": 8,
"is_parallel": true,
"self": 0.0020736309999165314
}
}
},
"UnityEnvironment.step": {
"total": 0.053866833000029146,
"count": 1,
"is_parallel": true,
"self": 0.0006071669999982987,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005220570000119551,
"count": 1,
"is_parallel": true,
"self": 0.0005220570000119551
},
"communicator.exchange": {
"total": 0.050911367000026075,
"count": 1,
"is_parallel": true,
"self": 0.050911367000026075
},
"steps_from_proto": {
"total": 0.001826241999992817,
"count": 1,
"is_parallel": true,
"self": 0.0003880630000594465,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014381789999333705,
"count": 8,
"is_parallel": true,
"self": 0.0014381789999333705
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1294.3263724530184,
"count": 63340,
"is_parallel": true,
"self": 35.23027520702749,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.139718946025255,
"count": 63340,
"is_parallel": true,
"self": 25.139718946025255
},
"communicator.exchange": {
"total": 1126.2381694849978,
"count": 63340,
"is_parallel": true,
"self": 1126.2381694849978
},
"steps_from_proto": {
"total": 107.71820881496774,
"count": 63340,
"is_parallel": true,
"self": 22.350479007951208,
"children": {
"_process_rank_one_or_two_observation": {
"total": 85.36772980701653,
"count": 506720,
"is_parallel": true,
"self": 85.36772980701653
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 696.8954684059905,
"count": 63341,
"self": 3.341401394933655,
"children": {
"process_trajectory": {
"total": 135.34874582106045,
"count": 63341,
"self": 135.06540600206017,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2833398190002754,
"count": 2,
"self": 0.2833398190002754
}
}
},
"_update_policy": {
"total": 558.2053211899964,
"count": 447,
"self": 307.47897516396984,
"children": {
"TorchPPOOptimizer.update": {
"total": 250.7263460260266,
"count": 22782,
"self": 250.7263460260266
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.630002750782296e-07,
"count": 1,
"self": 9.630002750782296e-07
},
"TrainerController._save_models": {
"total": 0.08470789000011791,
"count": 1,
"self": 0.001316566000241437,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08339132399987648,
"count": 1,
"self": 0.08339132399987648
}
}
}
}
}
}
}