{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.43201926350593567,
"min": 0.4126507341861725,
"max": 1.46349036693573,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12953.666015625,
"min": 12287.087890625,
"max": 44396.4453125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989915.0,
"min": 29952.0,
"max": 989915.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989915.0,
"min": 29952.0,
"max": 989915.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2705666422843933,
"min": -0.10222095251083374,
"max": 0.3841298222541809,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 71.15902709960938,
"min": -24.635250091552734,
"max": 102.79904174804688,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0276415403932333,
"min": -0.01512638758867979,
"max": 0.34357523918151855,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.269725322723389,
"min": -3.96311354637146,
"max": 81.42733001708984,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06657631831504184,
"min": 0.0649188039604471,
"max": 0.07286806072030835,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9320684564105857,
"min": 0.48991791259607126,
"max": 1.0930209108046254,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015950000248440552,
"min": 9.515976203027199e-05,
"max": 0.015950000248440552,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22330000347816772,
"min": 0.0011419171443632639,
"max": 0.22330000347816772,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.77211169504286e-06,
"min": 7.77211169504286e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010880956373060002,
"min": 0.00010880956373060002,
"max": 0.0031171897609368,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10259067142857141,
"min": 0.10259067142857141,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4362693999999998,
"min": 1.3691136000000002,
"max": 2.358575,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026880807571428585,
"min": 0.00026880807571428585,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003763313060000002,
"min": 0.003763313060000002,
"max": 0.10392241368,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010256324894726276,
"min": 0.010149536654353142,
"max": 0.38958531618118286,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14358854293823242,
"min": 0.1420935094356537,
"max": 2.727097272872925,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 502.33846153846156,
"min": 434.5072463768116,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32652.0,
"min": 15984.0,
"max": 34641.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.056093732593581,
"min": -1.0000000521540642,
"max": 1.4205217222156732,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 67.58999888598919,
"min": -32.000001668930054,
"max": 98.01599883288145,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.056093732593581,
"min": -1.0000000521540642,
"max": 1.4205217222156732,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 67.58999888598919,
"min": -32.000001668930054,
"max": 98.01599883288145,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05354071552892492,
"min": 0.051713275283649295,
"max": 7.448008037172258,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.426605793851195,
"min": 2.803995841415599,
"max": 119.16812859475613,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1761816776",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1761818871"
},
"total": 2095.172577696,
"count": 1,
"self": 0.44956779399944935,
"children": {
"run_training.setup": {
"total": 0.02278980400024011,
"count": 1,
"self": 0.02278980400024011
},
"TrainerController.start_learning": {
"total": 2094.7002200980005,
"count": 1,
"self": 1.2415438439129503,
"children": {
"TrainerController._reset_env": {
"total": 2.0502595589996417,
"count": 1,
"self": 2.0502595589996417
},
"TrainerController.advance": {
"total": 2091.3343389500887,
"count": 63449,
"self": 1.2972198623238,
"children": {
"env_step": {
"total": 1453.6420969577448,
"count": 63449,
"self": 1311.2657770058504,
"children": {
"SubprocessEnvManager._take_step": {
"total": 141.64597818002858,
"count": 63449,
"self": 4.430292254113738,
"children": {
"TorchPolicy.evaluate": {
"total": 137.21568592591484,
"count": 62561,
"self": 137.21568592591484
}
}
},
"workers": {
"total": 0.7303417718658238,
"count": 63449,
"self": 0.0,
"children": {
"worker_root": {
"total": 2087.979813080963,
"count": 63449,
"is_parallel": true,
"self": 887.6161081809878,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021586829998341273,
"count": 1,
"is_parallel": true,
"self": 0.0006511580004371353,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001507524999396992,
"count": 8,
"is_parallel": true,
"self": 0.001507524999396992
}
}
},
"UnityEnvironment.step": {
"total": 0.0501952340000571,
"count": 1,
"is_parallel": true,
"self": 0.0005296079998515779,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000510750000103144,
"count": 1,
"is_parallel": true,
"self": 0.000510750000103144
},
"communicator.exchange": {
"total": 0.047508003000075405,
"count": 1,
"is_parallel": true,
"self": 0.047508003000075405
},
"steps_from_proto": {
"total": 0.0016468730000269716,
"count": 1,
"is_parallel": true,
"self": 0.00034802899881469784,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012988440012122737,
"count": 8,
"is_parallel": true,
"self": 0.0012988440012122737
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1200.3637048999753,
"count": 63448,
"is_parallel": true,
"self": 32.34217209886356,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.56324245111682,
"count": 63448,
"is_parallel": true,
"self": 22.56324245111682
},
"communicator.exchange": {
"total": 1041.4033060839265,
"count": 63448,
"is_parallel": true,
"self": 1041.4033060839265
},
"steps_from_proto": {
"total": 104.05498426606846,
"count": 63448,
"is_parallel": true,
"self": 21.133946034768996,
"children": {
"_process_rank_one_or_two_observation": {
"total": 82.92103823129946,
"count": 507584,
"is_parallel": true,
"self": 82.92103823129946
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 636.3950221300202,
"count": 63449,
"self": 2.2017558661127623,
"children": {
"process_trajectory": {
"total": 118.60743757891669,
"count": 63449,
"self": 118.42981235591651,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17762522300017736,
"count": 2,
"self": 0.17762522300017736
}
}
},
"_update_policy": {
"total": 515.5858286849907,
"count": 437,
"self": 286.60466157509927,
"children": {
"TorchPPOOptimizer.update": {
"total": 228.98116710989143,
"count": 22797,
"self": 228.98116710989143
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0969997674692422e-06,
"count": 1,
"self": 1.0969997674692422e-06
},
"TrainerController._save_models": {
"total": 0.07407664799939084,
"count": 1,
"self": 0.001014790999761317,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07306185699962953,
"count": 1,
"self": 0.07306185699962953
}
}
}
}
}
}
}