{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4289659857749939,
"min": 0.4289659857749939,
"max": 1.31809401512146,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12827.798828125,
"min": 12827.798828125,
"max": 39985.69921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989912.0,
"min": 29952.0,
"max": 989912.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989912.0,
"min": 29952.0,
"max": 989912.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.508364737033844,
"min": -0.19363939762115479,
"max": 0.5636441111564636,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 139.80030822753906,
"min": -45.89253616333008,
"max": 156.6930694580078,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.17498508095741272,
"min": -0.004486272111535072,
"max": 0.6072382926940918,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 48.12089920043945,
"min": -1.1439993381500244,
"max": 143.9154815673828,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06723515045069646,
"min": 0.06648845359588797,
"max": 0.07345523796231304,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9412921063097504,
"min": 0.508330280637307,
"max": 1.0283733314723826,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01812535998137069,
"min": 0.000507436916675891,
"max": 0.020018696852882083,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.25375503973918967,
"min": 0.0055818060834348015,
"max": 0.2802617559403492,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.519254636471429e-06,
"min": 7.519254636471429e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010526956491060001,
"min": 0.00010526956491060001,
"max": 0.0035077898307368,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250638571428573,
"min": 0.10250638571428573,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4350894000000003,
"min": 1.3886848,
"max": 2.5692632,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002603879328571429,
"min": 0.0002603879328571429,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036454310600000004,
"min": 0.0036454310600000004,
"max": 0.11694939368,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012963595800101757,
"min": 0.012963595800101757,
"max": 0.5573433041572571,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18149034678936005,
"min": 0.18149034678936005,
"max": 3.9014031887054443,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 360.0617283950617,
"min": 336.04395604395603,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29165.0,
"min": 15984.0,
"max": 32508.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5411481364273731,
"min": -1.0000000521540642,
"max": 1.6213487612038124,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 124.83299905061722,
"min": -31.99920167028904,
"max": 147.4189984947443,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5411481364273731,
"min": -1.0000000521540642,
"max": 1.6213487612038124,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 124.83299905061722,
"min": -31.99920167028904,
"max": 147.4189984947443,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04818870840927609,
"min": 0.04584684555775711,
"max": 11.247974168509245,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.9032853811513633,
"min": 3.9032853811513633,
"max": 179.96758669614792,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1711621065",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executable/linux/Pyramids/Pyramids --run-id=PyramidsTraining --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1711623249"
},
"total": 2183.79927541,
"count": 1,
"self": 0.5281713340004899,
"children": {
"run_training.setup": {
"total": 0.04975011800001994,
"count": 1,
"self": 0.04975011800001994
},
"TrainerController.start_learning": {
"total": 2183.221353958,
"count": 1,
"self": 1.3033407250240998,
"children": {
"TrainerController._reset_env": {
"total": 2.8682284879999997,
"count": 1,
"self": 2.8682284879999997
},
"TrainerController.advance": {
"total": 2178.9671756909756,
"count": 63747,
"self": 1.3575641439238098,
"children": {
"env_step": {
"total": 1559.7008290100096,
"count": 63747,
"self": 1430.8532502829635,
"children": {
"SubprocessEnvManager._take_step": {
"total": 128.03548417902869,
"count": 63747,
"self": 4.585432906048027,
"children": {
"TorchPolicy.evaluate": {
"total": 123.45005127298066,
"count": 62561,
"self": 123.45005127298066
}
}
},
"workers": {
"total": 0.8120945480175124,
"count": 63747,
"self": 0.0,
"children": {
"worker_root": {
"total": 2178.3120208219507,
"count": 63747,
"is_parallel": true,
"self": 864.9351713520091,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007843702999934976,
"count": 1,
"is_parallel": true,
"self": 0.004084966999812423,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0037587360001225534,
"count": 8,
"is_parallel": true,
"self": 0.0037587360001225534
}
}
},
"UnityEnvironment.step": {
"total": 0.0802447519999987,
"count": 1,
"is_parallel": true,
"self": 0.0007285710000815016,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004132469999831301,
"count": 1,
"is_parallel": true,
"self": 0.0004132469999831301
},
"communicator.exchange": {
"total": 0.07637481099993693,
"count": 1,
"is_parallel": true,
"self": 0.07637481099993693
},
"steps_from_proto": {
"total": 0.00272812299999714,
"count": 1,
"is_parallel": true,
"self": 0.00040108599978339043,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023270370002137497,
"count": 8,
"is_parallel": true,
"self": 0.0023270370002137497
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1313.3768494699416,
"count": 63746,
"is_parallel": true,
"self": 34.13095525298968,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.306391009997355,
"count": 63746,
"is_parallel": true,
"self": 23.306391009997355
},
"communicator.exchange": {
"total": 1158.5245717029602,
"count": 63746,
"is_parallel": true,
"self": 1158.5245717029602
},
"steps_from_proto": {
"total": 97.41493150399435,
"count": 63746,
"is_parallel": true,
"self": 19.4339367030866,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.98099480090775,
"count": 509968,
"is_parallel": true,
"self": 77.98099480090775
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 617.908782537042,
"count": 63747,
"self": 2.615125084057695,
"children": {
"process_trajectory": {
"total": 126.1011633499819,
"count": 63747,
"self": 125.75416057098153,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3470027790003769,
"count": 2,
"self": 0.3470027790003769
}
}
},
"_update_policy": {
"total": 489.19249410300245,
"count": 451,
"self": 284.8845118409936,
"children": {
"TorchPPOOptimizer.update": {
"total": 204.30798226200886,
"count": 22809,
"self": 204.30798226200886
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.193000116472831e-06,
"count": 1,
"self": 1.193000116472831e-06
},
"TrainerController._save_models": {
"total": 0.08260786099981487,
"count": 1,
"self": 0.0013252549997559981,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08128260600005888,
"count": 1,
"self": 0.08128260600005888
}
}
}
}
}
}
}