{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3618449568748474,
"min": 0.3529750406742096,
"max": 0.8553169965744019,
"count": 28
},
"Pyramids.Policy.Entropy.sum": {
"value": 10762.716796875,
"min": 10594.8984375,
"max": 25604.76953125,
"count": 28
},
"Pyramids.Step.mean": {
"value": 989993.0,
"min": 179985.0,
"max": 989993.0,
"count": 28
},
"Pyramids.Step.sum": {
"value": 989993.0,
"min": 179985.0,
"max": 989993.0,
"count": 28
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5068752765655518,
"min": -0.08530397713184357,
"max": 0.5666282176971436,
"count": 28
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 140.40444946289062,
"min": -20.47295379638672,
"max": 158.65589904785156,
"count": 28
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.00644795224070549,
"min": -0.028411375358700752,
"max": 0.08228707313537598,
"count": 28
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1.7860827445983887,
"min": -7.529014587402344,
"max": 19.666610717773438,
"count": 28
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06689294658641882,
"min": 0.0631164039954894,
"max": 0.0742791079239199,
"count": 28
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9365012522098636,
"min": 0.6485527186448691,
"max": 1.0399075109348785,
"count": 28
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01560520268799783,
"min": 0.0007890843867962358,
"max": 0.01567497939030028,
"count": 28
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21847283763196962,
"min": 0.010258097028351065,
"max": 0.21944971146420394,
"count": 28
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.766240268428573e-06,
"min": 7.766240268428573e-06,
"max": 0.0002497925834024778,
"count": 28
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010872736375800002,
"min": 0.00010872736375800002,
"max": 0.0031397969534011,
"count": 28
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10258871428571428,
"min": 0.10258871428571428,
"max": 0.18326418888888893,
"count": 28
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.436242,
"min": 1.436242,
"max": 2.4427779000000003,
"count": 28
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026861255714285724,
"min": 0.00026861255714285724,
"max": 0.00832809247,
"count": 28
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037605758000000015,
"min": 0.0037605758000000015,
"max": 0.10468523010999999,
"count": 28
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011675438843667507,
"min": 0.011312982998788357,
"max": 0.048750050365924835,
"count": 28
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16345614194869995,
"min": 0.15838176012039185,
"max": 0.5191813111305237,
"count": 28
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 370.26190476190476,
"min": 351.4597701149425,
"max": 978.59375,
"count": 28
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31102.0,
"min": 18160.0,
"max": 34106.0,
"count": 28
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5828722677676075,
"min": -0.9168375506997108,
"max": 1.6255471057247841,
"count": 28
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 131.37839822471142,
"min": -29.338801622390747,
"max": 141.42259819805622,
"count": 28
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5828722677676075,
"min": -0.9168375506997108,
"max": 1.6255471057247841,
"count": 28
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 131.37839822471142,
"min": -29.338801622390747,
"max": 141.42259819805622,
"count": 28
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04427189766345209,
"min": 0.041951999594207985,
"max": 0.4914944805695038,
"count": 28
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.6745675060665235,
"min": 3.523967965913471,
"max": 13.842409016564488,
"count": 28
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 28
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 28
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678814594",
"python_version": "3.7.16 (default, Dec 7 2022, 01:12:19) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1678816754"
},
"total": 2160.6701275600003,
"count": 1,
"self": 0.48016576300051383,
"children": {
"run_training.setup": {
"total": 0.02151566200018351,
"count": 1,
"self": 0.02151566200018351
},
"TrainerController.start_learning": {
"total": 2160.1684461349996,
"count": 1,
"self": 1.4157153749260942,
"children": {
"TrainerController._reset_env": {
"total": 4.789028941999732,
"count": 1,
"self": 4.789028941999732
},
"TrainerController.advance": {
"total": 2153.876120673074,
"count": 54321,
"self": 1.619444891031435,
"children": {
"env_step": {
"total": 1438.6179821589963,
"count": 54321,
"self": 1331.5296034100734,
"children": {
"SubprocessEnvManager._take_step": {
"total": 106.30317591796711,
"count": 54321,
"self": 4.737883375942147,
"children": {
"TorchPolicy.evaluate": {
"total": 101.56529254202496,
"count": 53176,
"self": 33.905435359045896,
"children": {
"TorchPolicy.sample_actions": {
"total": 67.65985718297907,
"count": 53176,
"self": 67.65985718297907
}
}
}
}
},
"workers": {
"total": 0.7852028309557681,
"count": 54321,
"self": 0.0,
"children": {
"worker_root": {
"total": 2155.28793625392,
"count": 54321,
"is_parallel": true,
"self": 935.1263824629973,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00277465499993923,
"count": 1,
"is_parallel": true,
"self": 0.0010022510000453622,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017724039998938679,
"count": 8,
"is_parallel": true,
"self": 0.0017724039998938679
}
}
},
"UnityEnvironment.step": {
"total": 0.05341468400001759,
"count": 1,
"is_parallel": true,
"self": 0.0006515259997286194,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000546003000181372,
"count": 1,
"is_parallel": true,
"self": 0.000546003000181372
},
"communicator.exchange": {
"total": 0.050323500000104104,
"count": 1,
"is_parallel": true,
"self": 0.050323500000104104
},
"steps_from_proto": {
"total": 0.0018936550000034913,
"count": 1,
"is_parallel": true,
"self": 0.0004677750007431314,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00142587999926036,
"count": 8,
"is_parallel": true,
"self": 0.00142587999926036
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1220.1615537909229,
"count": 54320,
"is_parallel": true,
"self": 29.57933462664232,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.323538012017707,
"count": 54320,
"is_parallel": true,
"self": 22.323538012017707
},
"communicator.exchange": {
"total": 1077.0466785491199,
"count": 54320,
"is_parallel": true,
"self": 1077.0466785491199
},
"steps_from_proto": {
"total": 91.21200260314299,
"count": 54320,
"is_parallel": true,
"self": 23.972011910746005,
"children": {
"_process_rank_one_or_two_observation": {
"total": 67.23999069239699,
"count": 434560,
"is_parallel": true,
"self": 67.23999069239699
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 713.6386936230465,
"count": 54321,
"self": 2.6594292340878383,
"children": {
"process_trajectory": {
"total": 161.49895048795452,
"count": 54321,
"self": 161.29619650795348,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2027539800010345,
"count": 2,
"self": 0.2027539800010345
}
}
},
"_update_policy": {
"total": 549.4803139010041,
"count": 393,
"self": 228.5942838309752,
"children": {
"TorchPPOOptimizer.update": {
"total": 320.88603007002894,
"count": 19377,
"self": 320.88603007002894
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1399997674743645e-06,
"count": 1,
"self": 1.1399997674743645e-06
},
"TrainerController._save_models": {
"total": 0.08758000499983609,
"count": 1,
"self": 0.0028598239996426855,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0847201810001934,
"count": 1,
"self": 0.0847201810001934
}
}
}
}
}
}
}