{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5708006024360657,
"min": 0.42833226919174194,
"max": 1.4708755016326904,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17324.939453125,
"min": 12884.234375,
"max": 44620.48046875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989943.0,
"min": 29952.0,
"max": 989943.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989943.0,
"min": 29952.0,
"max": 989943.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.023499079048633575,
"min": -0.10472418367862701,
"max": 0.11422563344240189,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -5.686777114868164,
"min": -25.133804321289062,
"max": 27.071475982666016,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02088215947151184,
"min": 0.018395602703094482,
"max": 0.4636871814727783,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.053482532501221,
"min": 4.525318145751953,
"max": 111.74861145019531,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06973046125535637,
"min": 0.0627924472210939,
"max": 0.07401389274539621,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9762264575749892,
"min": 0.511833665656306,
"max": 1.02404184554368,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0032879300028694045,
"min": 5.180696372184087e-05,
"max": 0.011883177638444407,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.046031020040171663,
"min": 0.0007252974921057721,
"max": 0.09525299894220571,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.550647483150001e-06,
"min": 7.550647483150001e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010570906476410001,
"min": 0.00010570906476410001,
"max": 0.0033754576748475,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251684999999999,
"min": 0.10251684999999999,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4352359,
"min": 1.3886848,
"max": 2.4251525,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026143331500000003,
"min": 0.00026143331500000003,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00366006641,
"min": 0.00366006641,
"max": 0.11253273475000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.02031579241156578,
"min": 0.02010221965610981,
"max": 0.6815212965011597,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.28442108631134033,
"min": 0.28143107891082764,
"max": 4.770648956298828,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 903.0909090909091,
"min": 827.027027027027,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29802.0,
"min": 15984.0,
"max": 33131.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.4793030714446848,
"min": -1.0000000521540642,
"max": -0.01655139451896822,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -15.817001357674599,
"min": -30.139001734554768,
"max": -0.6124015972018242,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.4793030714446848,
"min": -1.0000000521540642,
"max": -0.01655139451896822,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -15.817001357674599,
"min": -30.139001734554768,
"max": -0.6124015972018242,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.1911653084680438,
"min": 0.1789688714182105,
"max": 14.141306975856423,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 6.3084551794454455,
"min": 5.90584307257086,
"max": 226.26091161370277,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703312035",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/rlunit5_pyramid_config.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1703314083"
},
"total": 2048.0643324380003,
"count": 1,
"self": 0.8068657950002489,
"children": {
"run_training.setup": {
"total": 0.05525127999999313,
"count": 1,
"self": 0.05525127999999313
},
"TrainerController.start_learning": {
"total": 2047.202215363,
"count": 1,
"self": 1.426885606002088,
"children": {
"TrainerController._reset_env": {
"total": 3.5284362290000217,
"count": 1,
"self": 3.5284362290000217
},
"TrainerController.advance": {
"total": 2042.1278393359976,
"count": 63230,
"self": 1.4607886760625206,
"children": {
"env_step": {
"total": 1403.2713328229486,
"count": 63230,
"self": 1272.5895367218998,
"children": {
"SubprocessEnvManager._take_step": {
"total": 129.86931852602578,
"count": 63230,
"self": 4.663532566035769,
"children": {
"TorchPolicy.evaluate": {
"total": 125.20578595999001,
"count": 62576,
"self": 125.20578595999001
}
}
},
"workers": {
"total": 0.812477575023081,
"count": 63230,
"self": 0.0,
"children": {
"worker_root": {
"total": 2042.0379567009934,
"count": 63230,
"is_parallel": true,
"self": 887.171282867042,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007601380999972207,
"count": 1,
"is_parallel": true,
"self": 0.0035518930000080218,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004049487999964185,
"count": 8,
"is_parallel": true,
"self": 0.004049487999964185
}
}
},
"UnityEnvironment.step": {
"total": 0.06000419200000806,
"count": 1,
"is_parallel": true,
"self": 0.000649699000007331,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000496616000020822,
"count": 1,
"is_parallel": true,
"self": 0.000496616000020822
},
"communicator.exchange": {
"total": 0.057139930999994704,
"count": 1,
"is_parallel": true,
"self": 0.057139930999994704
},
"steps_from_proto": {
"total": 0.0017179459999852043,
"count": 1,
"is_parallel": true,
"self": 0.00037093499992124634,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001347011000063958,
"count": 8,
"is_parallel": true,
"self": 0.001347011000063958
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1154.8666738339514,
"count": 63229,
"is_parallel": true,
"self": 35.45512123395997,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.310671551992755,
"count": 63229,
"is_parallel": true,
"self": 24.310671551992755
},
"communicator.exchange": {
"total": 996.5013080150507,
"count": 63229,
"is_parallel": true,
"self": 996.5013080150507
},
"steps_from_proto": {
"total": 98.599573032948,
"count": 63229,
"is_parallel": true,
"self": 19.505024316975494,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.09454871597251,
"count": 505832,
"is_parallel": true,
"self": 79.09454871597251
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 637.3957178369865,
"count": 63230,
"self": 2.6061248550181517,
"children": {
"process_trajectory": {
"total": 125.45382269896936,
"count": 63230,
"self": 125.21713044296916,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23669225600019672,
"count": 2,
"self": 0.23669225600019672
}
}
},
"_update_policy": {
"total": 509.33577028299896,
"count": 443,
"self": 302.5042601789859,
"children": {
"TorchPPOOptimizer.update": {
"total": 206.8315101040131,
"count": 22764,
"self": 206.8315101040131
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2650002645386849e-06,
"count": 1,
"self": 1.2650002645386849e-06
},
"TrainerController._save_models": {
"total": 0.11905292700021164,
"count": 1,
"self": 0.0020802680000997498,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11697265900011189,
"count": 1,
"self": 0.11697265900011189
}
}
}
}
}
}
}