{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3047933876514435,
"min": 0.29190945625305176,
"max": 1.3241060972213745,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9138.9248046875,
"min": 8785.306640625,
"max": 40168.08203125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989970.0,
"min": 29990.0,
"max": 989970.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989970.0,
"min": 29990.0,
"max": 989970.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.541854977607727,
"min": -0.07538217306137085,
"max": 0.6535017490386963,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 150.63568115234375,
"min": -18.167102813720703,
"max": 187.5550079345703,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.009142345748841763,
"min": -0.024734893813729286,
"max": 0.4143272638320923,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.541572093963623,
"min": -6.752625942230225,
"max": 98.6098861694336,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06956714546282051,
"min": 0.06496935876791775,
"max": 0.07332094755769329,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0435071819423076,
"min": 0.5628616311849755,
"max": 1.0639690269017592,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01707440197933465,
"min": 0.001371907716047841,
"max": 0.01829467566408095,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2561160296900198,
"min": 0.015090984876526251,
"max": 0.2561254592971333,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.475237508286663e-06,
"min": 7.475237508286663e-06,
"max": 0.000294846676717775,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011212856262429995,
"min": 0.00011212856262429995,
"max": 0.0035086100304634,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249171333333336,
"min": 0.10249171333333336,
"max": 0.198282225,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5373757000000003,
"min": 1.4775924000000005,
"max": 2.5695366,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000258922162,
"min": 0.000258922162,
"max": 0.0098283942775,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038838324299999994,
"min": 0.0038838324299999994,
"max": 0.11697670634,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009999454021453857,
"min": 0.009999454021453857,
"max": 0.3619423508644104,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14999181032180786,
"min": 0.140217587351799,
"max": 2.895538806915283,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 316.2857142857143,
"min": 283.8207547169811,
"max": 995.21875,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28782.0,
"min": 17044.0,
"max": 33716.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5957692188548518,
"min": -0.9336313018575311,
"max": 1.678336432464769,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 145.2149989157915,
"min": -29.876201659440994,
"max": 179.58199827373028,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5957692188548518,
"min": -0.9336313018575311,
"max": 1.678336432464769,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 145.2149989157915,
"min": -29.876201659440994,
"max": 179.58199827373028,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03267587999409323,
"min": 0.0303210501163865,
"max": 7.472210440370771,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.973505079462484,
"min": 2.973505079462484,
"max": 134.4997879266739,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1698646812",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1698649086"
},
"total": 2273.981772696,
"count": 1,
"self": 0.47821383100017556,
"children": {
"run_training.setup": {
"total": 0.06984717499994986,
"count": 1,
"self": 0.06984717499994986
},
"TrainerController.start_learning": {
"total": 2273.4337116899997,
"count": 1,
"self": 1.395256245931705,
"children": {
"TrainerController._reset_env": {
"total": 4.45878972100013,
"count": 1,
"self": 4.45878972100013
},
"TrainerController.advance": {
"total": 2267.5040479530685,
"count": 64179,
"self": 1.5415343681238483,
"children": {
"env_step": {
"total": 1640.8082672210123,
"count": 64179,
"self": 1507.4163487800988,
"children": {
"SubprocessEnvManager._take_step": {
"total": 132.50345913795354,
"count": 64179,
"self": 4.572251675890811,
"children": {
"TorchPolicy.evaluate": {
"total": 127.93120746206273,
"count": 62563,
"self": 127.93120746206273
}
}
},
"workers": {
"total": 0.8884593029599728,
"count": 64179,
"self": 0.0,
"children": {
"worker_root": {
"total": 2268.5106389389543,
"count": 64179,
"is_parallel": true,
"self": 877.1087153639644,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004541064000022743,
"count": 1,
"is_parallel": true,
"self": 0.002786548999893057,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001754515000129686,
"count": 8,
"is_parallel": true,
"self": 0.001754515000129686
}
}
},
"UnityEnvironment.step": {
"total": 0.047841014999903564,
"count": 1,
"is_parallel": true,
"self": 0.0005941449999227189,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004578440000386763,
"count": 1,
"is_parallel": true,
"self": 0.0004578440000386763
},
"communicator.exchange": {
"total": 0.0451744309998503,
"count": 1,
"is_parallel": true,
"self": 0.0451744309998503
},
"steps_from_proto": {
"total": 0.0016145950000918674,
"count": 1,
"is_parallel": true,
"self": 0.0003671160000067175,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00124747900008515,
"count": 8,
"is_parallel": true,
"self": 0.00124747900008515
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1391.40192357499,
"count": 64178,
"is_parallel": true,
"self": 34.26499794201891,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.62030514900289,
"count": 64178,
"is_parallel": true,
"self": 23.62030514900289
},
"communicator.exchange": {
"total": 1236.0753736499487,
"count": 64178,
"is_parallel": true,
"self": 1236.0753736499487
},
"steps_from_proto": {
"total": 97.44124683401947,
"count": 64178,
"is_parallel": true,
"self": 19.79334397907587,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.6479028549436,
"count": 513424,
"is_parallel": true,
"self": 77.6479028549436
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 625.1542463639323,
"count": 64179,
"self": 2.680966209954022,
"children": {
"process_trajectory": {
"total": 120.11829369397856,
"count": 64179,
"self": 119.958862862979,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15943083099955402,
"count": 2,
"self": 0.15943083099955402
}
}
},
"_update_policy": {
"total": 502.35498645999974,
"count": 457,
"self": 299.144962880044,
"children": {
"TorchPPOOptimizer.update": {
"total": 203.21002357995576,
"count": 22776,
"self": 203.21002357995576
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.229997885995544e-07,
"count": 1,
"self": 9.229997885995544e-07
},
"TrainerController._save_models": {
"total": 0.07561684699976468,
"count": 1,
"self": 0.001368433999687113,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07424841300007756,
"count": 1,
"self": 0.07424841300007756
}
}
}
}
}
}
}