{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.563139021396637,
"min": 0.563139021396637,
"max": 1.3739315271377563,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16804.068359375,
"min": 16804.068359375,
"max": 41679.5859375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989880.0,
"min": 29952.0,
"max": 989880.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989880.0,
"min": 29952.0,
"max": 989880.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.392277330160141,
"min": -0.0955115258693695,
"max": 0.41140443086624146,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 103.9534912109375,
"min": -22.922765731811523,
"max": 109.43357849121094,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.013862808234989643,
"min": -0.009203605353832245,
"max": 0.3599839508533478,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.6736443042755127,
"min": -2.282494068145752,
"max": 86.39614868164062,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07022596293384033,
"min": 0.06578706537851914,
"max": 0.07296769488077684,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9831634810737645,
"min": 0.4903087300539816,
"max": 1.0499436831593687,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01229941763986495,
"min": 0.0005660735842907343,
"max": 0.015471876716002867,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1721918469581093,
"min": 0.006725525295390896,
"max": 0.21660627402404015,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.598818895664288e-06,
"min": 7.598818895664288e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010638346453930003,
"min": 0.00010638346453930003,
"max": 0.0036323695892101996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10253290714285715,
"min": 0.10253290714285715,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4354607000000001,
"min": 1.3691136000000002,
"max": 2.6107898,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026303742357142856,
"min": 0.00026303742357142856,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00368252393,
"min": 0.00368252393,
"max": 0.12109790102000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00980572123080492,
"min": 0.00980572123080492,
"max": 0.5378000736236572,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13728009164333344,
"min": 0.13728009164333344,
"max": 3.7646005153656006,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 467.7096774193548,
"min": 436.6268656716418,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28998.0,
"min": 15984.0,
"max": 32623.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4086539493430228,
"min": -1.0000000521540642,
"max": 1.472763358497284,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 88.74519880861044,
"min": -32.000001668930054,
"max": 104.56619845330715,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4086539493430228,
"min": -1.0000000521540642,
"max": 1.472763358497284,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 88.74519880861044,
"min": -32.000001668930054,
"max": 104.56619845330715,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.047425016759902176,
"min": 0.04642637210561203,
"max": 10.782856525853276,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.9877760558738373,
"min": 2.9877760558738373,
"max": 172.52570441365242,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677854727",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677856893"
},
"total": 2165.675805238,
"count": 1,
"self": 0.47445792400003484,
"children": {
"run_training.setup": {
"total": 0.1899148239999704,
"count": 1,
"self": 0.1899148239999704
},
"TrainerController.start_learning": {
"total": 2165.01143249,
"count": 1,
"self": 1.3979415850076293,
"children": {
"TrainerController._reset_env": {
"total": 6.367886186999954,
"count": 1,
"self": 6.367886186999954
},
"TrainerController.advance": {
"total": 2157.1587400139924,
"count": 63553,
"self": 1.499449454029218,
"children": {
"env_step": {
"total": 1419.809166608004,
"count": 63553,
"self": 1304.813753643931,
"children": {
"SubprocessEnvManager._take_step": {
"total": 114.13796852202142,
"count": 63553,
"self": 4.718620232977514,
"children": {
"TorchPolicy.evaluate": {
"total": 109.41934828904391,
"count": 62550,
"self": 37.01740851701902,
"children": {
"TorchPolicy.sample_actions": {
"total": 72.40193977202489,
"count": 62550,
"self": 72.40193977202489
}
}
}
}
},
"workers": {
"total": 0.8574444420515874,
"count": 63553,
"self": 0.0,
"children": {
"worker_root": {
"total": 2160.391371067985,
"count": 63553,
"is_parallel": true,
"self": 971.0999206480142,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019007619999911185,
"count": 1,
"is_parallel": true,
"self": 0.0006964639999296196,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001204298000061499,
"count": 8,
"is_parallel": true,
"self": 0.001204298000061499
}
}
},
"UnityEnvironment.step": {
"total": 0.050587943999971685,
"count": 1,
"is_parallel": true,
"self": 0.0005690809998668556,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043502800008354825,
"count": 1,
"is_parallel": true,
"self": 0.00043502800008354825
},
"communicator.exchange": {
"total": 0.047975332999953935,
"count": 1,
"is_parallel": true,
"self": 0.047975332999953935
},
"steps_from_proto": {
"total": 0.0016085020000673467,
"count": 1,
"is_parallel": true,
"self": 0.00037895700017998024,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012295449998873664,
"count": 8,
"is_parallel": true,
"self": 0.0012295449998873664
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1189.2914504199707,
"count": 63552,
"is_parallel": true,
"self": 31.60095906393053,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.007840519966635,
"count": 63552,
"is_parallel": true,
"self": 23.007840519966635
},
"communicator.exchange": {
"total": 1041.935611814044,
"count": 63552,
"is_parallel": true,
"self": 1041.935611814044
},
"steps_from_proto": {
"total": 92.74703902202975,
"count": 63552,
"is_parallel": true,
"self": 21.934320327102114,
"children": {
"_process_rank_one_or_two_observation": {
"total": 70.81271869492764,
"count": 508416,
"is_parallel": true,
"self": 70.81271869492764
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 735.8501239519593,
"count": 63553,
"self": 2.5643802200028176,
"children": {
"process_trajectory": {
"total": 163.04678651195752,
"count": 63553,
"self": 162.85391322995758,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1928732819999368,
"count": 2,
"self": 0.1928732819999368
}
}
},
"_update_policy": {
"total": 570.238957219999,
"count": 450,
"self": 221.71720175202438,
"children": {
"TorchPPOOptimizer.update": {
"total": 348.5217554679746,
"count": 22809,
"self": 348.5217554679746
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1249999261053745e-06,
"count": 1,
"self": 1.1249999261053745e-06
},
"TrainerController._save_models": {
"total": 0.08686357900023722,
"count": 1,
"self": 0.0013936870000179624,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08546989200021926,
"count": 1,
"self": 0.08546989200021926
}
}
}
}
}
}
}