{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.21302199363708496,
"min": 0.21302199363708496,
"max": 1.4299259185791016,
"count": 58
},
"Pyramids.Policy.Entropy.sum": {
"value": 6373.6181640625,
"min": 6373.6181640625,
"max": 43378.234375,
"count": 58
},
"Pyramids.Step.mean": {
"value": 1739906.0,
"min": 29952.0,
"max": 1739906.0,
"count": 58
},
"Pyramids.Step.sum": {
"value": 1739906.0,
"min": 29952.0,
"max": 1739906.0,
"count": 58
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7808756232261658,
"min": -0.09550435841083527,
"max": 0.8141438961029053,
"count": 58
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 229.5774383544922,
"min": -23.1120548248291,
"max": 245.05731201171875,
"count": 58
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.017437996342778206,
"min": -0.004383192863315344,
"max": 0.4441741704940796,
"count": 58
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.126770973205566,
"min": -1.1264805793762207,
"max": 105.26927947998047,
"count": 58
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07112395247664297,
"min": 0.0644080444894609,
"max": 0.07578669143203157,
"count": 58
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9957353346730016,
"min": 0.49415558890185995,
"max": 1.061013680048442,
"count": 58
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012997365766090757,
"min": 0.0002416005040601366,
"max": 0.018060553580613082,
"count": 58
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1819631207252706,
"min": 0.003140806552781776,
"max": 0.2709083037091962,
"count": 58
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00012755035034037857,
"min": 0.00012755035034037857,
"max": 0.00029838354339596195,
"count": 58
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0017857049047653,
"min": 0.0017857049047653,
"max": 0.004010039263320266,
"count": 58
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.14251676428571428,
"min": 0.14251676428571428,
"max": 0.19946118095238097,
"count": 58
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.9952347,
"min": 1.3962282666666668,
"max": 2.797474566666667,
"count": 58
},
"Pyramids.Policy.Beta.mean": {
"value": 0.004257424752142857,
"min": 0.004257424752142857,
"max": 0.009946171977142856,
"count": 58
},
"Pyramids.Policy.Beta.sum": {
"value": 0.05960394653,
"min": 0.05960394653,
"max": 0.13367430536,
"count": 58
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010542534291744232,
"min": 0.010323203168809414,
"max": 0.43151411414146423,
"count": 58
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14759548008441925,
"min": 0.14452484250068665,
"max": 3.020598888397217,
"count": 58
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 264.7583333333333,
"min": 229.125,
"max": 999.0,
"count": 58
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31771.0,
"min": 15984.0,
"max": 33512.0,
"count": 58
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.685218318675955,
"min": -1.0000000521540642,
"max": 1.7708749865414575,
"count": 58
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 202.22619824111462,
"min": -30.993001624941826,
"max": 227.5271983295679,
"count": 58
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.685218318675955,
"min": -1.0000000521540642,
"max": 1.7708749865414575,
"count": 58
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 202.22619824111462,
"min": -30.993001624941826,
"max": 227.5271983295679,
"count": 58
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.028383254223081168,
"min": 0.026404364296467975,
"max": 8.6148909535259,
"count": 58
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.40599050676974,
"min": 3.1642717106224154,
"max": 137.8382552564144,
"count": 58
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 58
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 58
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1707534601",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1707538726"
},
"total": 4125.246884005,
"count": 1,
"self": 0.4197832119998566,
"children": {
"run_training.setup": {
"total": 0.053459410999948886,
"count": 1,
"self": 0.053459410999948886
},
"TrainerController.start_learning": {
"total": 4124.773641382,
"count": 1,
"self": 2.3762352360690784,
"children": {
"TrainerController._reset_env": {
"total": 2.9119080120003673,
"count": 1,
"self": 2.9119080120003673
},
"TrainerController.advance": {
"total": 4119.336418957931,
"count": 114232,
"self": 2.5098101548437626,
"children": {
"env_step": {
"total": 2994.0612274481214,
"count": 114232,
"self": 2765.8155450451995,
"children": {
"SubprocessEnvManager._take_step": {
"total": 226.817101242048,
"count": 114232,
"self": 8.190596317003383,
"children": {
"TorchPolicy.evaluate": {
"total": 218.62650492504463,
"count": 110497,
"self": 218.62650492504463
}
}
},
"workers": {
"total": 1.4285811608738186,
"count": 114231,
"self": 0.0,
"children": {
"worker_root": {
"total": 4115.741242391978,
"count": 114231,
"is_parallel": true,
"self": 1552.5245138059254,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020951250003236055,
"count": 1,
"is_parallel": true,
"self": 0.0006537070003105327,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014414180000130727,
"count": 8,
"is_parallel": true,
"self": 0.0014414180000130727
}
}
},
"UnityEnvironment.step": {
"total": 0.06068620699988969,
"count": 1,
"is_parallel": true,
"self": 0.0005797749995508639,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004186720002508082,
"count": 1,
"is_parallel": true,
"self": 0.0004186720002508082
},
"communicator.exchange": {
"total": 0.05810506199986776,
"count": 1,
"is_parallel": true,
"self": 0.05810506199986776
},
"steps_from_proto": {
"total": 0.0015826980002202617,
"count": 1,
"is_parallel": true,
"self": 0.0003617320003286295,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012209659998916322,
"count": 8,
"is_parallel": true,
"self": 0.0012209659998916322
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2563.2167285860523,
"count": 114230,
"is_parallel": true,
"self": 62.629328094092216,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 43.972513573908145,
"count": 114230,
"is_parallel": true,
"self": 43.972513573908145
},
"communicator.exchange": {
"total": 2280.2044378101,
"count": 114230,
"is_parallel": true,
"self": 2280.2044378101
},
"steps_from_proto": {
"total": 176.410449107952,
"count": 114230,
"is_parallel": true,
"self": 35.051696968574106,
"children": {
"_process_rank_one_or_two_observation": {
"total": 141.35875213937788,
"count": 913840,
"is_parallel": true,
"self": 141.35875213937788
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1122.765381354965,
"count": 114231,
"self": 4.781512252900484,
"children": {
"process_trajectory": {
"total": 226.88922172106504,
"count": 114231,
"self": 226.56816519306494,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3210565280000992,
"count": 3,
"self": 0.3210565280000992
}
}
},
"_update_policy": {
"total": 891.0946473809995,
"count": 821,
"self": 520.5461531090036,
"children": {
"TorchPPOOptimizer.update": {
"total": 370.5484942719959,
"count": 40221,
"self": 370.5484942719959
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.6920002963161096e-06,
"count": 1,
"self": 1.6920002963161096e-06
},
"TrainerController._save_models": {
"total": 0.14907748399946286,
"count": 1,
"self": 0.0019383039998501772,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14713917999961268,
"count": 1,
"self": 0.14713917999961268
}
}
}
}
}
}
}