{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3922267556190491,
"min": 0.3866303563117981,
"max": 1.399727463722229,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11722.873046875,
"min": 11567.98046875,
"max": 42462.1328125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989934.0,
"min": 29892.0,
"max": 989934.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989934.0,
"min": 29892.0,
"max": 989934.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.49660590291023254,
"min": -0.11000493168830872,
"max": 0.5368474125862122,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 139.04965209960938,
"min": -26.511188507080078,
"max": 146.55934143066406,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0040316455997526646,
"min": -0.00504383584484458,
"max": 0.38489019870758057,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.1288607120513916,
"min": -1.2710466384887695,
"max": 91.21897888183594,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06758369111394998,
"min": 0.06590624569784594,
"max": 0.07301912019084697,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9461716755952997,
"min": 0.48804675590115776,
"max": 1.0774935156394563,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015542732160178102,
"min": 0.000428433386609722,
"max": 0.015997215332823715,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21759825024249344,
"min": 0.0055696340259263856,
"max": 0.22396101465953203,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.364090402478571e-06,
"min": 7.364090402478571e-06,
"max": 0.00029523548730245715,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010309726563469999,
"min": 0.00010309726563469999,
"max": 0.0035078624307125993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245466428571427,
"min": 0.10245466428571427,
"max": 0.19841182857142856,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4343652999999998,
"min": 1.3888828,
"max": 2.5692874,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025522096214285715,
"min": 0.00025522096214285715,
"max": 0.009841341674285714,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00357309347,
"min": 0.00357309347,
"max": 0.11695181125999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00981693435460329,
"min": 0.009750180877745152,
"max": 0.45569881796836853,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13743707537651062,
"min": 0.13650253415107727,
"max": 3.189891815185547,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 366.61176470588236,
"min": 365.4457831325301,
"max": 993.84375,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31162.0,
"min": 16563.0,
"max": 34146.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5392352720393854,
"min": -0.931050052633509,
"max": 1.566283528846276,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 130.83499812334776,
"min": -30.576401762664318,
"max": 130.83499812334776,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5392352720393854,
"min": -0.931050052633509,
"max": 1.566283528846276,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 130.83499812334776,
"min": -30.576401762664318,
"max": 130.83499812334776,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03763323975363164,
"min": 0.03600312006086145,
"max": 8.886781034224173,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1988253790586896,
"min": 2.9882589650515,
"max": 151.07527758181095,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1733594254",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1733596466"
},
"total": 2211.90384996,
"count": 1,
"self": 0.7673126810000213,
"children": {
"run_training.setup": {
"total": 0.056223680000130116,
"count": 1,
"self": 0.056223680000130116
},
"TrainerController.start_learning": {
"total": 2211.080313599,
"count": 1,
"self": 1.3059139290407984,
"children": {
"TrainerController._reset_env": {
"total": 2.229277563000096,
"count": 1,
"self": 2.229277563000096
},
"TrainerController.advance": {
"total": 2207.4124035149594,
"count": 63751,
"self": 1.4212650069553092,
"children": {
"env_step": {
"total": 1523.1148456730336,
"count": 63751,
"self": 1377.8534226279774,
"children": {
"SubprocessEnvManager._take_step": {
"total": 144.4874977150091,
"count": 63751,
"self": 4.512598082088289,
"children": {
"TorchPolicy.evaluate": {
"total": 139.97489963292082,
"count": 62559,
"self": 139.97489963292082
}
}
},
"workers": {
"total": 0.7739253300471773,
"count": 63751,
"self": 0.0,
"children": {
"worker_root": {
"total": 2206.268048386041,
"count": 63751,
"is_parallel": true,
"self": 940.7973410330319,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020193970001400885,
"count": 1,
"is_parallel": true,
"self": 0.0006225409997568931,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013968560003831954,
"count": 8,
"is_parallel": true,
"self": 0.0013968560003831954
}
}
},
"UnityEnvironment.step": {
"total": 0.04643912699998509,
"count": 1,
"is_parallel": true,
"self": 0.0006761290001122688,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004438519999894197,
"count": 1,
"is_parallel": true,
"self": 0.0004438519999894197
},
"communicator.exchange": {
"total": 0.04370923400006177,
"count": 1,
"is_parallel": true,
"self": 0.04370923400006177
},
"steps_from_proto": {
"total": 0.0016099119998216338,
"count": 1,
"is_parallel": true,
"self": 0.0003433869999298622,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012665249998917716,
"count": 8,
"is_parallel": true,
"self": 0.0012665249998917716
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1265.470707353009,
"count": 63750,
"is_parallel": true,
"self": 33.05608089100588,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.64509350000685,
"count": 63750,
"is_parallel": true,
"self": 22.64509350000685
},
"communicator.exchange": {
"total": 1115.1921159449817,
"count": 63750,
"is_parallel": true,
"self": 1115.1921159449817
},
"steps_from_proto": {
"total": 94.57741701701457,
"count": 63750,
"is_parallel": true,
"self": 18.65953094308884,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.91788607392573,
"count": 510000,
"is_parallel": true,
"self": 75.91788607392573
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 682.8762928349702,
"count": 63751,
"self": 2.451957529953688,
"children": {
"process_trajectory": {
"total": 130.91753675902305,
"count": 63751,
"self": 130.68776227902322,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2297744799998327,
"count": 2,
"self": 0.2297744799998327
}
}
},
"_update_policy": {
"total": 549.5067985459934,
"count": 453,
"self": 308.0799111980323,
"children": {
"TorchPPOOptimizer.update": {
"total": 241.42688734796116,
"count": 22785,
"self": 241.42688734796116
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1979996088484768e-06,
"count": 1,
"self": 1.1979996088484768e-06
},
"TrainerController._save_models": {
"total": 0.13271739400033766,
"count": 1,
"self": 0.0014724450002177036,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13124494900011996,
"count": 1,
"self": 0.13124494900011996
}
}
}
}
}
}
}