{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4579711854457855,
"min": 0.4579711854457855,
"max": 1.3707526922225952,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13768.4453125,
"min": 13768.4453125,
"max": 41583.15234375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989880.0,
"min": 29952.0,
"max": 989880.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989880.0,
"min": 29952.0,
"max": 989880.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5580952167510986,
"min": -0.11447910964488983,
"max": 0.5580952167510986,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 154.59237670898438,
"min": -27.589466094970703,
"max": 154.7058563232422,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.015873625874519348,
"min": 0.004589409101754427,
"max": 0.2668275535106659,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.396994590759277,
"min": 1.1702992916107178,
"max": 64.3054428100586,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06653923519308377,
"min": 0.06334033769962844,
"max": 0.07455073479546366,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9315492927031729,
"min": 0.48955413740213255,
"max": 1.082061585019498,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015357771735050266,
"min": 0.0003114734438457734,
"max": 0.016713839647958854,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21500880429070374,
"min": 0.004360628213840827,
"max": 0.24086655112720715,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.283004715221427e-06,
"min": 7.283004715221427e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010196206601309997,
"min": 0.00010196206601309997,
"max": 0.0032605877131375,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10242763571428572,
"min": 0.10242763571428572,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4339869,
"min": 1.3886848,
"max": 2.4851142000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002525208078571428,
"min": 0.0002525208078571428,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003535291309999999,
"min": 0.003535291309999999,
"max": 0.10870756375000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013798423111438751,
"min": 0.013798423111438751,
"max": 0.45148032903671265,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19317792356014252,
"min": 0.19317792356014252,
"max": 3.1603622436523438,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 349.02298850574715,
"min": 344.9782608695652,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30365.0,
"min": 15984.0,
"max": 33783.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6049792948981811,
"min": -1.0000000521540642,
"max": 1.6186404873298694,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 139.63319865614176,
"min": -29.804001666605473,
"max": 146.49879829585552,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6049792948981811,
"min": -1.0000000521540642,
"max": 1.6186404873298694,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 139.63319865614176,
"min": -29.804001666605473,
"max": 146.49879829585552,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04992644239396684,
"min": 0.04992644239396684,
"max": 9.079943422228098,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.343600488275115,
"min": 4.343600488275115,
"max": 145.27909475564957,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1764852820",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1764855832"
},
"total": 3012.157673158,
"count": 1,
"self": 0.7343771020000531,
"children": {
"run_training.setup": {
"total": 0.03981624800007921,
"count": 1,
"self": 0.03981624800007921
},
"TrainerController.start_learning": {
"total": 3011.3834798079997,
"count": 1,
"self": 2.203615058977448,
"children": {
"TrainerController._reset_env": {
"total": 2.9371936610000375,
"count": 1,
"self": 2.9371936610000375
},
"TrainerController.advance": {
"total": 3006.1766980690213,
"count": 63725,
"self": 2.310777778107422,
"children": {
"env_step": {
"total": 2023.8581391239832,
"count": 63725,
"self": 1873.0370084668443,
"children": {
"SubprocessEnvManager._take_step": {
"total": 149.486280046116,
"count": 63725,
"self": 6.387955578141373,
"children": {
"TorchPolicy.evaluate": {
"total": 143.09832446797463,
"count": 62539,
"self": 143.09832446797463
}
}
},
"workers": {
"total": 1.33485061102283,
"count": 63725,
"self": 0.0,
"children": {
"worker_root": {
"total": 3003.8598082800313,
"count": 63725,
"is_parallel": true,
"self": 1302.7788933200827,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0040332130001843325,
"count": 1,
"is_parallel": true,
"self": 0.0014401170001292485,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002593096000055084,
"count": 8,
"is_parallel": true,
"self": 0.002593096000055084
}
}
},
"UnityEnvironment.step": {
"total": 0.06657822599981955,
"count": 1,
"is_parallel": true,
"self": 0.0006306299997049791,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005150719998709974,
"count": 1,
"is_parallel": true,
"self": 0.0005150719998709974
},
"communicator.exchange": {
"total": 0.06330806799996935,
"count": 1,
"is_parallel": true,
"self": 0.06330806799996935
},
"steps_from_proto": {
"total": 0.002124456000274222,
"count": 1,
"is_parallel": true,
"self": 0.0003914160015483503,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017330399987258716,
"count": 8,
"is_parallel": true,
"self": 0.0017330399987258716
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1701.0809149599486,
"count": 63724,
"is_parallel": true,
"self": 44.064415383259075,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 30.23465077286346,
"count": 63724,
"is_parallel": true,
"self": 30.23465077286346
},
"communicator.exchange": {
"total": 1487.008677233951,
"count": 63724,
"is_parallel": true,
"self": 1487.008677233951
},
"steps_from_proto": {
"total": 139.7731715698751,
"count": 63724,
"is_parallel": true,
"self": 29.261240774055295,
"children": {
"_process_rank_one_or_two_observation": {
"total": 110.51193079581981,
"count": 509792,
"is_parallel": true,
"self": 110.51193079581981
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 980.0077811669307,
"count": 63725,
"self": 4.140712278975116,
"children": {
"process_trajectory": {
"total": 148.48261430695175,
"count": 63725,
"self": 148.2356666159517,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24694769100005942,
"count": 2,
"self": 0.24694769100005942
}
}
},
"_update_policy": {
"total": 827.3844545810039,
"count": 446,
"self": 333.3306803440014,
"children": {
"TorchPPOOptimizer.update": {
"total": 494.0537742370025,
"count": 22734,
"self": 494.0537742370025
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.085000803868752e-06,
"count": 1,
"self": 1.085000803868752e-06
},
"TrainerController._save_models": {
"total": 0.06597193400011747,
"count": 1,
"self": 0.0016595759998381254,
"children": {
"RLTrainer._checkpoint": {
"total": 0.06431235800027935,
"count": 1,
"self": 0.06431235800027935
}
}
}
}
}
}
}