{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.567835807800293,
"min": 0.5515556335449219,
"max": 1.5129880905151367,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17125.927734375,
"min": 16573.14453125,
"max": 45898.0078125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989911.0,
"min": 29952.0,
"max": 989911.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989911.0,
"min": 29952.0,
"max": 989911.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.1350151002407074,
"min": -0.1060100719332695,
"max": 0.29201069474220276,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 34.0238037109375,
"min": -25.54842758178711,
"max": 74.17071533203125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -4.147328853607178,
"min": -4.651304244995117,
"max": 0.7690960764884949,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1045.1268310546875,
"min": -1172.128662109375,
"max": 195.35040283203125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06949122590959741,
"min": 0.06613743513381741,
"max": 0.07472956295496798,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9728771627343639,
"min": 0.5231069406847758,
"max": 1.0952176370814344,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 5.510113946046075,
"min": 0.00017570725108646216,
"max": 5.510113946046075,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 77.14159524464505,
"min": 0.002108487013037546,
"max": 77.14159524464505,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.445561803892855e-06,
"min": 7.445561803892855e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010423786525449996,
"min": 0.00010423786525449996,
"max": 0.003382128272623999,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248182142857144,
"min": 0.10248182142857144,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4347455000000002,
"min": 1.3886848,
"max": 2.5273760000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002579339607142857,
"min": 0.0002579339607142857,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00361107545,
"min": 0.00361107545,
"max": 0.11276486239999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009945386089384556,
"min": 0.009945386089384556,
"max": 0.3845086991786957,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13923540711402893,
"min": 0.13923540711402893,
"max": 2.691560983657837,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 677.6818181818181,
"min": 567.7708333333334,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29818.0,
"min": 15984.0,
"max": 33159.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.8221863299946893,
"min": -1.0000000521540642,
"max": 1.2172519733011722,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 36.17619851976633,
"min": -30.996801614761353,
"max": 60.86259866505861,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.8221863299946893,
"min": -1.0000000521540642,
"max": 1.2172519733011722,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 36.17619851976633,
"min": -30.996801614761353,
"max": 60.86259866505861,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.07138115523742851,
"min": 0.06552381836576388,
"max": 7.902815560810268,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1407708304468542,
"min": 3.106235870262026,
"max": 126.44504897296429,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1708952974",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1708955047"
},
"total": 2073.314881357,
"count": 1,
"self": 0.8871173770003224,
"children": {
"run_training.setup": {
"total": 0.05553610399999798,
"count": 1,
"self": 0.05553610399999798
},
"TrainerController.start_learning": {
"total": 2072.3722278759997,
"count": 1,
"self": 1.3106213869796193,
"children": {
"TrainerController._reset_env": {
"total": 3.634456139000008,
"count": 1,
"self": 3.634456139000008
},
"TrainerController.advance": {
"total": 2067.30223039902,
"count": 63285,
"self": 1.4252577709748948,
"children": {
"env_step": {
"total": 1453.9095709520104,
"count": 63285,
"self": 1321.8223451131144,
"children": {
"SubprocessEnvManager._take_step": {
"total": 131.2342972609548,
"count": 63285,
"self": 4.822118682984183,
"children": {
"TorchPolicy.evaluate": {
"total": 126.41217857797062,
"count": 62555,
"self": 126.41217857797062
}
}
},
"workers": {
"total": 0.8529285779412703,
"count": 63285,
"self": 0.0,
"children": {
"worker_root": {
"total": 2067.3017278860016,
"count": 63285,
"is_parallel": true,
"self": 861.3552977690465,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0052952490000279795,
"count": 1,
"is_parallel": true,
"self": 0.003835562000062964,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014596869999650153,
"count": 8,
"is_parallel": true,
"self": 0.0014596869999650153
}
}
},
"UnityEnvironment.step": {
"total": 0.07007714900009887,
"count": 1,
"is_parallel": true,
"self": 0.0005817730002490862,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005005339999115677,
"count": 1,
"is_parallel": true,
"self": 0.0005005339999115677
},
"communicator.exchange": {
"total": 0.06593941799997083,
"count": 1,
"is_parallel": true,
"self": 0.06593941799997083
},
"steps_from_proto": {
"total": 0.0030554239999673882,
"count": 1,
"is_parallel": true,
"self": 0.00038869399998020526,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002666729999987183,
"count": 8,
"is_parallel": true,
"self": 0.002666729999987183
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1205.946430116955,
"count": 63284,
"is_parallel": true,
"self": 34.81297344202312,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.489304756963975,
"count": 63284,
"is_parallel": true,
"self": 25.489304756963975
},
"communicator.exchange": {
"total": 1044.097338256976,
"count": 63284,
"is_parallel": true,
"self": 1044.097338256976
},
"steps_from_proto": {
"total": 101.54681366099192,
"count": 63284,
"is_parallel": true,
"self": 20.41980528286058,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.12700837813134,
"count": 506272,
"is_parallel": true,
"self": 81.12700837813134
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 611.9674016760346,
"count": 63285,
"self": 2.5029557340387782,
"children": {
"process_trajectory": {
"total": 123.05485875799525,
"count": 63285,
"self": 122.82204807599544,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23281068199980837,
"count": 2,
"self": 0.23281068199980837
}
}
},
"_update_policy": {
"total": 486.40958718400054,
"count": 441,
"self": 285.39741356502225,
"children": {
"TorchPPOOptimizer.update": {
"total": 201.0121736189783,
"count": 22854,
"self": 201.0121736189783
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2819996300095227e-06,
"count": 1,
"self": 1.2819996300095227e-06
},
"TrainerController._save_models": {
"total": 0.12491866900018067,
"count": 1,
"self": 0.0020246090002729034,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12289405999990777,
"count": 1,
"self": 0.12289405999990777
}
}
}
}
}
}
}