{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5814186334609985,
"min": 0.5592879056930542,
"max": 1.4764949083328247,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17554.19140625,
"min": 16903.91796875,
"max": 44790.94921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989988.0,
"min": 29952.0,
"max": 989988.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989988.0,
"min": 29952.0,
"max": 989988.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.17124579846858978,
"min": -0.15393362939357758,
"max": 0.17124579846858978,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 44.010169982910156,
"min": -37.25193786621094,
"max": 44.010169982910156,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0039293221198022366,
"min": 0.002365364693105221,
"max": 0.2782208323478699,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.0098358392715454,
"min": 0.5913411974906921,
"max": 67.3294448852539,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06983583501818282,
"min": 0.06491532899372159,
"max": 0.07184738567705835,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9777016902545594,
"min": 0.49804875389646064,
"max": 1.0131324736992646,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011026238495080169,
"min": 7.534382911759324e-05,
"max": 0.011026238495080169,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.15436733893112237,
"min": 0.0010548136076463054,
"max": 0.15436733893112237,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.747747417449997e-06,
"min": 7.747747417449997e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010846846384429997,
"min": 0.00010846846384429997,
"max": 0.003635609888130099,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10258255000000001,
"min": 0.10258255000000001,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4361557,
"min": 1.3886848,
"max": 2.6118699000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026799674499999995,
"min": 0.00026799674499999995,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037519544299999996,
"min": 0.0037519544299999996,
"max": 0.12120580300999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011755081824958324,
"min": 0.011755081824958324,
"max": 0.38727813959121704,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.164571151137352,
"min": 0.164571151137352,
"max": 2.710947036743164,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 610.7254901960785,
"min": 610.7254901960785,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31147.0,
"min": 15984.0,
"max": 32960.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.7616313468007481,
"min": -1.0000000521540642,
"max": 0.7616313468007481,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 38.84319868683815,
"min": -30.2422017082572,
"max": 38.84319868683815,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.7616313468007481,
"min": -1.0000000521540642,
"max": 0.7616313468007481,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 38.84319868683815,
"min": -30.2422017082572,
"max": 38.84319868683815,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.07417967817380879,
"min": 0.07417967817380879,
"max": 7.205812143161893,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.783163586864248,
"min": 3.752242462243885,
"max": 115.29299429059029,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740590560",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1740592824"
},
"total": 2263.9589436379997,
"count": 1,
"self": 0.6407182250000005,
"children": {
"run_training.setup": {
"total": 0.02191090299993448,
"count": 1,
"self": 0.02191090299993448
},
"TrainerController.start_learning": {
"total": 2263.29631451,
"count": 1,
"self": 1.369886679069623,
"children": {
"TrainerController._reset_env": {
"total": 2.2196713769999405,
"count": 1,
"self": 2.2196713769999405
},
"TrainerController.advance": {
"total": 2259.61339118893,
"count": 63351,
"self": 1.4391257318325188,
"children": {
"env_step": {
"total": 1554.8218773801045,
"count": 63351,
"self": 1393.6062844172634,
"children": {
"SubprocessEnvManager._take_step": {
"total": 160.40866890790858,
"count": 63351,
"self": 4.898199881978599,
"children": {
"TorchPolicy.evaluate": {
"total": 155.51046902592998,
"count": 62551,
"self": 155.51046902592998
}
}
},
"workers": {
"total": 0.8069240549325514,
"count": 63351,
"self": 0.0,
"children": {
"worker_root": {
"total": 2257.7677427810336,
"count": 63351,
"is_parallel": true,
"self": 984.4785886900409,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025419069997951738,
"count": 1,
"is_parallel": true,
"self": 0.000823137998850143,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017187690009450307,
"count": 8,
"is_parallel": true,
"self": 0.0017187690009450307
}
}
},
"UnityEnvironment.step": {
"total": 0.05474524500004918,
"count": 1,
"is_parallel": true,
"self": 0.0005393360002017289,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000566190999961691,
"count": 1,
"is_parallel": true,
"self": 0.000566190999961691
},
"communicator.exchange": {
"total": 0.05190098399998533,
"count": 1,
"is_parallel": true,
"self": 0.05190098399998533
},
"steps_from_proto": {
"total": 0.0017387339999004325,
"count": 1,
"is_parallel": true,
"self": 0.000383410999802436,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013553230000979966,
"count": 8,
"is_parallel": true,
"self": 0.0013553230000979966
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1273.2891540909927,
"count": 63350,
"is_parallel": true,
"self": 33.47225943584999,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.401316229050735,
"count": 63350,
"is_parallel": true,
"self": 25.401316229050735
},
"communicator.exchange": {
"total": 1110.042568701052,
"count": 63350,
"is_parallel": true,
"self": 1110.042568701052
},
"steps_from_proto": {
"total": 104.37300972503999,
"count": 63350,
"is_parallel": true,
"self": 21.175193644151477,
"children": {
"_process_rank_one_or_two_observation": {
"total": 83.19781608088851,
"count": 506800,
"is_parallel": true,
"self": 83.19781608088851
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 703.3523880769931,
"count": 63351,
"self": 2.6597976290722727,
"children": {
"process_trajectory": {
"total": 135.0890862779138,
"count": 63351,
"self": 134.85044427991397,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2386419979998209,
"count": 2,
"self": 0.2386419979998209
}
}
},
"_update_policy": {
"total": 565.603504170007,
"count": 455,
"self": 311.56091201296385,
"children": {
"TorchPPOOptimizer.update": {
"total": 254.04259215704315,
"count": 22746,
"self": 254.04259215704315
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0320000001229346e-06,
"count": 1,
"self": 1.0320000001229346e-06
},
"TrainerController._save_models": {
"total": 0.0933642330001021,
"count": 1,
"self": 0.0015217700001812773,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09184246299992083,
"count": 1,
"self": 0.09184246299992083
}
}
}
}
}
}
}