{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4614676833152771,
"min": 0.4614676833152771,
"max": 1.4531798362731934,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13748.044921875,
"min": 13748.044921875,
"max": 44083.6640625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989903.0,
"min": 29952.0,
"max": 989903.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989903.0,
"min": 29952.0,
"max": 989903.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.32395437359809875,
"min": -0.09493961185216904,
"max": 0.32395437359809875,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 84.55209350585938,
"min": -22.81021499633789,
"max": 84.55209350585938,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.005461744498461485,
"min": -0.005461744498461485,
"max": 0.554766058921814,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1.4255152940750122,
"min": -1.4255152940750122,
"max": 131.47955322265625,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07079249700538374,
"min": 0.06493950840193355,
"max": 0.0751172183934551,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9910949580753725,
"min": 0.503419830384042,
"max": 1.0653596335597446,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014041112589573876,
"min": 0.0001058971845662172,
"max": 0.025907027806295384,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19657557625403427,
"min": 0.0013766633993608237,
"max": 0.20026599757807603,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.3777403979285645e-06,
"min": 7.3777403979285645e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010328836557099991,
"min": 0.00010328836557099991,
"max": 0.003382023272659,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245921428571426,
"min": 0.10245921428571426,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4344289999999997,
"min": 1.3691136000000002,
"max": 2.527341,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000255675507142857,
"min": 0.000255675507142857,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035794570999999977,
"min": 0.0035794570999999977,
"max": 0.1127613659,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013311672024428844,
"min": 0.013311672024428844,
"max": 0.6412887573242188,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18636341392993927,
"min": 0.18636341392993927,
"max": 4.489021301269531,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 508.46875,
"min": 508.46875,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32542.0,
"min": 15984.0,
"max": 32542.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.1461904484128196,
"min": -1.0000000521540642,
"max": 1.1791207284297582,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 72.20999825000763,
"min": -32.000001668930054,
"max": 72.20999825000763,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.1461904484128196,
"min": -1.0000000521540642,
"max": 1.1791207284297582,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 72.20999825000763,
"min": -32.000001668930054,
"max": 72.20999825000763,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06890848509797323,
"min": 0.06890848509797323,
"max": 14.719729556702077,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.341234561172314,
"min": 4.202804758679122,
"max": 235.51567290723324,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740491185",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1740493379"
},
"total": 2194.258244417,
"count": 1,
"self": 0.5762395530000504,
"children": {
"run_training.setup": {
"total": 0.020798137999918254,
"count": 1,
"self": 0.020798137999918254
},
"TrainerController.start_learning": {
"total": 2193.661206726,
"count": 1,
"self": 1.4918594078617389,
"children": {
"TrainerController._reset_env": {
"total": 2.2658494600000267,
"count": 1,
"self": 2.2658494600000267
},
"TrainerController.advance": {
"total": 2189.8113694991384,
"count": 63346,
"self": 1.4644348290385096,
"children": {
"env_step": {
"total": 1499.8649503630136,
"count": 63346,
"self": 1340.393202252083,
"children": {
"SubprocessEnvManager._take_step": {
"total": 158.60658152286624,
"count": 63346,
"self": 4.7806833998156435,
"children": {
"TorchPolicy.evaluate": {
"total": 153.8258981230506,
"count": 62564,
"self": 153.8258981230506
}
}
},
"workers": {
"total": 0.8651665880643122,
"count": 63346,
"self": 0.0,
"children": {
"worker_root": {
"total": 2188.4501663039364,
"count": 63346,
"is_parallel": true,
"self": 961.6750679538482,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021431390000543615,
"count": 1,
"is_parallel": true,
"self": 0.0006829520007158862,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014601869993384753,
"count": 8,
"is_parallel": true,
"self": 0.0014601869993384753
}
}
},
"UnityEnvironment.step": {
"total": 0.04889885099964886,
"count": 1,
"is_parallel": true,
"self": 0.0005448549995890062,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044819499998993706,
"count": 1,
"is_parallel": true,
"self": 0.00044819499998993706
},
"communicator.exchange": {
"total": 0.04632270399997651,
"count": 1,
"is_parallel": true,
"self": 0.04632270399997651
},
"steps_from_proto": {
"total": 0.001583097000093403,
"count": 1,
"is_parallel": true,
"self": 0.00033530099972267635,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012477960003707267,
"count": 8,
"is_parallel": true,
"self": 0.0012477960003707267
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1226.7750983500882,
"count": 63345,
"is_parallel": true,
"self": 31.678933815397613,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.63190574592818,
"count": 63345,
"is_parallel": true,
"self": 22.63190574592818
},
"communicator.exchange": {
"total": 1077.0753920578964,
"count": 63345,
"is_parallel": true,
"self": 1077.0753920578964
},
"steps_from_proto": {
"total": 95.38886673086608,
"count": 63345,
"is_parallel": true,
"self": 19.290146913565877,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.0987198173002,
"count": 506760,
"is_parallel": true,
"self": 76.0987198173002
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 688.4819843070863,
"count": 63346,
"self": 2.665504691021397,
"children": {
"process_trajectory": {
"total": 124.63323523008012,
"count": 63346,
"self": 124.42607277708021,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20716245299990987,
"count": 2,
"self": 0.20716245299990987
}
}
},
"_update_policy": {
"total": 561.1832443859848,
"count": 433,
"self": 309.8094519309807,
"children": {
"TorchPPOOptimizer.update": {
"total": 251.37379245500415,
"count": 22890,
"self": 251.37379245500415
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.019997039809823e-07,
"count": 1,
"self": 8.019997039809823e-07
},
"TrainerController._save_models": {
"total": 0.09212755699991249,
"count": 1,
"self": 0.001764455999364145,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09036310100054834,
"count": 1,
"self": 0.09036310100054834
}
}
}
}
}
}
}