{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.11422315239906311,
"min": 0.10635818541049957,
"max": 1.6032832860946655,
"count": 500
},
"Pyramids.Policy.Entropy.sum": {
"value": 2288.1181640625,
"min": 2093.129150390625,
"max": 32219.58203125,
"count": 500
},
"Pyramids.Step.mean": {
"value": 9999964.0,
"min": 19968.0,
"max": 9999964.0,
"count": 500
},
"Pyramids.Step.sum": {
"value": 9999964.0,
"min": 19968.0,
"max": 9999964.0,
"count": 500
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8705893158912659,
"min": -0.08696519583463669,
"max": 0.9940770268440247,
"count": 500
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 174.11785888671875,
"min": -14.001396179199219,
"max": 211.3995361328125,
"count": 500
},
"Pyramids.Policy.CuriosityValueEstimate.mean": {
"value": 0.10578813403844833,
"min": 0.10449855774641037,
"max": 4.689557075500488,
"count": 500
},
"Pyramids.Policy.CuriosityValueEstimate.sum": {
"value": 21.15762710571289,
"min": 20.89971160888672,
"max": 753.0303955078125,
"count": 500
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.05006951541872695,
"min": 0.04121422056505253,
"max": 0.058810766625499944,
"count": 500
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.1001390308374539,
"min": 0.04986661850416567,
"max": 0.16531687479982793,
"count": 500
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.009665742996730842,
"min": 0.0007032624918846362,
"max": 0.2743947749001611,
"count": 500
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.019331485993461683,
"min": 0.0014065249837692724,
"max": 0.5487895498003222,
"count": 500
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.6590310445924732e-07,
"min": 1.6590310445924732e-07,
"max": 0.0001371591941373607,
"count": 500
},
"Pyramids.Policy.LearningRate.sum": {
"value": 3.3180620891849464e-07,
"min": 3.3180620891849464e-07,
"max": 0.00040818917248026574,
"count": 500
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10009065642186618,
"min": 0.10009065642186618,
"max": 0.1749946916944136,
"count": 500
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.20018131284373236,
"min": 0.1749946916944136,
"max": 0.5231860660591037,
"count": 500
},
"Pyramids.Policy.Beta.mean": {
"value": 2.630691935404572e-05,
"min": 2.630691935404572e-05,
"max": 0.0134997491459314,
"count": 500
},
"Pyramids.Policy.Beta.sum": {
"value": 5.261383870809144e-05,
"min": 5.261383870809144e-05,
"max": 0.04017582867108255,
"count": 500
},
"Pyramids.Losses.CuriosityForwardLoss.mean": {
"value": 0.024805564935377333,
"min": 0.022652438281511422,
"max": 5.399273802340031,
"count": 500
},
"Pyramids.Losses.CuriosityForwardLoss.sum": {
"value": 0.04961112987075467,
"min": 0.045304876563022844,
"max": 5.430380754878646,
"count": 500
},
"Pyramids.Losses.CuriosityInverseLoss.mean": {
"value": 0.01703787657424982,
"min": 0.012451664861146128,
"max": 1.5587653540074826,
"count": 500
},
"Pyramids.Losses.CuriosityInverseLoss.sum": {
"value": 0.03407575314849964,
"min": 0.024903329722292256,
"max": 2.800841969761409,
"count": 500
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 190.90196078431373,
"min": 168.5137614678899,
"max": 999.0,
"count": 500
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 19472.0,
"min": 14919.0,
"max": 31968.0,
"count": 500
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.8090980330518647,
"min": -1.0000000521540642,
"max": 1.8314862271787924,
"count": 500
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 184.5279993712902,
"min": -31.00000161677599,
"max": 213.67299868166447,
"count": 500
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.8090980330518647,
"min": -1.0000000521540642,
"max": 1.8314862271787924,
"count": 500
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 184.5279993712902,
"min": -31.00000161677599,
"max": 213.67299868166447,
"count": 500
},
"Pyramids.Policy.CuriosityReward.mean": {
"value": 0.15285808866496617,
"min": 0.1338517680683603,
"max": 102.52869310975075,
"count": 500
},
"Pyramids.Policy.CuriosityReward.sum": {
"value": 15.59152504382655,
"min": 13.920583879109472,
"max": 1691.8864068984985,
"count": 500
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1767769871",
"python_version": "3.10.11 (tags/v3.10.11:7d4cc5a, Apr 5 2023, 00:38:17) [MSC v.1929 64 bit (AMD64)]",
"command_line_arguments": "C:\\python\\Reinforcement-Learning\\ml-agents\\.venv\\Scripts\\mlagents-learn optuna_runs\\pyramids_final_best\\final_config.yaml --env Builds\\UnityEnvironment.exe --run-id pyramids_final_best --base-port 8000 --num-envs 1 --force --time-scale 20 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1767788027"
},
"total": 18156.34620270005,
"count": 1,
"self": 0.4155688000610098,
"children": {
"run_training.setup": {
"total": 0.15365290001500398,
"count": 1,
"self": 0.15365290001500398
},
"TrainerController.start_learning": {
"total": 18155.776980999974,
"count": 1,
"self": 15.2952946331352,
"children": {
"TrainerController._reset_env": {
"total": 7.230988299939781,
"count": 1,
"self": 7.230988299939781
},
"TrainerController.advance": {
"total": 18133.08894036687,
"count": 659181,
"self": 14.994775475468487,
"children": {
"env_step": {
"total": 9079.980078772176,
"count": 659181,
"self": 7798.481798222056,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1271.3684725227067,
"count": 659181,
"self": 44.13604481180664,
"children": {
"TorchPolicy.evaluate": {
"total": 1227.2324277109,
"count": 625065,
"self": 1227.2324277109
}
}
},
"workers": {
"total": 10.129808027413674,
"count": 659181,
"self": 0.0,
"children": {
"worker_root": {
"total": 18128.24424395815,
"count": 659181,
"is_parallel": true,
"self": 11425.022673878819,
"children": {
"steps_from_proto": {
"total": 0.002118999953381717,
"count": 1,
"is_parallel": true,
"self": 0.0002384998369961977,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018805001163855195,
"count": 8,
"is_parallel": true,
"self": 0.0018805001163855195
}
}
},
"UnityEnvironment.step": {
"total": 6703.219451079378,
"count": 659181,
"is_parallel": true,
"self": 202.9730432110373,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 169.48975574749056,
"count": 659181,
"is_parallel": true,
"self": 169.48975574749056
},
"communicator.exchange": {
"total": 5697.137891348568,
"count": 659181,
"is_parallel": true,
"self": 5697.137891348568
},
"steps_from_proto": {
"total": 633.6187607722823,
"count": 659181,
"is_parallel": true,
"self": 150.71688713051844,
"children": {
"_process_rank_one_or_two_observation": {
"total": 482.9018736417638,
"count": 5273448,
"is_parallel": true,
"self": 482.9018736417638
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 9038.114086119225,
"count": 659181,
"self": 24.974240463227034,
"children": {
"process_trajectory": {
"total": 1428.586373554077,
"count": 659181,
"self": 1427.2644285539864,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3219450000906363,
"count": 20,
"self": 1.3219450000906363
}
}
},
"_update_policy": {
"total": 7584.553472101921,
"count": 1207,
"self": 5087.527661683969,
"children": {
"TorchPPOOptimizer.update": {
"total": 2497.025810417952,
"count": 154944,
"self": 2497.025810417952
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.00003807246685e-07,
"count": 1,
"self": 5.00003807246685e-07
},
"TrainerController._save_models": {
"total": 0.16175720002502203,
"count": 1,
"self": 0.1135820000199601,
"children": {
"RLTrainer._checkpoint": {
"total": 0.048175200005061924,
"count": 1,
"self": 0.048175200005061924
}
}
}
}
}
}
}