{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2537256181240082,
"min": 0.2537256181240082,
"max": 1.4032658338546753,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 7660.48388671875,
"min": 7660.48388671875,
"max": 42569.47265625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989888.0,
"min": 29929.0,
"max": 989888.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989888.0,
"min": 29929.0,
"max": 989888.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6219428181648254,
"min": -0.10362941771745682,
"max": 0.6587494015693665,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 177.2537078857422,
"min": -24.974689483642578,
"max": 189.71983337402344,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.006579759530723095,
"min": 0.0002667423104867339,
"max": 0.3138495087623596,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.8752315044403076,
"min": 0.07041996717453003,
"max": 74.38233184814453,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06919829887577661,
"min": 0.06427101832278091,
"max": 0.07309898245356246,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9687761842608725,
"min": 0.49633130709735684,
"max": 1.0233857543498743,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015242705704528233,
"min": 0.0013174702584383279,
"max": 0.017466594770320667,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21339787986339526,
"min": 0.017127113359698264,
"max": 0.24453232678448936,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.708361716292855e-06,
"min": 7.708361716292855e-06,
"max": 0.0002952339015887,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010791706402809996,
"min": 0.00010791706402809996,
"max": 0.0037578856473715,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10256942142857141,
"min": 0.10256942142857141,
"max": 0.1984113,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4359718999999997,
"min": 1.3888791,
"max": 2.652628500000001,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002666852007142857,
"min": 0.0002666852007142857,
"max": 0.009841288870000001,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00373359281,
"min": 0.00373359281,
"max": 0.12527758715,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008850283920764923,
"min": 0.008850283920764923,
"max": 0.46593397855758667,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12390397489070892,
"min": 0.12390397489070892,
"max": 3.261537790298462,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 301.5612244897959,
"min": 299.5353535353535,
"max": 984.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29553.0,
"min": 16728.0,
"max": 34486.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6576101846080653,
"min": -0.9067400512595971,
"max": 1.6802565566819123,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 162.4457980915904,
"min": -27.202201537787914,
"max": 166.34539911150932,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6576101846080653,
"min": -0.9067400512595971,
"max": 1.6802565566819123,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 162.4457980915904,
"min": -27.202201537787914,
"max": 166.34539911150932,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02836696075187041,
"min": 0.02836696075187041,
"max": 8.625538729569492,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.7799621536833,
"min": 2.662328138947487,
"max": 146.63415840268135,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1755306764",
"python_version": "3.10.12 (main, May 27 2025, 17:12:29) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training1 --no-graphics",
"mlagents_version": "1.1.0",
"mlagents_envs_version": "1.1.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1755309312"
},
"total": 2547.3937722520004,
"count": 1,
"self": 0.47641853999903105,
"children": {
"run_training.setup": {
"total": 0.02963032700063195,
"count": 1,
"self": 0.02963032700063195
},
"TrainerController.start_learning": {
"total": 2546.8877233850008,
"count": 1,
"self": 1.7177365070656379,
"children": {
"TrainerController._reset_env": {
"total": 2.789496796000094,
"count": 1,
"self": 2.789496796000094
},
"TrainerController.advance": {
"total": 2542.299325309935,
"count": 64257,
"self": 1.706513455986169,
"children": {
"env_step": {
"total": 1807.6086052999399,
"count": 64257,
"self": 1630.1957781668516,
"children": {
"SubprocessEnvManager._take_step": {
"total": 176.40522902528392,
"count": 64257,
"self": 5.3379845535137065,
"children": {
"TorchPolicy.evaluate": {
"total": 171.0672444717702,
"count": 62555,
"self": 171.0672444717702
}
}
},
"workers": {
"total": 1.0075981078043696,
"count": 64257,
"self": 0.0,
"children": {
"worker_root": {
"total": 2540.971787855925,
"count": 64257,
"is_parallel": true,
"self": 1043.955879762092,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005064510000011069,
"count": 1,
"is_parallel": true,
"self": 0.003121334999377723,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001943175000633346,
"count": 8,
"is_parallel": true,
"self": 0.001943175000633346
}
}
},
"UnityEnvironment.step": {
"total": 0.04776530500021181,
"count": 1,
"is_parallel": true,
"self": 0.000530319000063173,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044379999962984584,
"count": 1,
"is_parallel": true,
"self": 0.00044379999962984584
},
"communicator.exchange": {
"total": 0.04519451600026514,
"count": 1,
"is_parallel": true,
"self": 0.04519451600026514
},
"steps_from_proto": {
"total": 0.0015966700002536527,
"count": 1,
"is_parallel": true,
"self": 0.00035766500150202774,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001239004998751625,
"count": 8,
"is_parallel": true,
"self": 0.001239004998751625
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1497.0159080938329,
"count": 64256,
"is_parallel": true,
"self": 32.96387114553181,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.889211704004992,
"count": 64256,
"is_parallel": true,
"self": 22.889211704004992
},
"communicator.exchange": {
"total": 1341.5043087061513,
"count": 64256,
"is_parallel": true,
"self": 1341.5043087061513
},
"steps_from_proto": {
"total": 99.6585165381448,
"count": 64256,
"is_parallel": true,
"self": 20.548382810269686,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.11013372787511,
"count": 514048,
"is_parallel": true,
"self": 79.11013372787511
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 732.9842065540088,
"count": 64257,
"self": 3.2854362777752613,
"children": {
"process_trajectory": {
"total": 145.00276062023386,
"count": 64257,
"self": 144.7168891242336,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2858714960002544,
"count": 2,
"self": 0.2858714960002544
}
}
},
"_update_policy": {
"total": 584.6960096559997,
"count": 459,
"self": 325.0875885679525,
"children": {
"TorchPPOOptimizer.update": {
"total": 259.6084210880472,
"count": 22809,
"self": 259.6084210880472
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.850001904647797e-07,
"count": 1,
"self": 8.850001904647797e-07
},
"TrainerController._save_models": {
"total": 0.08116388700000243,
"count": 1,
"self": 0.0013598799996543676,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07980400700034807,
"count": 1,
"self": 0.07980400700034807
}
}
}
}
}
}
}