{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6295779347419739,
"min": 0.6211789846420288,
"max": 1.4233251810073853,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 18796.6796875,
"min": 18486.287109375,
"max": 43177.9921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989990.0,
"min": 29894.0,
"max": 989990.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989990.0,
"min": 29894.0,
"max": 989990.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.30580422282218933,
"min": -0.09622843563556671,
"max": 0.3740774691104889,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 80.1207046508789,
"min": -23.287281036376953,
"max": 98.00830078125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.00023443416284862906,
"min": -0.0655827447772026,
"max": 0.43154728412628174,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.061421751976013184,
"min": -17.117095947265625,
"max": 102.27670288085938,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0705793967305605,
"min": 0.0653910528566719,
"max": 0.0756300607139654,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.988111554227847,
"min": 0.5056049138466138,
"max": 1.104247159009834,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013283680319762509,
"min": 0.0012017945767532274,
"max": 0.021028981466043047,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18597152447667512,
"min": 0.015673375331694914,
"max": 0.18597152447667512,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.627104600521428e-06,
"min": 7.627104600521428e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001067794644073,
"min": 0.0001067794644073,
"max": 0.0036328711890429993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254233571428573,
"min": 0.10254233571428573,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4355927000000002,
"min": 1.3886848,
"max": 2.610957,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026397933785714294,
"min": 0.00026397933785714294,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003695710730000001,
"min": 0.003695710730000001,
"max": 0.12111460429999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010113668628036976,
"min": 0.009023323655128479,
"max": 0.5782804489135742,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14159135520458221,
"min": 0.1263265311717987,
"max": 4.0479631423950195,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 520.4098360655738,
"min": 458.01666666666665,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31745.0,
"min": 16821.0,
"max": 32775.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.2171802870074258,
"min": -0.9999936006722911,
"max": 1.375276642665267,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 74.24799750745296,
"min": -30.999801620841026,
"max": 82.51659855991602,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.2171802870074258,
"min": -0.9999936006722911,
"max": 1.375276642665267,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 74.24799750745296,
"min": -30.999801620841026,
"max": 82.51659855991602,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05477043097174619,
"min": 0.04324934251490049,
"max": 11.928728917065788,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.340996289276518,
"min": 2.5949605508940294,
"max": 202.7883915901184,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1752471493",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1752473554"
},
"total": 2060.567796056,
"count": 1,
"self": 0.4765452040001037,
"children": {
"run_training.setup": {
"total": 0.02534767399993143,
"count": 1,
"self": 0.02534767399993143
},
"TrainerController.start_learning": {
"total": 2060.065903178,
"count": 1,
"self": 1.2561369370046123,
"children": {
"TrainerController._reset_env": {
"total": 2.685796140999855,
"count": 1,
"self": 2.685796140999855
},
"TrainerController.advance": {
"total": 2056.038420210996,
"count": 63495,
"self": 1.2987006140697304,
"children": {
"env_step": {
"total": 1411.453421661015,
"count": 63495,
"self": 1266.183376864758,
"children": {
"SubprocessEnvManager._take_step": {
"total": 144.48610840109768,
"count": 63495,
"self": 4.444636610092175,
"children": {
"TorchPolicy.evaluate": {
"total": 140.0414717910055,
"count": 62557,
"self": 140.0414717910055
}
}
},
"workers": {
"total": 0.7839363951593441,
"count": 63495,
"self": 0.0,
"children": {
"worker_root": {
"total": 2055.2296683930053,
"count": 63495,
"is_parallel": true,
"self": 896.5412279060645,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023249629998645105,
"count": 1,
"is_parallel": true,
"self": 0.0006462529997861566,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001678710000078354,
"count": 8,
"is_parallel": true,
"self": 0.001678710000078354
}
}
},
"UnityEnvironment.step": {
"total": 0.04716403399993396,
"count": 1,
"is_parallel": true,
"self": 0.0005274469997402775,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004698620000453957,
"count": 1,
"is_parallel": true,
"self": 0.0004698620000453957
},
"communicator.exchange": {
"total": 0.04459942700009378,
"count": 1,
"is_parallel": true,
"self": 0.04459942700009378
},
"steps_from_proto": {
"total": 0.0015672980000545067,
"count": 1,
"is_parallel": true,
"self": 0.00033911500054273347,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012281829995117732,
"count": 8,
"is_parallel": true,
"self": 0.0012281829995117732
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1158.6884404869409,
"count": 63494,
"is_parallel": true,
"self": 31.293191762014658,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.707245514046008,
"count": 63494,
"is_parallel": true,
"self": 22.707245514046008
},
"communicator.exchange": {
"total": 1010.2581649399797,
"count": 63494,
"is_parallel": true,
"self": 1010.2581649399797
},
"steps_from_proto": {
"total": 94.42983827090052,
"count": 63494,
"is_parallel": true,
"self": 18.85702306770122,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.5728152031993,
"count": 507952,
"is_parallel": true,
"self": 75.5728152031993
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 643.2862979359109,
"count": 63495,
"self": 2.474387752884695,
"children": {
"process_trajectory": {
"total": 124.92785940803174,
"count": 63495,
"self": 124.67214217303149,
"children": {
"RLTrainer._checkpoint": {
"total": 0.25571723500024746,
"count": 2,
"self": 0.25571723500024746
}
}
},
"_update_policy": {
"total": 515.8840507749944,
"count": 452,
"self": 288.3929744030206,
"children": {
"TorchPPOOptimizer.update": {
"total": 227.4910763719738,
"count": 22791,
"self": 227.4910763719738
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.749998414714355e-07,
"count": 1,
"self": 8.749998414714355e-07
},
"TrainerController._save_models": {
"total": 0.08554901399975279,
"count": 1,
"self": 0.0013305679995028186,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08421844600024997,
"count": 1,
"self": 0.08421844600024997
}
}
}
}
}
}
}