{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5151756405830383,
"min": 0.4937754273414612,
"max": 1.4677432775497437,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15430.541015625,
"min": 14639.4541015625,
"max": 44525.4609375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989887.0,
"min": 29977.0,
"max": 989887.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989887.0,
"min": 29977.0,
"max": 989887.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5688852071762085,
"min": -0.09507118165493011,
"max": 0.5703299641609192,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 159.85675048828125,
"min": -22.912155151367188,
"max": 159.85675048828125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02333887480199337,
"min": -0.01473192311823368,
"max": 0.4760156273841858,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.558223724365234,
"min": -3.8597638607025146,
"max": 113.29171752929688,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06720001774928754,
"min": 0.06545600569000455,
"max": 0.07301168280548631,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9408002484900254,
"min": 0.476887958406286,
"max": 1.0701088743945173,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013673518291657926,
"min": 0.00012041838231152276,
"max": 0.013749681078194294,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19142925608321096,
"min": 0.0015654389700497959,
"max": 0.19472646024951246,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4197618124928585e-06,
"min": 7.4197618124928585e-06,
"max": 0.00029523733015898577,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010387666537490001,
"min": 0.00010387666537490001,
"max": 0.0036088401970533,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247322142857143,
"min": 0.10247322142857143,
"max": 0.19841244285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4346251,
"min": 1.3888871,
"max": 2.5281381,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025707482071428585,
"min": 0.00025707482071428585,
"max": 0.009841403041428571,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035990474900000017,
"min": 0.0035990474900000017,
"max": 0.12030437533,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005797781050205231,
"min": 0.005702413152903318,
"max": 0.30150943994522095,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.08116893470287323,
"min": 0.07983378320932388,
"max": 2.1105661392211914,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 344.031914893617,
"min": 327.26666666666665,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32339.0,
"min": 16648.0,
"max": 33206.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.570829764484091,
"min": -0.999880051612854,
"max": 1.5708743967982226,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 147.65799786150455,
"min": -31.995601654052734,
"max": 147.65799786150455,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.570829764484091,
"min": -0.999880051612854,
"max": 1.5708743967982226,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 147.65799786150455,
"min": -31.995601654052734,
"max": 147.65799786150455,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02064255455247513,
"min": 0.02014948305213145,
"max": 5.967881978434675,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.9404001279326621,
"min": 1.6609239851823077,
"max": 101.45399363338947,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1772231955",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1772234157"
},
"total": 2202.2745240430004,
"count": 1,
"self": 0.5804008870009056,
"children": {
"run_training.setup": {
"total": 0.023337999999966996,
"count": 1,
"self": 0.023337999999966996
},
"TrainerController.start_learning": {
"total": 2201.6707851559995,
"count": 1,
"self": 1.2881512870003462,
"children": {
"TrainerController._reset_env": {
"total": 2.092294653999943,
"count": 1,
"self": 2.092294653999943
},
"TrainerController.advance": {
"total": 2198.213185983999,
"count": 63846,
"self": 1.3452456050190449,
"children": {
"env_step": {
"total": 1541.093910479035,
"count": 63846,
"self": 1389.3073577249327,
"children": {
"SubprocessEnvManager._take_step": {
"total": 150.99965603505893,
"count": 63846,
"self": 4.66069451009821,
"children": {
"TorchPolicy.evaluate": {
"total": 146.33896152496072,
"count": 62537,
"self": 146.33896152496072
}
}
},
"workers": {
"total": 0.7868967190433978,
"count": 63846,
"self": 0.0,
"children": {
"worker_root": {
"total": 2194.312192937073,
"count": 63846,
"is_parallel": true,
"self": 922.394727362089,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018097529998613027,
"count": 1,
"is_parallel": true,
"self": 0.0005509339998752694,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012588189999860333,
"count": 8,
"is_parallel": true,
"self": 0.0012588189999860333
}
}
},
"UnityEnvironment.step": {
"total": 0.04935867499989399,
"count": 1,
"is_parallel": true,
"self": 0.0005379489996357734,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005064480001237825,
"count": 1,
"is_parallel": true,
"self": 0.0005064480001237825
},
"communicator.exchange": {
"total": 0.04668886000013117,
"count": 1,
"is_parallel": true,
"self": 0.04668886000013117
},
"steps_from_proto": {
"total": 0.0016254180000032648,
"count": 1,
"is_parallel": true,
"self": 0.00036965099980079685,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001255767000202468,
"count": 8,
"is_parallel": true,
"self": 0.001255767000202468
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1271.9174655749841,
"count": 63845,
"is_parallel": true,
"self": 34.32030016775525,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.701151613018055,
"count": 63845,
"is_parallel": true,
"self": 23.701151613018055
},
"communicator.exchange": {
"total": 1104.6521258711477,
"count": 63845,
"is_parallel": true,
"self": 1104.6521258711477
},
"steps_from_proto": {
"total": 109.24388792306308,
"count": 63845,
"is_parallel": true,
"self": 22.73648192996734,
"children": {
"_process_rank_one_or_two_observation": {
"total": 86.50740599309574,
"count": 510760,
"is_parallel": true,
"self": 86.50740599309574
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 655.7740298999454,
"count": 63846,
"self": 2.5770989599429868,
"children": {
"process_trajectory": {
"total": 125.98632838600633,
"count": 63846,
"self": 125.8096104550059,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17671793100043942,
"count": 2,
"self": 0.17671793100043942
}
}
},
"_update_policy": {
"total": 527.2106025539961,
"count": 455,
"self": 291.4364088920104,
"children": {
"TorchPPOOptimizer.update": {
"total": 235.7741936619857,
"count": 22782,
"self": 235.7741936619857
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.030000001075678e-07,
"count": 1,
"self": 9.030000001075678e-07
},
"TrainerController._save_models": {
"total": 0.07715232800001104,
"count": 1,
"self": 0.001103937000152655,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07604839099985838,
"count": 1,
"self": 0.07604839099985838
}
}
}
}
}
}
}