{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3855314552783966,
"min": 0.3855314552783966,
"max": 1.378066897392273,
"count": 40
},
"Pyramids.Policy.Entropy.sum": {
"value": 11498.08984375,
"min": 11498.08984375,
"max": 41805.0390625,
"count": 40
},
"Pyramids.Step.mean": {
"value": 1199954.0,
"min": 29952.0,
"max": 1199954.0,
"count": 40
},
"Pyramids.Step.sum": {
"value": 1199954.0,
"min": 29952.0,
"max": 1199954.0,
"count": 40
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.514311671257019,
"min": -0.11507374793291092,
"max": 0.5285899639129639,
"count": 40
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 138.86415100097656,
"min": -27.617698669433594,
"max": 144.83364868164062,
"count": 40
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.008747557178139687,
"min": -0.0147389592602849,
"max": 0.39576444029808044,
"count": 40
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -2.3618404865264893,
"min": -3.684739828109741,
"max": 93.79617309570312,
"count": 40
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06711962069190729,
"min": 0.06521425746835857,
"max": 0.07265472739878019,
"count": 40
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.939674689686702,
"min": 0.4880215411239046,
"max": 1.0542681269968548,
"count": 40
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012083479186999502,
"min": 0.00010189023895286073,
"max": 0.01624624423372249,
"count": 40
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.16916870861799302,
"min": 0.0013245731063871895,
"max": 0.22744741927211484,
"count": 40
},
"Pyramids.Policy.LearningRate.mean": {
"value": 3.916670123047619e-06,
"min": 3.916670123047619e-06,
"max": 0.0002959588584899048,
"count": 40
},
"Pyramids.Policy.LearningRate.sum": {
"value": 5.483338172266666e-05,
"min": 5.483338172266666e-05,
"max": 0.0035195044768319165,
"count": 40
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10130552380952383,
"min": 0.10130552380952383,
"max": 0.1986529523809524,
"count": 40
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4182773333333336,
"min": 1.3905706666666668,
"max": 2.5731680833333335,
"count": 40
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0001404218285714286,
"min": 0.0001404218285714286,
"max": 0.00986542994285714,
"count": 40
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0019659056,
"min": 0.0019659056,
"max": 0.11733949152500003,
"count": 40
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008018297143280506,
"min": 0.008018297143280506,
"max": 0.569322407245636,
"count": 40
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11225615441799164,
"min": 0.11225615441799164,
"max": 3.9852569103240967,
"count": 40
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 377.8,
"min": 353.6046511627907,
"max": 999.0,
"count": 40
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28335.0,
"min": 15984.0,
"max": 32675.0,
"count": 40
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5688586429754894,
"min": -1.0000000521540642,
"max": 1.5998674268119557,
"count": 40
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 117.6643982231617,
"min": -30.56700176000595,
"max": 137.5885987058282,
"count": 40
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5688586429754894,
"min": -1.0000000521540642,
"max": 1.5998674268119557,
"count": 40
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 117.6643982231617,
"min": -30.56700176000595,
"max": 137.5885987058282,
"count": 40
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.031512060582172124,
"min": 0.031031723506713182,
"max": 12.099198868498206,
"count": 40
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.3634045436629094,
"min": 2.3634045436629094,
"max": 193.5871818959713,
"count": 40
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1748185149",
"python_version": "3.10.12 (main, Feb 4 2025, 14:57:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=PyramidsRun1 --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1748187801"
},
"total": 2651.474555421,
"count": 1,
"self": 0.4792221539996717,
"children": {
"run_training.setup": {
"total": 0.019182462999992822,
"count": 1,
"self": 0.019182462999992822
},
"TrainerController.start_learning": {
"total": 2650.976150804,
"count": 1,
"self": 1.624871871940286,
"children": {
"TrainerController._reset_env": {
"total": 3.3206037840000135,
"count": 1,
"self": 3.3206037840000135
},
"TrainerController.advance": {
"total": 2645.9451632340597,
"count": 76445,
"self": 1.582013245131293,
"children": {
"env_step": {
"total": 1828.9355414649553,
"count": 76445,
"self": 1650.2304457689552,
"children": {
"SubprocessEnvManager._take_step": {
"total": 177.75319938500138,
"count": 76445,
"self": 5.470636505054358,
"children": {
"TorchPolicy.evaluate": {
"total": 172.28256287994702,
"count": 75064,
"self": 172.28256287994702
}
}
},
"workers": {
"total": 0.9518963109986771,
"count": 76445,
"self": 0.0,
"children": {
"worker_root": {
"total": 2645.403381128954,
"count": 76445,
"is_parallel": true,
"self": 1123.4189623109519,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018121780000228682,
"count": 1,
"is_parallel": true,
"self": 0.0005732710000074803,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001238907000015388,
"count": 8,
"is_parallel": true,
"self": 0.001238907000015388
}
}
},
"UnityEnvironment.step": {
"total": 0.04596413000001576,
"count": 1,
"is_parallel": true,
"self": 0.0005336140000054002,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004527070000222011,
"count": 1,
"is_parallel": true,
"self": 0.0004527070000222011
},
"communicator.exchange": {
"total": 0.04333521199998813,
"count": 1,
"is_parallel": true,
"self": 0.04333521199998813
},
"steps_from_proto": {
"total": 0.0016425970000000234,
"count": 1,
"is_parallel": true,
"self": 0.0003649190000487579,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012776779999512655,
"count": 8,
"is_parallel": true,
"self": 0.0012776779999512655
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1521.9844188180023,
"count": 76444,
"is_parallel": true,
"self": 37.40288924602373,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 27.334630349065947,
"count": 76444,
"is_parallel": true,
"self": 27.334630349065947
},
"communicator.exchange": {
"total": 1344.2503973670252,
"count": 76444,
"is_parallel": true,
"self": 1344.2503973670252
},
"steps_from_proto": {
"total": 112.99650185588735,
"count": 76444,
"is_parallel": true,
"self": 22.323525861866074,
"children": {
"_process_rank_one_or_two_observation": {
"total": 90.67297599402127,
"count": 611552,
"is_parallel": true,
"self": 90.67297599402127
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 815.4276085239729,
"count": 76445,
"self": 3.035592685941424,
"children": {
"process_trajectory": {
"total": 151.7309350920292,
"count": 76445,
"self": 151.39702853002916,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3339065620000383,
"count": 2,
"self": 0.3339065620000383
}
}
},
"_update_policy": {
"total": 660.6610807460023,
"count": 535,
"self": 366.8418774560113,
"children": {
"TorchPPOOptimizer.update": {
"total": 293.81920328999104,
"count": 27378,
"self": 293.81920328999104
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.179999895219225e-07,
"count": 1,
"self": 8.179999895219225e-07
},
"TrainerController._save_models": {
"total": 0.08551109600011841,
"count": 1,
"self": 0.001321020000432327,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08419007599968609,
"count": 1,
"self": 0.08419007599968609
}
}
}
}
}
}
}