{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.38883206248283386,
"min": 0.37888020277023315,
"max": 1.4287042617797852,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11814.2734375,
"min": 11396.716796875,
"max": 43341.171875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989924.0,
"min": 29877.0,
"max": 989924.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989924.0,
"min": 29877.0,
"max": 989924.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6385881900787354,
"min": -0.09848522394895554,
"max": 0.6385881900787354,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 178.8046875,
"min": -23.63645362854004,
"max": 180.19290161132812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.00990214105695486,
"min": -0.04188278317451477,
"max": 0.355866938829422,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.772599458694458,
"min": -11.09893798828125,
"max": 84.34046173095703,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06260255032107467,
"min": 0.06260255032107467,
"max": 0.07363696472895837,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.8764357044950455,
"min": 0.48967032092048024,
"max": 1.1045544709343755,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014336948462979443,
"min": 0.00029128638437028146,
"max": 0.01696533248122294,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2007172784817122,
"min": 0.003786722996813659,
"max": 0.23751465473712113,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.2400618723928565e-06,
"min": 7.2400618723928565e-06,
"max": 0.0002952361301593857,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001013608662135,
"min": 0.0001013608662135,
"max": 0.0034859815380061995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10241332142857144,
"min": 0.10241332142857144,
"max": 0.19841204285714284,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4337865000000003,
"min": 1.3888843,
"max": 2.617646,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002510908107142857,
"min": 0.0002510908107142857,
"max": 0.00984136308142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00351527135,
"min": 0.00351527135,
"max": 0.11621318062000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009000374004244804,
"min": 0.009000374004244804,
"max": 0.39489126205444336,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12600523233413696,
"min": 0.12600523233413696,
"max": 2.7642388343811035,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 310.2826086956522,
"min": 299.15384615384613,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28546.0,
"min": 16676.0,
"max": 32942.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6898171778327675,
"min": -0.999875052832067,
"max": 1.6898171778327675,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 157.15299753844738,
"min": -31.996001690626144,
"max": 173.187998957932,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6898171778327675,
"min": -0.999875052832067,
"max": 1.6898171778327675,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 157.15299753844738,
"min": -31.996001690626144,
"max": 173.187998957932,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02872257416890133,
"min": 0.02854337928917268,
"max": 7.65863890507642,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.6711993977078237,
"min": 2.6711993977078237,
"max": 130.19686138629913,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1765969415",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1765971854"
},
"total": 2439.0524384939995,
"count": 1,
"self": 0.5345145949991092,
"children": {
"run_training.setup": {
"total": 0.02316170900007819,
"count": 1,
"self": 0.02316170900007819
},
"TrainerController.start_learning": {
"total": 2438.49476219,
"count": 1,
"self": 1.616505465026421,
"children": {
"TrainerController._reset_env": {
"total": 2.0723512060001212,
"count": 1,
"self": 2.0723512060001212
},
"TrainerController.advance": {
"total": 2434.727396469973,
"count": 64038,
"self": 1.6168280129559207,
"children": {
"env_step": {
"total": 1756.915445970043,
"count": 64038,
"self": 1591.8379089980563,
"children": {
"SubprocessEnvManager._take_step": {
"total": 164.1021421999808,
"count": 64038,
"self": 5.028818298999113,
"children": {
"TorchPolicy.evaluate": {
"total": 159.07332390098168,
"count": 62573,
"self": 159.07332390098168
}
}
},
"workers": {
"total": 0.9753947720059841,
"count": 64038,
"self": 0.0,
"children": {
"worker_root": {
"total": 2431.6109919630126,
"count": 64038,
"is_parallel": true,
"self": 969.3754612500197,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018616429999838147,
"count": 1,
"is_parallel": true,
"self": 0.0005819080001856491,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012797349997981655,
"count": 8,
"is_parallel": true,
"self": 0.0012797349997981655
}
}
},
"UnityEnvironment.step": {
"total": 0.05938885300020047,
"count": 1,
"is_parallel": true,
"self": 0.0005632910001622804,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047537199998259894,
"count": 1,
"is_parallel": true,
"self": 0.00047537199998259894
},
"communicator.exchange": {
"total": 0.056517838999980086,
"count": 1,
"is_parallel": true,
"self": 0.056517838999980086
},
"steps_from_proto": {
"total": 0.001832351000075505,
"count": 1,
"is_parallel": true,
"self": 0.00042198799997095193,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001410363000104553,
"count": 8,
"is_parallel": true,
"self": 0.001410363000104553
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1462.2355307129928,
"count": 64037,
"is_parallel": true,
"self": 35.95260894592229,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.700905000051534,
"count": 64037,
"is_parallel": true,
"self": 25.700905000051534
},
"communicator.exchange": {
"total": 1284.2913955989934,
"count": 64037,
"is_parallel": true,
"self": 1284.2913955989934
},
"steps_from_proto": {
"total": 116.29062116802561,
"count": 64037,
"is_parallel": true,
"self": 24.77017002419916,
"children": {
"_process_rank_one_or_two_observation": {
"total": 91.52045114382645,
"count": 512296,
"is_parallel": true,
"self": 91.52045114382645
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 676.195122486974,
"count": 64038,
"self": 3.062663447994737,
"children": {
"process_trajectory": {
"total": 131.81877608997274,
"count": 64038,
"self": 131.62595273197303,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19282335799971406,
"count": 2,
"self": 0.19282335799971406
}
}
},
"_update_policy": {
"total": 541.3136829490065,
"count": 455,
"self": 301.16457505005087,
"children": {
"TorchPPOOptimizer.update": {
"total": 240.14910789895566,
"count": 22776,
"self": 240.14910789895566
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.320002962136641e-07,
"count": 1,
"self": 8.320002962136641e-07
},
"TrainerController._save_models": {
"total": 0.07850821700048982,
"count": 1,
"self": 0.0009922790004566195,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0775159380000332,
"count": 1,
"self": 0.0775159380000332
}
}
}
}
}
}
}