{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.18001285195350647,
"min": 0.1632961630821228,
"max": 1.5461946725845337,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 5391.7451171875,
"min": 4951.1396484375,
"max": 46905.36328125,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999980.0,
"min": 29952.0,
"max": 2999980.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999980.0,
"min": 29952.0,
"max": 2999980.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7842468023300171,
"min": -0.11749715358018875,
"max": 0.8778895139694214,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 229.00006103515625,
"min": -28.316814422607422,
"max": 269.5120849609375,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01846114546060562,
"min": -0.013810768723487854,
"max": 0.17249058187007904,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.390654563903809,
"min": -3.5217459201812744,
"max": 41.570228576660156,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0672413562008712,
"min": 0.06391417523204848,
"max": 0.07338632058824823,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9413789868121968,
"min": 0.47285032754860307,
"max": 1.0984310142789493,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015417895554925226,
"min": 0.00015045762951648871,
"max": 0.017985366753095554,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21585053776895316,
"min": 0.0019559491837143534,
"max": 0.26978050129643333,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5701566195047585e-06,
"min": 1.5701566195047585e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.198219267306662e-05,
"min": 2.198219267306662e-05,
"max": 0.0037174249608584,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10052335238095238,
"min": 0.10052335238095238,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4073269333333334,
"min": 1.3962282666666668,
"max": 2.6776197333333336,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.228290285714275e-05,
"min": 6.228290285714275e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008719606399999986,
"min": 0.0008719606399999986,
"max": 0.12393024584000001,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.004939805716276169,
"min": 0.004756361246109009,
"max": 0.2580133080482483,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.06915728002786636,
"min": 0.06658905744552612,
"max": 1.8060932159423828,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 236.4375,
"min": 206.7482993197279,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30264.0,
"min": 15984.0,
"max": 32599.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7322968628723174,
"min": -1.0000000521540642,
"max": 1.7942516920315166,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 221.73399844765663,
"min": -31.996801644563675,
"max": 263.7549987286329,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7322968628723174,
"min": -1.0000000521540642,
"max": 1.7942516920315166,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 221.73399844765663,
"min": -31.996801644563675,
"max": 263.7549987286329,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.012427045508729861,
"min": 0.011186415655735383,
"max": 5.283129307441413,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.5906618251174223,
"min": 1.487793282212806,
"max": 84.53006891906261,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1760089819",
"python_version": "3.10.12 (main, Aug 15 2025, 14:32:43) [GCC 11.4.0]",
"command_line_arguments": "/content/ml-agents-env/bin/mlagents-learn /content/ml-agents/config/ppo/PyramidsRND.yaml --env=/content/ml-agents/training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids1 --no-graphics",
"mlagents_version": "1.0.0",
"mlagents_envs_version": "1.0.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cu117",
"numpy_version": "1.21.2",
"end_time_seconds": "1760096891"
},
"total": 7072.295461977,
"count": 1,
"self": 0.47867414799929975,
"children": {
"run_training.setup": {
"total": 0.01366893900001287,
"count": 1,
"self": 0.01366893900001287
},
"TrainerController.start_learning": {
"total": 7071.80311889,
"count": 1,
"self": 3.9069011329866044,
"children": {
"TrainerController._reset_env": {
"total": 1.8127348029998984,
"count": 1,
"self": 1.8127348029998984
},
"TrainerController.advance": {
"total": 7066.011442064014,
"count": 194634,
"self": 4.175405080989549,
"children": {
"env_step": {
"total": 5092.059416671982,
"count": 194634,
"self": 4761.508632050138,
"children": {
"SubprocessEnvManager._take_step": {
"total": 328.21444867288017,
"count": 194634,
"self": 13.40431451267375,
"children": {
"TorchPolicy.evaluate": {
"total": 314.8101341602064,
"count": 187570,
"self": 314.8101341602064
}
}
},
"workers": {
"total": 2.336335948963665,
"count": 194634,
"self": 0.0,
"children": {
"worker_root": {
"total": 7059.801536872204,
"count": 194634,
"is_parallel": true,
"self": 2621.233063748012,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00291080299996338,
"count": 1,
"is_parallel": true,
"self": 0.0011603630000536214,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017504399999097586,
"count": 8,
"is_parallel": true,
"self": 0.0017504399999097586
}
}
},
"UnityEnvironment.step": {
"total": 0.04978992599990306,
"count": 1,
"is_parallel": true,
"self": 0.000565019999839933,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048564600001554936,
"count": 1,
"is_parallel": true,
"self": 0.00048564600001554936
},
"communicator.exchange": {
"total": 0.04680659699999978,
"count": 1,
"is_parallel": true,
"self": 0.04680659699999978
},
"steps_from_proto": {
"total": 0.0019326630000477962,
"count": 1,
"is_parallel": true,
"self": 0.00046046100032981485,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014722019997179814,
"count": 8,
"is_parallel": true,
"self": 0.0014722019997179814
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4438.568473124193,
"count": 194633,
"is_parallel": true,
"self": 102.77959420744446,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 71.85479475277077,
"count": 194633,
"is_parallel": true,
"self": 71.85479475277077
},
"communicator.exchange": {
"total": 3899.221208797945,
"count": 194633,
"is_parallel": true,
"self": 3899.221208797945
},
"steps_from_proto": {
"total": 364.712875366032,
"count": 194633,
"is_parallel": true,
"self": 75.80643908422655,
"children": {
"_process_rank_one_or_two_observation": {
"total": 288.90643628180544,
"count": 1557064,
"is_parallel": true,
"self": 288.90643628180544
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1969.7766203110418,
"count": 194634,
"self": 7.795919375044832,
"children": {
"process_trajectory": {
"total": 331.20437177000474,
"count": 194634,
"self": 330.73036762500453,
"children": {
"RLTrainer._checkpoint": {
"total": 0.47400414500020815,
"count": 6,
"self": 0.47400414500020815
}
}
},
"_update_policy": {
"total": 1630.7763291659921,
"count": 1386,
"self": 1011.6945659069124,
"children": {
"TorchPPOOptimizer.update": {
"total": 619.0817632590797,
"count": 68406,
"self": 619.0817632590797
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.209999577957205e-07,
"count": 1,
"self": 8.209999577957205e-07
},
"TrainerController._save_models": {
"total": 0.07204006899974047,
"count": 1,
"self": 0.0012041559994031559,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07083591300033731,
"count": 1,
"self": 0.07083591300033731
}
}
}
}
}
}
}