{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 1.4965856075286865,
"min": 1.4965856075286865,
"max": 1.4965856075286865,
"count": 1
},
"Pyramids.Policy.Entropy.sum": {
"value": 45400.421875,
"min": 45400.421875,
"max": 45400.421875,
"count": 1
},
"Pyramids.Step.mean": {
"value": 29952.0,
"min": 29952.0,
"max": 29952.0,
"count": 1
},
"Pyramids.Step.sum": {
"value": 29952.0,
"min": 29952.0,
"max": 29952.0,
"count": 1
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.08600808680057526,
"min": 0.08600808680057526,
"max": 0.08600808680057526,
"count": 1
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 20.3839168548584,
"min": 20.3839168548584,
"max": 20.3839168548584,
"count": 1
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.25756609439849854,
"min": 0.25756609439849854,
"max": 0.25756609439849854,
"count": 1
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 61.04316711425781,
"min": 61.04316711425781,
"max": 61.04316711425781,
"count": 1
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07188484326766699,
"min": 0.07188484326766699,
"max": 0.07188484326766699,
"count": 1
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.5031939028736689,
"min": 0.5031939028736689,
"max": 0.5031939028736689,
"count": 1
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.006305784135028588,
"min": 0.006305784135028588,
"max": 0.006305784135028588,
"count": 1
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.04414048894520012,
"min": 0.04414048894520012,
"max": 0.04414048894520012,
"count": 1
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00013835433959619048,
"min": 0.00013835433959619048,
"max": 0.00013835433959619048,
"count": 1
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0009684803771733333,
"min": 0.0009684803771733333,
"max": 0.0009684803771733333,
"count": 1
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.14611809523809524,
"min": 0.14611809523809524,
"max": 0.14611809523809524,
"count": 1
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.0228266666666668,
"min": 1.0228266666666668,
"max": 1.0228266666666668,
"count": 1
},
"Pyramids.Policy.Beta.mean": {
"value": 0.004617197714285715,
"min": 0.004617197714285715,
"max": 0.004617197714285715,
"count": 1
},
"Pyramids.Policy.Beta.sum": {
"value": 0.032320384,
"min": 0.032320384,
"max": 0.032320384,
"count": 1
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.42595890164375305,
"min": 0.42595890164375305,
"max": 0.42595890164375305,
"count": 1
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 2.9817123413085938,
"min": 2.9817123413085938,
"max": 2.9817123413085938,
"count": 1
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 999.0,
"max": 999.0,
"count": 1
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 15984.0,
"min": 15984.0,
"max": 15984.0,
"count": 1
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -1.0000000521540642,
"min": -1.0000000521540642,
"max": -1.0000000521540642,
"count": 1
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -16.000000834465027,
"min": -16.000000834465027,
"max": -16.000000834465027,
"count": 1
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -1.0000000521540642,
"min": -1.0000000521540642,
"max": -1.0000000521540642,
"count": 1
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -16.000000834465027,
"min": -16.000000834465027,
"max": -16.000000834465027,
"count": 1
},
"Pyramids.Policy.RndReward.mean": {
"value": 7.356523561291397,
"min": 7.356523561291397,
"max": 7.356523561291397,
"count": 1
},
"Pyramids.Policy.RndReward.sum": {
"value": 117.70437698066235,
"min": 117.70437698066235,
"max": 117.70437698066235,
"count": 1
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1735364162",
"python_version": "3.10.12 (main, Nov 6 2024, 20:22:13) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1735364234"
},
"total": 71.635124787,
"count": 1,
"self": 0.7070392129999163,
"children": {
"run_training.setup": {
"total": 0.07771508000007543,
"count": 1,
"self": 0.07771508000007543
},
"TrainerController.start_learning": {
"total": 70.850370494,
"count": 1,
"self": 0.07010627399745317,
"children": {
"TrainerController._reset_env": {
"total": 2.902195200999927,
"count": 1,
"self": 2.902195200999927
},
"TrainerController.advance": {
"total": 67.7836262620026,
"count": 1896,
"self": 0.07678450200057796,
"children": {
"env_step": {
"total": 48.617134404002854,
"count": 1896,
"self": 44.059663870005124,
"children": {
"SubprocessEnvManager._take_step": {
"total": 4.517310955999392,
"count": 1896,
"self": 0.2178728749978518,
"children": {
"TorchPolicy.evaluate": {
"total": 4.29943808100154,
"count": 1896,
"self": 4.29943808100154
}
}
},
"workers": {
"total": 0.040159577998338136,
"count": 1896,
"self": 0.0,
"children": {
"worker_root": {
"total": 70.4121905179984,
"count": 1896,
"is_parallel": true,
"self": 31.567749567996202,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0033016339999676347,
"count": 1,
"is_parallel": true,
"self": 0.001163695999821357,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021379380001462778,
"count": 8,
"is_parallel": true,
"self": 0.0021379380001462778
}
}
},
"UnityEnvironment.step": {
"total": 0.06795123500000955,
"count": 1,
"is_parallel": true,
"self": 0.000767558999996254,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005715510000072754,
"count": 1,
"is_parallel": true,
"self": 0.0005715510000072754
},
"communicator.exchange": {
"total": 0.06448035300002175,
"count": 1,
"is_parallel": true,
"self": 0.06448035300002175
},
"steps_from_proto": {
"total": 0.0021317719999842666,
"count": 1,
"is_parallel": true,
"self": 0.0004157430000759632,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017160289999083034,
"count": 8,
"is_parallel": true,
"self": 0.0017160289999083034
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 38.844440950002195,
"count": 1895,
"is_parallel": true,
"self": 1.4583566060009616,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.933207287000414,
"count": 1895,
"is_parallel": true,
"self": 0.933207287000414
},
"communicator.exchange": {
"total": 32.50677248500028,
"count": 1895,
"is_parallel": true,
"self": 32.50677248500028
},
"steps_from_proto": {
"total": 3.946104572000536,
"count": 1895,
"is_parallel": true,
"self": 0.861165769003378,
"children": {
"_process_rank_one_or_two_observation": {
"total": 3.084938802997158,
"count": 15160,
"is_parallel": true,
"self": 3.084938802997158
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 19.089707355999167,
"count": 1896,
"self": 0.09070551899901602,
"children": {
"process_trajectory": {
"total": 4.593256399000097,
"count": 1896,
"self": 4.593256399000097
},
"_update_policy": {
"total": 14.405745438000054,
"count": 7,
"self": 7.842199120999794,
"children": {
"TorchPPOOptimizer.update": {
"total": 6.5635463170002595,
"count": 442,
"self": 6.5635463170002595
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1749999657695298e-06,
"count": 1,
"self": 1.1749999657695298e-06
},
"TrainerController._save_models": {
"total": 0.09444158200005859,
"count": 1,
"self": 0.0009052660000179458,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09353631600004064,
"count": 1,
"self": 0.09353631600004064
}
}
}
}
}
}
}