{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 1.5034246444702148,
"min": 1.5034246444702148,
"max": 1.5034246444702148,
"count": 1
},
"Pyramids.Policy.Entropy.sum": {
"value": 45607.890625,
"min": 45607.890625,
"max": 45607.890625,
"count": 1
},
"Pyramids.Step.mean": {
"value": 29952.0,
"min": 29952.0,
"max": 29952.0,
"count": 1
},
"Pyramids.Step.sum": {
"value": 29952.0,
"min": 29952.0,
"max": 29952.0,
"count": 1
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.18031790852546692,
"min": -0.18031790852546692,
"max": -0.18031790852546692,
"count": 1
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -42.73534393310547,
"min": -42.73534393310547,
"max": -42.73534393310547,
"count": 1
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.19152510166168213,
"min": 0.19152510166168213,
"max": 0.19152510166168213,
"count": 1
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 45.391448974609375,
"min": 45.391448974609375,
"max": 45.391448974609375,
"count": 1
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0716974082898367,
"min": 0.0716974082898367,
"max": 0.0716974082898367,
"count": 1
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.5018818580288569,
"min": 0.5018818580288569,
"max": 0.5018818580288569,
"count": 1
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.007697324133381511,
"min": 0.007697324133381511,
"max": 0.007697324133381511,
"count": 1
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.053881268933670574,
"min": 0.053881268933670574,
"max": 0.053881268933670574,
"count": 1
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00013835433959619045,
"min": 0.00013835433959619045,
"max": 0.00013835433959619045,
"count": 1
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0009684803771733332,
"min": 0.0009684803771733332,
"max": 0.0009684803771733332,
"count": 1
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1461180952380953,
"min": 0.1461180952380953,
"max": 0.1461180952380953,
"count": 1
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.022826666666667,
"min": 1.022826666666667,
"max": 1.022826666666667,
"count": 1
},
"Pyramids.Policy.Beta.mean": {
"value": 0.004617197714285715,
"min": 0.004617197714285715,
"max": 0.004617197714285715,
"count": 1
},
"Pyramids.Policy.Beta.sum": {
"value": 0.032320384,
"min": 0.032320384,
"max": 0.032320384,
"count": 1
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.424130916595459,
"min": 0.424130916595459,
"max": 0.424130916595459,
"count": 1
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 2.968916416168213,
"min": 2.968916416168213,
"max": 2.968916416168213,
"count": 1
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 999.0,
"max": 999.0,
"count": 1
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 15984.0,
"min": 15984.0,
"max": 15984.0,
"count": 1
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -1.0000000521540642,
"min": -1.0000000521540642,
"max": -1.0000000521540642,
"count": 1
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -16.000000834465027,
"min": -16.000000834465027,
"max": -16.000000834465027,
"count": 1
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -1.0000000521540642,
"min": -1.0000000521540642,
"max": -1.0000000521540642,
"count": 1
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -16.000000834465027,
"min": -16.000000834465027,
"max": -16.000000834465027,
"count": 1
},
"Pyramids.Policy.RndReward.mean": {
"value": 8.953767689876258,
"min": 8.953767689876258,
"max": 8.953767689876258,
"count": 1
},
"Pyramids.Policy.RndReward.sum": {
"value": 143.26028303802013,
"min": 143.26028303802013,
"max": 143.26028303802013,
"count": 1
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1748427203",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1748427261"
},
"total": 58.233437822999804,
"count": 1,
"self": 0.4362452879995544,
"children": {
"run_training.setup": {
"total": 0.021715381000831258,
"count": 1,
"self": 0.021715381000831258
},
"TrainerController.start_learning": {
"total": 57.77547715399942,
"count": 1,
"self": 0.0352893760191364,
"children": {
"TrainerController._reset_env": {
"total": 2.2427645660000053,
"count": 1,
"self": 2.2427645660000053
},
"TrainerController.advance": {
"total": 55.37960114498037,
"count": 1896,
"self": 0.03992810796262347,
"children": {
"env_step": {
"total": 35.950463134007805,
"count": 1896,
"self": 31.172813437055993,
"children": {
"SubprocessEnvManager._take_step": {
"total": 4.756973386970458,
"count": 1896,
"self": 0.13903502697849035,
"children": {
"TorchPolicy.evaluate": {
"total": 4.6179383599919674,
"count": 1896,
"self": 4.6179383599919674
}
}
},
"workers": {
"total": 0.02067630998135428,
"count": 1896,
"self": 0.0,
"children": {
"worker_root": {
"total": 57.35537149798529,
"count": 1896,
"is_parallel": true,
"self": 29.314976506003404,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020529370003714575,
"count": 1,
"is_parallel": true,
"self": 0.0006752650006092153,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013776719997622422,
"count": 8,
"is_parallel": true,
"self": 0.0013776719997622422
}
}
},
"UnityEnvironment.step": {
"total": 0.0467068179996204,
"count": 1,
"is_parallel": true,
"self": 0.0005192240005271742,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004764280001836596,
"count": 1,
"is_parallel": true,
"self": 0.0004764280001836596
},
"communicator.exchange": {
"total": 0.04412000999946031,
"count": 1,
"is_parallel": true,
"self": 0.04412000999946031
},
"steps_from_proto": {
"total": 0.0015911559994492563,
"count": 1,
"is_parallel": true,
"self": 0.00033996299953287235,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001251192999916384,
"count": 8,
"is_parallel": true,
"self": 0.001251192999916384
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 28.040394991981884,
"count": 1895,
"is_parallel": true,
"self": 0.9406066899873622,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.6961545339918302,
"count": 1895,
"is_parallel": true,
"self": 0.6961545339918302
},
"communicator.exchange": {
"total": 23.528452249011025,
"count": 1895,
"is_parallel": true,
"self": 23.528452249011025
},
"steps_from_proto": {
"total": 2.875181518991667,
"count": 1895,
"is_parallel": true,
"self": 0.5931898639464634,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2.2819916550452035,
"count": 15160,
"is_parallel": true,
"self": 2.2819916550452035
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 19.38920990300994,
"count": 1896,
"self": 0.04552560200863809,
"children": {
"process_trajectory": {
"total": 3.461035165002613,
"count": 1896,
"self": 3.461035165002613
},
"_update_policy": {
"total": 15.88264913599869,
"count": 7,
"self": 8.70107598699633,
"children": {
"TorchPPOOptimizer.update": {
"total": 7.181573149002361,
"count": 663,
"self": 7.181573149002361
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0559997463133186e-06,
"count": 1,
"self": 1.0559997463133186e-06
},
"TrainerController._save_models": {
"total": 0.11782101100016007,
"count": 1,
"self": 0.0015779099994688295,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11624310100069124,
"count": 1,
"self": 0.11624310100069124
}
}
}
}
}
}
}