{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.17291924357414246,
"min": 0.15377093851566315,
"max": 1.4117788076400757,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 5206.9443359375,
"min": 4659.87451171875,
"max": 42827.72265625,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999976.0,
"min": 29887.0,
"max": 2999976.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999976.0,
"min": 29887.0,
"max": 2999976.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8608826398849487,
"min": -0.10265273600816727,
"max": 0.8739694356918335,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 267.7344970703125,
"min": -24.739309310913086,
"max": 267.7344970703125,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.007689562626183033,
"min": -0.03204171732068062,
"max": 0.34547337889671326,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -2.391453981399536,
"min": -9.452306747436523,
"max": 81.87718963623047,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06951443805952467,
"min": 0.0630331672208758,
"max": 0.07449299893299567,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9732021328333454,
"min": 0.5524288011025276,
"max": 1.0564465693228962,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01517638965119009,
"min": 0.0012198266929550256,
"max": 0.015419875266421251,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21246945511666127,
"min": 0.012433926611563818,
"max": 0.22976893667838144,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.4484923743452395e-06,
"min": 1.4484923743452395e-06,
"max": 0.0002984059630313458,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.027889324083335e-05,
"min": 2.027889324083335e-05,
"max": 0.0039691244769585335,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10048279761904765,
"min": 0.10048279761904765,
"max": 0.1994686541666667,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.406759166666667,
"min": 1.406759166666667,
"max": 2.723041466666666,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.8231482142857186e-05,
"min": 5.8231482142857186e-05,
"max": 0.00994691855125,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008152407500000006,
"min": 0.0008152407500000006,
"max": 0.13231184252,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005447447765618563,
"min": 0.005309185944497585,
"max": 0.4513447880744934,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.07626426964998245,
"min": 0.07432860136032104,
"max": 3.6107583045959473,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 208.05333333333334,
"min": 204.4160583941606,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31208.0,
"min": 15919.0,
"max": 33281.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7786066532631715,
"min": -0.9999500517733395,
"max": 1.7955839335918427,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 266.7909979894757,
"min": -31.998401656746864,
"max": 266.7909979894757,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7786066532631715,
"min": -0.9999500517733395,
"max": 1.7955839335918427,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 266.7909979894757,
"min": -31.998401656746864,
"max": 266.7909979894757,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.011840315460212879,
"min": 0.011824341663614448,
"max": 9.361570369452238,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.7760473190319317,
"min": 1.5844617829243361,
"max": 149.7851259112358,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740376031",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/alien/.local/share/mamba/envs/deep-rl/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1740379184"
},
"total": 3152.4720249649836,
"count": 1,
"self": 0.21816878096433356,
"children": {
"run_training.setup": {
"total": 0.01668302499456331,
"count": 1,
"self": 0.01668302499456331
},
"TrainerController.start_learning": {
"total": 3152.2371731590247,
"count": 1,
"self": 2.5445676614763215,
"children": {
"TrainerController._reset_env": {
"total": 1.55027294595493,
"count": 1,
"self": 1.55027294595493
},
"TrainerController.advance": {
"total": 3148.0890001385706,
"count": 195212,
"self": 2.5107865219470114,
"children": {
"env_step": {
"total": 2009.3240775034064,
"count": 195212,
"self": 1729.7895267679123,
"children": {
"SubprocessEnvManager._take_step": {
"total": 277.89236204046756,
"count": 195212,
"self": 7.317112011776771,
"children": {
"TorchPolicy.evaluate": {
"total": 270.5752500286908,
"count": 187559,
"self": 270.5752500286908
}
}
},
"workers": {
"total": 1.6421886950265616,
"count": 195212,
"self": 0.0,
"children": {
"worker_root": {
"total": 3148.800737663987,
"count": 195212,
"is_parallel": true,
"self": 1594.055274826591,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009371869964525104,
"count": 1,
"is_parallel": true,
"self": 0.00026788696413859725,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006693000323139131,
"count": 8,
"is_parallel": true,
"self": 0.0006693000323139131
}
}
},
"UnityEnvironment.step": {
"total": 0.019988799991551787,
"count": 1,
"is_parallel": true,
"self": 0.00022407196229323745,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00016757601406425238,
"count": 1,
"is_parallel": true,
"self": 0.00016757601406425238
},
"communicator.exchange": {
"total": 0.019009742012713104,
"count": 1,
"is_parallel": true,
"self": 0.019009742012713104
},
"steps_from_proto": {
"total": 0.0005874100024811924,
"count": 1,
"is_parallel": true,
"self": 0.00013781210873275995,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004495978937484324,
"count": 8,
"is_parallel": true,
"self": 0.0004495978937484324
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1554.7454628373962,
"count": 195211,
"is_parallel": true,
"self": 47.04075167508563,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.893535839451943,
"count": 195211,
"is_parallel": true,
"self": 24.893535839451943
},
"communicator.exchange": {
"total": 1352.8311300514033,
"count": 195211,
"is_parallel": true,
"self": 1352.8311300514033
},
"steps_from_proto": {
"total": 129.9800452714553,
"count": 195211,
"is_parallel": true,
"self": 26.629174274159595,
"children": {
"_process_rank_one_or_two_observation": {
"total": 103.35087099729571,
"count": 1561688,
"is_parallel": true,
"self": 103.35087099729571
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1136.254136113217,
"count": 195212,
"self": 4.513091377855744,
"children": {
"process_trajectory": {
"total": 221.54046510130865,
"count": 195212,
"self": 221.22121832327684,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3192467780318111,
"count": 6,
"self": 0.3192467780318111
}
}
},
"_update_policy": {
"total": 910.2005796340527,
"count": 1397,
"self": 478.96880203904584,
"children": {
"TorchPPOOptimizer.update": {
"total": 431.23177759500686,
"count": 68391,
"self": 431.23177759500686
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.799818947911263e-07,
"count": 1,
"self": 6.799818947911263e-07
},
"TrainerController._save_models": {
"total": 0.05333173304097727,
"count": 1,
"self": 0.000749443017411977,
"children": {
"RLTrainer._checkpoint": {
"total": 0.05258229002356529,
"count": 1,
"self": 0.05258229002356529
}
}
}
}
}
}
}