{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.27550312876701355,
"min": 0.25834792852401733,
"max": 1.4050623178482056,
"count": 66
},
"Pyramids.Policy.Entropy.sum": {
"value": 8243.0537109375,
"min": 7746.3046875,
"max": 42623.96875,
"count": 66
},
"Pyramids.Step.mean": {
"value": 1979900.0,
"min": 29952.0,
"max": 1979900.0,
"count": 66
},
"Pyramids.Step.sum": {
"value": 1979900.0,
"min": 29952.0,
"max": 1979900.0,
"count": 66
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6988829374313354,
"min": -0.11405694484710693,
"max": 0.6988829374313354,
"count": 66
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 199.88052368164062,
"min": -27.48772430419922,
"max": 200.10537719726562,
"count": 66
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01821029745042324,
"min": -0.0070686121471226215,
"max": 0.6022505164146423,
"count": 66
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.2081451416015625,
"min": -1.9014567136764526,
"max": 142.73336791992188,
"count": 66
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06929339248260173,
"min": 0.06282748713038884,
"max": 0.07449612383996802,
"count": 66
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9701074947564243,
"min": 0.5001661447938947,
"max": 1.0594284480789167,
"count": 66
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014041961081342638,
"min": 9.133431192385169e-05,
"max": 0.017774556835112667,
"count": 66
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19658745513879694,
"min": 0.001187346055010072,
"max": 0.21795929489114008,
"count": 66
},
"Pyramids.Policy.LearningRate.mean": {
"value": 5.335469650114285e-06,
"min": 5.335469650114285e-06,
"max": 0.0002975753150939428,
"count": 66
},
"Pyramids.Policy.LearningRate.sum": {
"value": 7.469657510159999e-05,
"min": 7.469657510159999e-05,
"max": 0.0037911269862910494,
"count": 66
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10177845714285716,
"min": 0.10177845714285716,
"max": 0.19919177142857142,
"count": 66
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4248984000000002,
"min": 1.3943424,
"max": 2.66370895,
"count": 66
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00018766786857142857,
"min": 0.00018766786857142857,
"max": 0.009919257965714285,
"count": 66
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00262735016,
"min": 0.00262735016,
"max": 0.12638452410499998,
"count": 66
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010234856978058815,
"min": 0.009864476509392262,
"max": 0.745283305644989,
"count": 66
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1432880014181137,
"min": 0.1381026655435562,
"max": 5.216983318328857,
"count": 66
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 275.64220183486236,
"min": 268.60550458715596,
"max": 999.0,
"count": 66
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30045.0,
"min": 15984.0,
"max": 34141.0,
"count": 66
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6892908945679665,
"min": -1.0000000521540642,
"max": 1.7180094158030905,
"count": 66
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 185.8219984024763,
"min": -31.99480167031288,
"max": 185.8219984024763,
"count": 66
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6892908945679665,
"min": -1.0000000521540642,
"max": 1.7180094158030905,
"count": 66
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 185.8219984024763,
"min": -31.99480167031288,
"max": 185.8219984024763,
"count": 66
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.029029120984540153,
"min": 0.0283566456418399,
"max": 15.885404660366476,
"count": 66
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1932033082994167,
"min": 3.0069052900362294,
"max": 254.1664745658636,
"count": 66
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 66
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 66
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1742279590",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids_Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1742284517"
},
"total": 4926.48045705,
"count": 1,
"self": 0.5444621679998818,
"children": {
"run_training.setup": {
"total": 0.024440325000000485,
"count": 1,
"self": 0.024440325000000485
},
"TrainerController.start_learning": {
"total": 4925.911554557,
"count": 1,
"self": 2.9366226261490738,
"children": {
"TrainerController._reset_env": {
"total": 3.5656723109999575,
"count": 1,
"self": 3.5656723109999575
},
"TrainerController.advance": {
"total": 4919.3157502618515,
"count": 128070,
"self": 3.003288209868515,
"children": {
"env_step": {
"total": 3445.7855705750335,
"count": 128070,
"self": 3119.412128281993,
"children": {
"SubprocessEnvManager._take_step": {
"total": 324.68119087800596,
"count": 128070,
"self": 9.874450978089044,
"children": {
"TorchPolicy.evaluate": {
"total": 314.8067398999169,
"count": 125065,
"self": 314.8067398999169
}
}
},
"workers": {
"total": 1.6922514150347752,
"count": 128070,
"self": 0.0,
"children": {
"worker_root": {
"total": 4915.762005272174,
"count": 128070,
"is_parallel": true,
"self": 2033.5907745992172,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0060559099999863975,
"count": 1,
"is_parallel": true,
"self": 0.004594974999804435,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001460935000181962,
"count": 8,
"is_parallel": true,
"self": 0.001460935000181962
}
}
},
"UnityEnvironment.step": {
"total": 0.05045673799997985,
"count": 1,
"is_parallel": true,
"self": 0.0005193279999957667,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047720299994580273,
"count": 1,
"is_parallel": true,
"self": 0.00047720299994580273
},
"communicator.exchange": {
"total": 0.04776102899995749,
"count": 1,
"is_parallel": true,
"self": 0.04776102899995749
},
"steps_from_proto": {
"total": 0.0016991780000807921,
"count": 1,
"is_parallel": true,
"self": 0.00036389200033681846,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013352859997439737,
"count": 8,
"is_parallel": true,
"self": 0.0013352859997439737
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2882.171230672957,
"count": 128069,
"is_parallel": true,
"self": 68.64760795904567,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 47.6284434188525,
"count": 128069,
"is_parallel": true,
"self": 47.6284434188525
},
"communicator.exchange": {
"total": 2563.0016967229303,
"count": 128069,
"is_parallel": true,
"self": 2563.0016967229303
},
"steps_from_proto": {
"total": 202.8934825721284,
"count": 128069,
"is_parallel": true,
"self": 41.42429864008659,
"children": {
"_process_rank_one_or_two_observation": {
"total": 161.4691839320418,
"count": 1024552,
"is_parallel": true,
"self": 161.4691839320418
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1470.5268914769495,
"count": 128070,
"self": 5.889992732891869,
"children": {
"process_trajectory": {
"total": 270.16659548205894,
"count": 128070,
"self": 269.7044371840591,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4621582979998493,
"count": 4,
"self": 0.4621582979998493
}
}
},
"_update_policy": {
"total": 1194.4703032619987,
"count": 921,
"self": 655.8847790780462,
"children": {
"TorchPPOOptimizer.update": {
"total": 538.5855241839524,
"count": 45588,
"self": 538.5855241839524
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.750001481734216e-07,
"count": 1,
"self": 9.750001481734216e-07
},
"TrainerController._save_models": {
"total": 0.09350838299997122,
"count": 1,
"self": 0.0018763500002023648,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09163203299976885,
"count": 1,
"self": 0.09163203299976885
}
}
}
}
}
}
}