ppo-Pyramids / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.453093022108078,
"min": 0.453093022108078,
"max": 1.409125566482544,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13455.05078125,
"min": 13455.05078125,
"max": 42747.234375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989961.0,
"min": 29873.0,
"max": 989961.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989961.0,
"min": 29873.0,
"max": 989961.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3831951916217804,
"min": -0.14299391210079193,
"max": 0.48588743805885315,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 101.54672241210938,
"min": -33.91477966308594,
"max": 131.18960571289062,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01485910639166832,
"min": 0.011587546207010746,
"max": 0.48251664638519287,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.9376633167266846,
"min": 3.093874931335449,
"max": 114.3564453125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07046148653417865,
"min": 0.06349505386708229,
"max": 0.07229583929074393,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9864608114785012,
"min": 0.5000051367608429,
"max": 1.0287088412539258,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013164488933286296,
"min": 0.00018941111886884622,
"max": 0.015993467040797918,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18430284506600814,
"min": 0.002272933426426155,
"max": 0.22390853857117085,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.476097508000002e-06,
"min": 7.476097508000002e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010466536511200003,
"min": 0.00010466536511200003,
"max": 0.0033312372895875995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249200000000003,
"min": 0.10249200000000003,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4348880000000004,
"min": 1.3886848,
"max": 2.4019470000000007,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002589508000000001,
"min": 0.0002589508000000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036253112000000014,
"min": 0.0036253112000000014,
"max": 0.11105019876000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01657802239060402,
"min": 0.01657802239060402,
"max": 0.4734252095222473,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.23209230601787567,
"min": 0.23209230601787567,
"max": 3.313976526260376,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 505.7894736842105,
"min": 397.8974358974359,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28830.0,
"min": 16800.0,
"max": 32241.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3908172073806155,
"min": -0.9999125525355339,
"max": 1.4995974432441252,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 80.6673980280757,
"min": -31.997201681137085,
"max": 118.4681980162859,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3908172073806155,
"min": -0.9999125525355339,
"max": 1.4995974432441252,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 80.6673980280757,
"min": -31.997201681137085,
"max": 118.4681980162859,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08623238031351979,
"min": 0.07286444600211762,
"max": 8.88130902192172,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.001478058184148,
"min": 5.001478058184148,
"max": 150.98225337266922,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1741953933",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1741956111"
},
"total": 2177.918342014,
"count": 1,
"self": 0.5826338179999766,
"children": {
"run_training.setup": {
"total": 0.02030397799990169,
"count": 1,
"self": 0.02030397799990169
},
"TrainerController.start_learning": {
"total": 2177.315404218,
"count": 1,
"self": 1.3315470610091324,
"children": {
"TrainerController._reset_env": {
"total": 2.161311093999757,
"count": 1,
"self": 2.161311093999757
},
"TrainerController.advance": {
"total": 2173.6783194319905,
"count": 63456,
"self": 1.3719382519466308,
"children": {
"env_step": {
"total": 1489.093976970073,
"count": 63456,
"self": 1330.91040578393,
"children": {
"SubprocessEnvManager._take_step": {
"total": 157.38387551717005,
"count": 63456,
"self": 4.768365410184288,
"children": {
"TorchPolicy.evaluate": {
"total": 152.61551010698577,
"count": 62556,
"self": 152.61551010698577
}
}
},
"workers": {
"total": 0.7996956689730723,
"count": 63456,
"self": 0.0,
"children": {
"worker_root": {
"total": 2172.1009076298647,
"count": 63456,
"is_parallel": true,
"self": 953.5645027409064,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002043379000042478,
"count": 1,
"is_parallel": true,
"self": 0.0007203569998637249,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001323022000178753,
"count": 8,
"is_parallel": true,
"self": 0.001323022000178753
}
}
},
"UnityEnvironment.step": {
"total": 0.0747499809999681,
"count": 1,
"is_parallel": true,
"self": 0.0005417870002020209,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004515489999903366,
"count": 1,
"is_parallel": true,
"self": 0.0004515489999903366
},
"communicator.exchange": {
"total": 0.07206241499989119,
"count": 1,
"is_parallel": true,
"self": 0.07206241499989119
},
"steps_from_proto": {
"total": 0.0016942299998845556,
"count": 1,
"is_parallel": true,
"self": 0.0003574280003704189,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013368019995141367,
"count": 8,
"is_parallel": true,
"self": 0.0013368019995141367
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1218.5364048889583,
"count": 63455,
"is_parallel": true,
"self": 31.896653183194758,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.66034070299156,
"count": 63455,
"is_parallel": true,
"self": 23.66034070299156
},
"communicator.exchange": {
"total": 1065.1386385189348,
"count": 63455,
"is_parallel": true,
"self": 1065.1386385189348
},
"steps_from_proto": {
"total": 97.8407724838371,
"count": 63455,
"is_parallel": true,
"self": 19.752610196371734,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.08816228746537,
"count": 507640,
"is_parallel": true,
"self": 78.08816228746537
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 683.2124042099708,
"count": 63456,
"self": 2.5418236499981504,
"children": {
"process_trajectory": {
"total": 127.98841752397175,
"count": 63456,
"self": 127.72404617297207,
"children": {
"RLTrainer._checkpoint": {
"total": 0.26437135099968145,
"count": 2,
"self": 0.26437135099968145
}
}
},
"_update_policy": {
"total": 552.6821630360009,
"count": 446,
"self": 303.04228856601276,
"children": {
"TorchPPOOptimizer.update": {
"total": 249.63987446998817,
"count": 22821,
"self": 249.63987446998817
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.302000782743562e-06,
"count": 1,
"self": 1.302000782743562e-06
},
"TrainerController._save_models": {
"total": 0.14422532899970975,
"count": 1,
"self": 0.0038966459997027414,
"children": {
"RLTrainer._checkpoint": {
"total": 0.140328683000007,
"count": 1,
"self": 0.140328683000007
}
}
}
}
}
}
}
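
The two halves of this file are easy to consume programmatically: "gauges" holds per-metric value/min/max/count summaries, and the rest is a nested timer tree keyed by "children". Below is a minimal Python sketch (standard library only) that loads the file, reads one gauge, and walks the timer tree. The relative path in TIMERS_PATH is an assumption; point it at wherever your run wrote the log.

import json
from pathlib import Path

TIMERS_PATH = Path("run_logs/timers.json")  # assumed location of this file

with TIMERS_PATH.open() as f:
    timers = json.load(f)

# Each gauge tracks the latest value plus min/max over `count` summary periods.
entropy = timers["gauges"]["Pyramids.Policy.Entropy.mean"]
print(f"entropy: {entropy['value']:.3f} "
      f"(min {entropy['min']:.3f}, max {entropy['max']:.3f}, n={entropy['count']})")

# The timer tree nests under "children"; each node reports total wall-clock
# seconds, a call count, and self-time (total minus time spent in children).
def walk(node: dict, name: str = "root", depth: int = 0) -> None:
    print(f"{'  ' * depth}{name}: total={node.get('total', 0.0):.1f}s "
          f"self={node.get('self', 0.0):.1f}s count={node.get('count', 0)}")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(timers)  # the top-level object is itself the root timer node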
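As a quick sanity check on throughput, the metadata timestamps and the step gauge are enough to estimate environment steps per second. This arithmetic is illustrative, not something ML-Agents records itself; the path is the same assumption as above.

import json

with open("run_logs/timers.json") as f:  # assumed path, as above
    timers = json.load(f)

meta = timers["metadata"]
# Timestamps are stored as strings of Unix seconds.
duration_s = int(meta["end_time_seconds"]) - int(meta["start_time_seconds"])
steps = timers["gauges"]["Pyramids.Step.mean"]["max"]
print(f"{steps:.0f} steps in {duration_s} s -> {steps / duration_s:.0f} steps/s")

For this run that works out to 989,961 steps over 2,178 seconds, roughly 455 steps per second. Note from the timer tree that communicator.exchange accounts for about half of the total wall-clock time (about 1,065 s of 2,178 s), so the Unity environment, not the PPO update, dominates this run's budget.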