{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5181789398193359,
"min": 0.5164467096328735,
"max": 1.4563511610031128,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15354.677734375,
"min": 15354.677734375,
"max": 44179.8671875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989909.0,
"min": 29952.0,
"max": 989909.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989909.0,
"min": 29952.0,
"max": 989909.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.22083735466003418,
"min": -0.10984797030687332,
"max": 0.2893996238708496,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 56.976036071777344,
"min": -26.473360061645508,
"max": 75.5333023071289,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.30310919880867004,
"min": -0.30310919880867004,
"max": 0.3286135196685791,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -78.2021713256836,
"min": -78.2021713256836,
"max": 79.19586181640625,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06456139079643693,
"min": 0.06370337822452364,
"max": 0.07247596805618818,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9038594711501169,
"min": 0.48230402580721315,
"max": 1.072666681133092,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.03862745344254197,
"min": 0.0004408933492176216,
"max": 0.03862745344254197,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.5407843481955875,
"min": 0.006172506889046702,
"max": 0.5407843481955875,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4237689540142825e-06,
"min": 7.4237689540142825e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010393276535619996,
"min": 0.00010393276535619996,
"max": 0.0036330589889804004,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247455714285715,
"min": 0.10247455714285715,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4346438000000001,
"min": 1.3886848,
"max": 2.6110196,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025720825857142857,
"min": 0.00025720825857142857,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036009156200000002,
"min": 0.0036009156200000002,
"max": 0.12112085803999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008648467250168324,
"min": 0.008531960658729076,
"max": 0.4464131295681,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12107853591442108,
"min": 0.11944745481014252,
"max": 3.124891996383667,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 551.9056603773585,
"min": 520.561403508772,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29251.0,
"min": 15984.0,
"max": 32625.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.9573848756416788,
"min": -1.0000000521540642,
"max": 1.0825213948264718,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 50.74139840900898,
"min": -29.90500160306692,
"max": 60.62119811028242,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.9573848756416788,
"min": -1.0000000521540642,
"max": 1.0825213948264718,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 50.74139840900898,
"min": -29.90500160306692,
"max": 60.62119811028242,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04874600977461153,
"min": 0.04874600977461153,
"max": 8.949149466119707,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.583538518054411,
"min": 2.583538518054411,
"max": 143.1863914579153,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739623915",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739626026"
},
"total": 2111.018993109,
"count": 1,
"self": 0.533668981000119,
"children": {
"run_training.setup": {
"total": 0.019841288999941753,
"count": 1,
"self": 0.019841288999941753
},
"TrainerController.start_learning": {
"total": 2110.465482839,
"count": 1,
"self": 1.1965229730817555,
"children": {
"TrainerController._reset_env": {
"total": 2.2189723460001005,
"count": 1,
"self": 2.2189723460001005
},
"TrainerController.advance": {
"total": 2106.955907731918,
"count": 63504,
"self": 1.2131683809502647,
"children": {
"env_step": {
"total": 1438.0470895600113,
"count": 63504,
"self": 1291.8470880659609,
"children": {
"SubprocessEnvManager._take_step": {
"total": 145.50010579903392,
"count": 63504,
"self": 4.3356984520412425,
"children": {
"TorchPolicy.evaluate": {
"total": 141.16440734699268,
"count": 62578,
"self": 141.16440734699268
}
}
},
"workers": {
"total": 0.6998956950164938,
"count": 63504,
"self": 0.0,
"children": {
"worker_root": {
"total": 2105.9233774130066,
"count": 63504,
"is_parallel": true,
"self": 918.2126503310074,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002303179000136879,
"count": 1,
"is_parallel": true,
"self": 0.0006705850000798819,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016325940000569972,
"count": 8,
"is_parallel": true,
"self": 0.0016325940000569972
}
}
},
"UnityEnvironment.step": {
"total": 0.04699932800008355,
"count": 1,
"is_parallel": true,
"self": 0.0004979060001915059,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000532281999994666,
"count": 1,
"is_parallel": true,
"self": 0.000532281999994666
},
"communicator.exchange": {
"total": 0.044414036000034685,
"count": 1,
"is_parallel": true,
"self": 0.044414036000034685
},
"steps_from_proto": {
"total": 0.0015551039998626948,
"count": 1,
"is_parallel": true,
"self": 0.00033998799995060835,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012151159999120864,
"count": 8,
"is_parallel": true,
"self": 0.0012151159999120864
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1187.7107270819993,
"count": 63503,
"is_parallel": true,
"self": 30.47263692694014,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.42213299399964,
"count": 63503,
"is_parallel": true,
"self": 22.42213299399964
},
"communicator.exchange": {
"total": 1044.2037127810324,
"count": 63503,
"is_parallel": true,
"self": 1044.2037127810324
},
"steps_from_proto": {
"total": 90.61224438002705,
"count": 63503,
"is_parallel": true,
"self": 17.53887488713508,
"children": {
"_process_rank_one_or_two_observation": {
"total": 73.07336949289197,
"count": 508024,
"is_parallel": true,
"self": 73.07336949289197
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 667.6956497909566,
"count": 63504,
"self": 2.298490897980173,
"children": {
"process_trajectory": {
"total": 120.85800945197457,
"count": 63504,
"self": 120.47679003397457,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3812194180000006,
"count": 2,
"self": 0.3812194180000006
}
}
},
"_update_policy": {
"total": 544.5391494410019,
"count": 450,
"self": 299.2475964299849,
"children": {
"TorchPPOOptimizer.update": {
"total": 245.29155301101696,
"count": 22794,
"self": 245.29155301101696
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.55999735702062e-07,
"count": 1,
"self": 7.55999735702062e-07
},
"TrainerController._save_models": {
"total": 0.0940790320000815,
"count": 1,
"self": 0.0016754510002101597,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09240358099987134,
"count": 1,
"self": 0.09240358099987134
}
}
}
}
}
}
}