{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2812069356441498,
"min": 0.28045454621315,
"max": 1.390143632888794,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8476.7021484375,
"min": 8350.814453125,
"max": 42171.3984375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989898.0,
"min": 29952.0,
"max": 989898.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989898.0,
"min": 29952.0,
"max": 989898.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6066329479217529,
"min": -0.07792646437883377,
"max": 0.6830261945724487,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 171.6771240234375,
"min": -18.780277252197266,
"max": 202.17575073242188,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.003188591217622161,
"min": -0.009613758884370327,
"max": 0.4039810001850128,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.9023712873458862,
"min": -2.566873550415039,
"max": 95.74349975585938,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06766346718289977,
"min": 0.06427575933513184,
"max": 0.07401210384535936,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9472885405605969,
"min": 0.4957895602227386,
"max": 1.059106483143599,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01625417106316447,
"min": 0.0015992942552375533,
"max": 0.0173175726479659,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2275583948843026,
"min": 0.022390119573325748,
"max": 0.2525851318496279,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.3495832644571405e-06,
"min": 7.3495832644571405e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010289416570239997,
"min": 0.00010289416570239997,
"max": 0.0037621129459624,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10244982857142858,
"min": 0.10244982857142858,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4342976,
"min": 1.3886848,
"max": 2.6540376000000006,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002547378742857142,
"min": 0.0002547378742857142,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003566330239999999,
"min": 0.003566330239999999,
"max": 0.12541835624,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009054148569703102,
"min": 0.009054148569703102,
"max": 0.4533422887325287,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12675808370113373,
"min": 0.12675808370113373,
"max": 3.173396110534668,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 305.19607843137254,
"min": 266.0619469026549,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31130.0,
"min": 15984.0,
"max": 34164.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6163470379131681,
"min": -1.0000000521540642,
"max": 1.6985327287585334,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 164.86739786714315,
"min": -26.797801658511162,
"max": 191.93419834971428,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6163470379131681,
"min": -1.0000000521540642,
"max": 1.6985327287585334,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 164.86739786714315,
"min": -26.797801658511162,
"max": 191.93419834971428,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.029032233561498717,
"min": 0.027882801023965075,
"max": 9.307319254614413,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.961287823272869,
"min": 2.729771501151845,
"max": 148.9171080738306,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701335253",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1701337664"
},
"total": 2411.3080466879996,
"count": 1,
"self": 0.4764549319997968,
"children": {
"run_training.setup": {
"total": 0.09151876699979766,
"count": 1,
"self": 0.09151876699979766
},
"TrainerController.start_learning": {
"total": 2410.740072989,
"count": 1,
"self": 1.5917374041337098,
"children": {
"TrainerController._reset_env": {
"total": 3.474739236000005,
"count": 1,
"self": 3.474739236000005
},
"TrainerController.advance": {
"total": 2405.592125952867,
"count": 64274,
"self": 1.515865463717546,
"children": {
"env_step": {
"total": 1747.1918395519774,
"count": 64274,
"self": 1611.7628927387736,
"children": {
"SubprocessEnvManager._take_step": {
"total": 134.4874000090208,
"count": 64274,
"self": 4.951448724922557,
"children": {
"TorchPolicy.evaluate": {
"total": 129.53595128409825,
"count": 62555,
"self": 129.53595128409825
}
}
},
"workers": {
"total": 0.9415468041829627,
"count": 64274,
"self": 0.0,
"children": {
"worker_root": {
"total": 2405.5558260760445,
"count": 64274,
"is_parallel": true,
"self": 918.4647401010548,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023859160000938573,
"count": 1,
"is_parallel": true,
"self": 0.0006724040003973641,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017135119996964931,
"count": 8,
"is_parallel": true,
"self": 0.0017135119996964931
}
}
},
"UnityEnvironment.step": {
"total": 0.0485102940001525,
"count": 1,
"is_parallel": true,
"self": 0.0006168260001686576,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048825600015334203,
"count": 1,
"is_parallel": true,
"self": 0.00048825600015334203
},
"communicator.exchange": {
"total": 0.04570582399992418,
"count": 1,
"is_parallel": true,
"self": 0.04570582399992418
},
"steps_from_proto": {
"total": 0.001699387999906321,
"count": 1,
"is_parallel": true,
"self": 0.00038168800028870464,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013176999996176164,
"count": 8,
"is_parallel": true,
"self": 0.0013176999996176164
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1487.0910859749897,
"count": 64273,
"is_parallel": true,
"self": 35.8143449369677,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.63531981901224,
"count": 64273,
"is_parallel": true,
"self": 24.63531981901224
},
"communicator.exchange": {
"total": 1325.57111687506,
"count": 64273,
"is_parallel": true,
"self": 1325.57111687506
},
"steps_from_proto": {
"total": 101.07030434394983,
"count": 64273,
"is_parallel": true,
"self": 20.639130637049675,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.43117370690015,
"count": 514184,
"is_parallel": true,
"self": 80.43117370690015
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 656.8844209371719,
"count": 64274,
"self": 2.841118149090562,
"children": {
"process_trajectory": {
"total": 131.98837539208444,
"count": 64274,
"self": 131.81307564908502,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17529974299941387,
"count": 2,
"self": 0.17529974299941387
}
}
},
"_update_policy": {
"total": 522.0549273959969,
"count": 458,
"self": 311.5600276009959,
"children": {
"TorchPPOOptimizer.update": {
"total": 210.49489979500095,
"count": 22812,
"self": 210.49489979500095
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.899996828404255e-07,
"count": 1,
"self": 8.899996828404255e-07
},
"TrainerController._save_models": {
"total": 0.08146950599984848,
"count": 1,
"self": 0.0013672830000359681,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08010222299981251,
"count": 1,
"self": 0.08010222299981251
}
}
}
}
}
}
}