{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.30666399002075195,
"min": 0.30666399002075195,
"max": 1.3609859943389893,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9278.42578125,
"min": 9278.42578125,
"max": 41286.87109375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989988.0,
"min": 29952.0,
"max": 989988.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989988.0,
"min": 29952.0,
"max": 989988.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6319425702095032,
"min": -0.1605808287858963,
"max": 0.6439498662948608,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 177.57586669921875,
"min": -38.057655334472656,
"max": 183.14596557617188,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0022597666829824448,
"min": -0.04774706810712814,
"max": 0.514366626739502,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.6349944472312927,
"min": -12.843961715698242,
"max": 121.90489196777344,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07059191777911371,
"min": 0.0643295065387922,
"max": 0.07390147254911773,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9882868489075919,
"min": 0.4769353547251259,
"max": 1.0377979457358986,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015937887618617554,
"min": 0.00030572229860254815,
"max": 0.017113225392503886,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22313042666064575,
"min": 0.002445778388820385,
"max": 0.2395851554950544,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.693854578271428e-06,
"min": 7.693854578271428e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001077139640958,
"min": 0.0001077139640958,
"max": 0.0033526374824543,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10256458571428571,
"min": 0.10256458571428571,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4359042,
"min": 1.3691136000000002,
"max": 2.6175457000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002662021128571429,
"min": 0.0002662021128571429,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037268295800000004,
"min": 0.0037268295800000004,
"max": 0.11179281542999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008735124953091145,
"min": 0.008735124953091145,
"max": 0.57728511095047,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12229175120592117,
"min": 0.12229175120592117,
"max": 4.0409955978393555,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 310.8404255319149,
"min": 279.3727272727273,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29219.0,
"min": 15984.0,
"max": 32557.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6465872281250802,
"min": -1.0000000521540642,
"max": 1.6862242277523485,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 154.77919944375753,
"min": -32.000001668930054,
"max": 185.2677981853485,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6465872281250802,
"min": -1.0000000521540642,
"max": 1.6862242277523485,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 154.77919944375753,
"min": -32.000001668930054,
"max": 185.2677981853485,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02810853449974526,
"min": 0.02579433433817361,
"max": 11.65544612519443,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.6422022429760545,
"min": 2.599200731056044,
"max": 186.48713800311089,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1724982427",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1724985993"
},
"total": 3566.1382640620004,
"count": 1,
"self": 0.7030605499999183,
"children": {
"run_training.setup": {
"total": 0.11336169900005189,
"count": 1,
"self": 0.11336169900005189
},
"TrainerController.start_learning": {
"total": 3565.3218418130004,
"count": 1,
"self": 2.405519894043664,
"children": {
"TrainerController._reset_env": {
"total": 3.429263828000103,
"count": 1,
"self": 3.429263828000103
},
"TrainerController.advance": {
"total": 3559.400233782956,
"count": 64218,
"self": 2.5399560499945437,
"children": {
"env_step": {
"total": 2402.082794324945,
"count": 64218,
"self": 2223.50707920297,
"children": {
"SubprocessEnvManager._take_step": {
"total": 177.01699710398498,
"count": 64218,
"self": 7.724018700010902,
"children": {
"TorchPolicy.evaluate": {
"total": 169.29297840397408,
"count": 62575,
"self": 169.29297840397408
}
}
},
"workers": {
"total": 1.5587180179900315,
"count": 64218,
"self": 0.0,
"children": {
"worker_root": {
"total": 3557.54079078699,
"count": 64218,
"is_parallel": true,
"self": 1528.4522684940168,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004928995000000214,
"count": 1,
"is_parallel": true,
"self": 0.0009593300005690253,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003969664999431188,
"count": 8,
"is_parallel": true,
"self": 0.003969664999431188
}
}
},
"UnityEnvironment.step": {
"total": 0.06602236500020808,
"count": 1,
"is_parallel": true,
"self": 0.0008216700000502897,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005212840001149743,
"count": 1,
"is_parallel": true,
"self": 0.0005212840001149743
},
"communicator.exchange": {
"total": 0.06209966799997346,
"count": 1,
"is_parallel": true,
"self": 0.06209966799997346
},
"steps_from_proto": {
"total": 0.0025797430000693566,
"count": 1,
"is_parallel": true,
"self": 0.0004627670002719242,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021169759997974325,
"count": 8,
"is_parallel": true,
"self": 0.0021169759997974325
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2029.088522292973,
"count": 64217,
"is_parallel": true,
"self": 52.525770423747645,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 31.62955435213121,
"count": 64217,
"is_parallel": true,
"self": 31.62955435213121
},
"communicator.exchange": {
"total": 1810.045950765069,
"count": 64217,
"is_parallel": true,
"self": 1810.045950765069
},
"steps_from_proto": {
"total": 134.88724675202525,
"count": 64217,
"is_parallel": true,
"self": 28.99446972375881,
"children": {
"_process_rank_one_or_two_observation": {
"total": 105.89277702826644,
"count": 513736,
"is_parallel": true,
"self": 105.89277702826644
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1154.7774834080165,
"count": 64218,
"self": 4.540215145979573,
"children": {
"process_trajectory": {
"total": 173.78010705103839,
"count": 64218,
"self": 173.53161740303835,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2484896480000316,
"count": 2,
"self": 0.2484896480000316
}
}
},
"_update_policy": {
"total": 976.4571612109985,
"count": 444,
"self": 386.448967764032,
"children": {
"TorchPPOOptimizer.update": {
"total": 590.0081934469665,
"count": 22800,
"self": 590.0081934469665
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0459998520673253e-06,
"count": 1,
"self": 1.0459998520673253e-06
},
"TrainerController._save_models": {
"total": 0.08682326200050738,
"count": 1,
"self": 0.002191342000514851,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08463191999999253,
"count": 1,
"self": 0.08463191999999253
}
}
}
}
}
}
}