{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4428914189338684,
"min": 0.4428914189338684,
"max": 1.4335201978683472,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13308.0009765625,
"min": 13308.0009765625,
"max": 43487.26953125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989972.0,
"min": 29952.0,
"max": 989972.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989972.0,
"min": 29952.0,
"max": 989972.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.524365246295929,
"min": -0.09930995851755142,
"max": 0.5550134778022766,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 146.822265625,
"min": -23.83439064025879,
"max": 154.8487548828125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.003061752999201417,
"min": -0.06743450462818146,
"max": 0.28995344042778015,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.8572908639907837,
"min": -18.477054595947266,
"max": 68.71896362304688,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06949498285280747,
"min": 0.06517484901641082,
"max": 0.07464399516973964,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.042424742792112,
"min": 0.48777097151025955,
"max": 1.0675947509977657,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01541273717949581,
"min": 0.00033963280511144244,
"max": 0.015687364884787527,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23119105769243714,
"min": 0.004075593661337309,
"max": 0.23119105769243714,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.582877472406667e-06,
"min": 7.582877472406667e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001137431620861,
"min": 0.0001137431620861,
"max": 0.0036096207967930994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252759333333336,
"min": 0.10252759333333336,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5379139000000004,
"min": 1.3886848,
"max": 2.5032069,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026250657400000006,
"min": 0.00026250657400000006,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003937598610000001,
"min": 0.003937598610000001,
"max": 0.12033036931000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01009493786841631,
"min": 0.00989051628857851,
"max": 0.38469764590263367,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15142406523227692,
"min": 0.1384672224521637,
"max": 2.6928834915161133,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 340.69662921348316,
"min": 340.69662921348316,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30322.0,
"min": 15984.0,
"max": 33368.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5694044734654802,
"min": -1.0000000521540642,
"max": 1.570194178733273,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 139.67699813842773,
"min": -29.998401559889317,
"max": 139.67699813842773,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5694044734654802,
"min": -1.0000000521540642,
"max": 1.570194178733273,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 139.67699813842773,
"min": -29.998401559889317,
"max": 139.67699813842773,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.035075204382981486,
"min": 0.035075204382981486,
"max": 7.523131267167628,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1216931900853524,
"min": 2.990722785354592,
"max": 120.37010027468204,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739167099",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739170373"
},
"total": 3273.592814341,
"count": 1,
"self": 0.6469643649998034,
"children": {
"run_training.setup": {
"total": 0.05465175899996666,
"count": 1,
"self": 0.05465175899996666
},
"TrainerController.start_learning": {
"total": 3272.8911982170002,
"count": 1,
"self": 2.3270588050186234,
"children": {
"TrainerController._reset_env": {
"total": 4.559165635999989,
"count": 1,
"self": 4.559165635999989
},
"TrainerController.advance": {
"total": 3265.909761824982,
"count": 63671,
"self": 2.5310432549777033,
"children": {
"env_step": {
"total": 2141.9583583690282,
"count": 63671,
"self": 1970.3168366070345,
"children": {
"SubprocessEnvManager._take_step": {
"total": 170.2174462800309,
"count": 63671,
"self": 7.3331047240052385,
"children": {
"TorchPolicy.evaluate": {
"total": 162.88434155602567,
"count": 62559,
"self": 162.88434155602567
}
}
},
"workers": {
"total": 1.4240754819628592,
"count": 63671,
"self": 0.0,
"children": {
"worker_root": {
"total": 3265.441432671991,
"count": 63671,
"is_parallel": true,
"self": 1473.663736878976,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.010189213999979074,
"count": 1,
"is_parallel": true,
"self": 0.00707990999978847,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0031093040001906047,
"count": 8,
"is_parallel": true,
"self": 0.0031093040001906047
}
}
},
"UnityEnvironment.step": {
"total": 0.06913875700001881,
"count": 1,
"is_parallel": true,
"self": 0.000713669999981903,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005669480000278782,
"count": 1,
"is_parallel": true,
"self": 0.0005669480000278782
},
"communicator.exchange": {
"total": 0.06548624800007019,
"count": 1,
"is_parallel": true,
"self": 0.06548624800007019
},
"steps_from_proto": {
"total": 0.0023718909999388416,
"count": 1,
"is_parallel": true,
"self": 0.0006500689999029419,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017218220000358997,
"count": 8,
"is_parallel": true,
"self": 0.0017218220000358997
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1791.7776957930153,
"count": 63670,
"is_parallel": true,
"self": 47.77004331708895,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 30.953911682986927,
"count": 63670,
"is_parallel": true,
"self": 30.953911682986927
},
"communicator.exchange": {
"total": 1586.512564845938,
"count": 63670,
"is_parallel": true,
"self": 1586.512564845938
},
"steps_from_proto": {
"total": 126.54117594700142,
"count": 63670,
"is_parallel": true,
"self": 27.49513827184512,
"children": {
"_process_rank_one_or_two_observation": {
"total": 99.0460376751563,
"count": 509360,
"is_parallel": true,
"self": 99.0460376751563
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1121.4203602009761,
"count": 63671,
"self": 4.7135477689412255,
"children": {
"process_trajectory": {
"total": 165.9967078570387,
"count": 63671,
"self": 165.5726975390387,
"children": {
"RLTrainer._checkpoint": {
"total": 0.42401031800000055,
"count": 2,
"self": 0.42401031800000055
}
}
},
"_update_policy": {
"total": 950.7101045749962,
"count": 451,
"self": 367.5822639650088,
"children": {
"TorchPPOOptimizer.update": {
"total": 583.1278406099874,
"count": 22824,
"self": 583.1278406099874
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.539999155094847e-07,
"count": 1,
"self": 9.539999155094847e-07
},
"TrainerController._save_models": {
"total": 0.09521099699986735,
"count": 1,
"self": 0.002019220999954996,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09319177599991235,
"count": 1,
"self": 0.09319177599991235
}
}
}
}
}
}
}