{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.34996655583381653,
"min": 0.3432227075099945,
"max": 1.4662762880325317,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10543.7919921875,
"min": 10373.5634765625,
"max": 44480.95703125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989942.0,
"min": 29952.0,
"max": 989942.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989942.0,
"min": 29952.0,
"max": 989942.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6195990443229675,
"min": -0.07766404002904892,
"max": 0.6195990443229675,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 174.7269287109375,
"min": -18.71703338623047,
"max": 174.7269287109375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0073569887317717075,
"min": -0.009385962039232254,
"max": 0.4073856472969055,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -2.0746707916259766,
"min": -2.57175350189209,
"max": 96.55039978027344,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07162530256097115,
"min": 0.06564826246107086,
"max": 0.07278591522838909,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0027542358535961,
"min": 0.5042243629331692,
"max": 1.0917887284258363,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014576663561109898,
"min": 0.0009927405611326347,
"max": 0.017291181238154724,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20407328985553858,
"min": 0.011912886733591617,
"max": 0.2444233798727795,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.497376072335709e-06,
"min": 7.497376072335709e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010496326501269993,
"min": 0.00010496326501269993,
"max": 0.0035090444303185996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249909285714286,
"min": 0.10249909285714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4349873,
"min": 1.3886848,
"max": 2.5696814,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002596593764285713,
"min": 0.0002596593764285713,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003635231269999998,
"min": 0.003635231269999998,
"max": 0.11699117185999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013611873611807823,
"min": 0.013611873611807823,
"max": 0.5140193700790405,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19056622684001923,
"min": 0.19056622684001923,
"max": 3.5981357097625732,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 324.4193548387097,
"min": 315.8888888888889,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30171.0,
"min": 15984.0,
"max": 32605.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6325634248474592,
"min": -1.0000000521540642,
"max": 1.6396444239550167,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 151.8283985108137,
"min": -30.356601633131504,
"max": 151.8283985108137,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6325634248474592,
"min": -1.0000000521540642,
"max": 1.6396444239550167,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 151.8283985108137,
"min": -30.356601633131504,
"max": 151.8283985108137,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04612936625277163,
"min": 0.04612936625277163,
"max": 11.1210004594177,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.2900310615077615,
"min": 4.18498034554068,
"max": 177.9360073506832,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680016725",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1680018870"
},
"total": 2144.9299244940003,
"count": 1,
"self": 0.47590559699983714,
"children": {
"run_training.setup": {
"total": 0.10975345500014555,
"count": 1,
"self": 0.10975345500014555
},
"TrainerController.start_learning": {
"total": 2144.344265442,
"count": 1,
"self": 1.304164974987998,
"children": {
"TrainerController._reset_env": {
"total": 7.236001647999956,
"count": 1,
"self": 7.236001647999956
},
"TrainerController.advance": {
"total": 2135.695587766012,
"count": 63942,
"self": 1.3600496560311512,
"children": {
"env_step": {
"total": 1513.1447930210152,
"count": 63942,
"self": 1406.445093993949,
"children": {
"SubprocessEnvManager._take_step": {
"total": 105.91002619699066,
"count": 63942,
"self": 4.643263181010525,
"children": {
"TorchPolicy.evaluate": {
"total": 101.26676301598013,
"count": 62569,
"self": 101.26676301598013
}
}
},
"workers": {
"total": 0.7896728300754603,
"count": 63942,
"self": 0.0,
"children": {
"worker_root": {
"total": 2139.9777962070452,
"count": 63942,
"is_parallel": true,
"self": 844.4428944070901,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001675119000083214,
"count": 1,
"is_parallel": true,
"self": 0.0005431919998954982,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011319270001877157,
"count": 8,
"is_parallel": true,
"self": 0.0011319270001877157
}
}
},
"UnityEnvironment.step": {
"total": 0.04866774999982226,
"count": 1,
"is_parallel": true,
"self": 0.0005001449997052987,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045261800005391706,
"count": 1,
"is_parallel": true,
"self": 0.00045261800005391706
},
"communicator.exchange": {
"total": 0.04604857799995443,
"count": 1,
"is_parallel": true,
"self": 0.04604857799995443
},
"steps_from_proto": {
"total": 0.0016664090001086151,
"count": 1,
"is_parallel": true,
"self": 0.0003502619999835588,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013161470001250564,
"count": 8,
"is_parallel": true,
"self": 0.0013161470001250564
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1295.5349017999552,
"count": 63941,
"is_parallel": true,
"self": 30.577891831843317,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.20192732600026,
"count": 63941,
"is_parallel": true,
"self": 22.20192732600026
},
"communicator.exchange": {
"total": 1153.5636303531026,
"count": 63941,
"is_parallel": true,
"self": 1153.5636303531026
},
"steps_from_proto": {
"total": 89.19145228900902,
"count": 63941,
"is_parallel": true,
"self": 18.807904359965505,
"children": {
"_process_rank_one_or_two_observation": {
"total": 70.38354792904352,
"count": 511528,
"is_parallel": true,
"self": 70.38354792904352
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 621.1907450889655,
"count": 63942,
"self": 2.5672509439223177,
"children": {
"process_trajectory": {
"total": 116.21951173404887,
"count": 63942,
"self": 116.02119680204896,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1983149319999029,
"count": 2,
"self": 0.1983149319999029
}
}
},
"_update_policy": {
"total": 502.40398241099433,
"count": 454,
"self": 317.65735919700865,
"children": {
"TorchPPOOptimizer.update": {
"total": 184.74662321398569,
"count": 22830,
"self": 184.74662321398569
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.509999472356867e-07,
"count": 1,
"self": 9.509999472356867e-07
},
"TrainerController._save_models": {
"total": 0.10851010200030942,
"count": 1,
"self": 0.0014914410003257217,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1070186609999837,
"count": 1,
"self": 0.1070186609999837
}
}
}
}
}
}
}