{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6659862995147705,
"min": 0.662655770778656,
"max": 1.460771083831787,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 20192.705078125,
"min": 19986.283203125,
"max": 44313.953125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989978.0,
"min": 29952.0,
"max": 989978.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989978.0,
"min": 29952.0,
"max": 989978.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.21394366025924683,
"min": -0.18412478268146515,
"max": 0.2318175584077835,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 54.9835205078125,
"min": -43.6375732421875,
"max": 58.88166046142578,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.14062947034835815,
"min": -0.00036451363121159375,
"max": 0.3908810317516327,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 36.14177322387695,
"min": -0.0914929211139679,
"max": 93.81144714355469,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06647102083673821,
"min": 0.06475241046091698,
"max": 0.07217839323718744,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.930594291714335,
"min": 0.4816577655871081,
"max": 1.0338769196920718,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01696365754205366,
"min": 0.0001342474403295256,
"max": 0.01696365754205366,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23749120558875123,
"min": 0.0017452167242838327,
"max": 0.23749120558875123,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.464826083185713e-06,
"min": 7.464826083185713e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010450756516459998,
"min": 0.00010450756516459998,
"max": 0.003507159230946999,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248824285714288,
"min": 0.10248824285714288,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4348354000000003,
"min": 1.3886848,
"max": 2.5690530000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025857546142857144,
"min": 0.00025857546142857144,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00362005646,
"min": 0.00362005646,
"max": 0.1169283947,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00761911878362298,
"min": 0.007568482309579849,
"max": 0.46073487401008606,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.106667660176754,
"min": 0.10595875233411789,
"max": 3.225144147872925,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 582.88,
"min": 582.88,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29144.0,
"min": 15984.0,
"max": 32274.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.7769319699704647,
"min": -1.0000000521540642,
"max": 0.9816897537331192,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 38.846598498523235,
"min": -31.99760167300701,
"max": 48.10279793292284,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.7769319699704647,
"min": -1.0000000521540642,
"max": 0.9816897537331192,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 38.846598498523235,
"min": -31.99760167300701,
"max": 48.10279793292284,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04596046161139384,
"min": 0.04596046161139384,
"max": 9.462954918853939,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.298023080569692,
"min": 2.298023080569692,
"max": 151.40727870166302,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673718402",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673720321"
},
"total": 1918.401975533,
"count": 1,
"self": 0.4790822089998983,
"children": {
"run_training.setup": {
"total": 0.12037894500008406,
"count": 1,
"self": 0.12037894500008406
},
"TrainerController.start_learning": {
"total": 1917.802514379,
"count": 1,
"self": 1.1754540659039776,
"children": {
"TrainerController._reset_env": {
"total": 6.365599019999991,
"count": 1,
"self": 6.365599019999991
},
"TrainerController.advance": {
"total": 1910.1741313420966,
"count": 63345,
"self": 1.3113069622431794,
"children": {
"env_step": {
"total": 1246.8023947209226,
"count": 63345,
"self": 1143.5291146219647,
"children": {
"SubprocessEnvManager._take_step": {
"total": 102.50529609706837,
"count": 63345,
"self": 4.360987085010493,
"children": {
"TorchPolicy.evaluate": {
"total": 98.14430901205787,
"count": 62567,
"self": 33.504128435962684,
"children": {
"TorchPolicy.sample_actions": {
"total": 64.64018057609519,
"count": 62567,
"self": 64.64018057609519
}
}
}
}
},
"workers": {
"total": 0.7679840018895447,
"count": 63345,
"self": 0.0,
"children": {
"worker_root": {
"total": 1913.595840962099,
"count": 63345,
"is_parallel": true,
"self": 869.4338981230817,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001822179000100732,
"count": 1,
"is_parallel": true,
"self": 0.0006873389993415913,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011348400007591408,
"count": 8,
"is_parallel": true,
"self": 0.0011348400007591408
}
}
},
"UnityEnvironment.step": {
"total": 0.057410238000102254,
"count": 1,
"is_parallel": true,
"self": 0.0005898330000491114,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048449199994138326,
"count": 1,
"is_parallel": true,
"self": 0.00048449199994138326
},
"communicator.exchange": {
"total": 0.05456667299995388,
"count": 1,
"is_parallel": true,
"self": 0.05456667299995388
},
"steps_from_proto": {
"total": 0.001769240000157879,
"count": 1,
"is_parallel": true,
"self": 0.00046247799991760985,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013067620002402691,
"count": 8,
"is_parallel": true,
"self": 0.0013067620002402691
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1044.1619428390172,
"count": 63344,
"is_parallel": true,
"self": 27.87709308397143,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.74193407423172,
"count": 63344,
"is_parallel": true,
"self": 23.74193407423172
},
"communicator.exchange": {
"total": 887.9755126498958,
"count": 63344,
"is_parallel": true,
"self": 887.9755126498958
},
"steps_from_proto": {
"total": 104.56740303091829,
"count": 63344,
"is_parallel": true,
"self": 22.882991207988653,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.68441182292963,
"count": 506752,
"is_parallel": true,
"self": 81.68441182292963
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 662.0604296589308,
"count": 63345,
"self": 2.358924271959495,
"children": {
"process_trajectory": {
"total": 150.02767638796877,
"count": 63345,
"self": 149.83631576896914,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19136061899962442,
"count": 2,
"self": 0.19136061899962442
}
}
},
"_update_policy": {
"total": 509.6738289990026,
"count": 449,
"self": 198.67184173601572,
"children": {
"TorchPPOOptimizer.update": {
"total": 311.00198726298686,
"count": 22797,
"self": 311.00198726298686
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.339992175227962e-07,
"count": 1,
"self": 9.339992175227962e-07
},
"TrainerController._save_models": {
"total": 0.08732901700022921,
"count": 1,
"self": 0.0014011420007591369,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08592787499947008,
"count": 1,
"self": 0.08592787499947008
}
}
}
}
}
}
}