{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.33084580302238464,
"min": 0.33084580302238464,
"max": 1.451694369316101,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9909.4931640625,
"min": 9909.4931640625,
"max": 44038.6015625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989981.0,
"min": 29952.0,
"max": 989981.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989981.0,
"min": 29952.0,
"max": 989981.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5402618050575256,
"min": -0.12139929831027985,
"max": 0.5834379196166992,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 149.65252685546875,
"min": -29.257230758666992,
"max": 161.02886962890625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.2683824896812439,
"min": -0.07650831341743469,
"max": 0.46127843856811523,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 74.34194946289062,
"min": -21.116294860839844,
"max": 109.32299041748047,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06877145826873951,
"min": 0.062181353535769236,
"max": 0.07517705909309111,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9628004157623531,
"min": 0.48514814709645065,
"max": 1.078751832064168,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016013096445628133,
"min": 0.0005506212920048209,
"max": 0.02007227802823763,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22418335023879385,
"min": 0.004404970336038567,
"max": 0.2810118923953268,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.656076019435712e-06,
"min": 7.656076019435712e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010718506427209997,
"min": 0.00010718506427209997,
"max": 0.0036329686890104996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10255199285714285,
"min": 0.10255199285714285,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4357278999999998,
"min": 1.3691136000000002,
"max": 2.6109895000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002649440864285714,
"min": 0.0002649440864285714,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037092172099999995,
"min": 0.0037092172099999995,
"max": 0.12111785105,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005724163260310888,
"min": 0.005724163260310888,
"max": 0.4692079722881317,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.08013828843832016,
"min": 0.08013828843832016,
"max": 3.2844557762145996,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 357.7682926829268,
"min": 349.575,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29337.0,
"min": 15984.0,
"max": 32741.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5622023850320332,
"min": -1.0000000521540642,
"max": 1.6287752879254611,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 129.66279795765877,
"min": -32.000001668930054,
"max": 134.19999831169844,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5622023850320332,
"min": -1.0000000521540642,
"max": 1.6287752879254611,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 129.66279795765877,
"min": -32.000001668930054,
"max": 134.19999831169844,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.021306698276108715,
"min": 0.021306698276108715,
"max": 9.634741872549057,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.7684559569170233,
"min": 1.7376278094743611,
"max": 154.1558699607849,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673422819",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673424859"
},
"total": 2039.576549121,
"count": 1,
"self": 0.4490966979997211,
"children": {
"run_training.setup": {
"total": 0.10300787500000297,
"count": 1,
"self": 0.10300787500000297
},
"TrainerController.start_learning": {
"total": 2039.0244445480002,
"count": 1,
"self": 1.299920134961667,
"children": {
"TrainerController._reset_env": {
"total": 6.410990226999729,
"count": 1,
"self": 6.410990226999729
},
"TrainerController.advance": {
"total": 2031.2268430340391,
"count": 63756,
"self": 1.2935023010491022,
"children": {
"env_step": {
"total": 1357.5003929440436,
"count": 63756,
"self": 1253.4738230709995,
"children": {
"SubprocessEnvManager._take_step": {
"total": 103.26141341902803,
"count": 63756,
"self": 4.256858765043944,
"children": {
"TorchPolicy.evaluate": {
"total": 99.00455465398409,
"count": 62574,
"self": 33.157903889940826,
"children": {
"TorchPolicy.sample_actions": {
"total": 65.84665076404326,
"count": 62574,
"self": 65.84665076404326
}
}
}
}
},
"workers": {
"total": 0.7651564540160507,
"count": 63756,
"self": 0.0,
"children": {
"worker_root": {
"total": 2035.0463582510242,
"count": 63756,
"is_parallel": true,
"self": 880.1083707901275,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017176030000882747,
"count": 1,
"is_parallel": true,
"self": 0.0006091130007916945,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011084899992965802,
"count": 8,
"is_parallel": true,
"self": 0.0011084899992965802
}
}
},
"UnityEnvironment.step": {
"total": 0.04340030499997738,
"count": 1,
"is_parallel": true,
"self": 0.0005454220004139643,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044882099973619916,
"count": 1,
"is_parallel": true,
"self": 0.00044882099973619916
},
"communicator.exchange": {
"total": 0.040771745999791165,
"count": 1,
"is_parallel": true,
"self": 0.040771745999791165
},
"steps_from_proto": {
"total": 0.0016343160000360513,
"count": 1,
"is_parallel": true,
"self": 0.00042067900039910455,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012136369996369467,
"count": 8,
"is_parallel": true,
"self": 0.0012136369996369467
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1154.9379874608967,
"count": 63755,
"is_parallel": true,
"self": 28.09537413391763,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.28525353402165,
"count": 63755,
"is_parallel": true,
"self": 22.28525353402165
},
"communicator.exchange": {
"total": 1012.2839293409129,
"count": 63755,
"is_parallel": true,
"self": 1012.2839293409129
},
"steps_from_proto": {
"total": 92.27343045204452,
"count": 63755,
"is_parallel": true,
"self": 21.521308093002972,
"children": {
"_process_rank_one_or_two_observation": {
"total": 70.75212235904155,
"count": 510040,
"is_parallel": true,
"self": 70.75212235904155
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 672.4329477889464,
"count": 63756,
"self": 2.322999409833301,
"children": {
"process_trajectory": {
"total": 143.4010899911127,
"count": 63756,
"self": 143.21102626711217,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19006372400053806,
"count": 2,
"self": 0.19006372400053806
}
}
},
"_update_policy": {
"total": 526.7088583880004,
"count": 444,
"self": 204.7567504710073,
"children": {
"TorchPPOOptimizer.update": {
"total": 321.9521079169931,
"count": 22785,
"self": 321.9521079169931
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.269997462979518e-07,
"count": 1,
"self": 9.269997462979518e-07
},
"TrainerController._save_models": {
"total": 0.08669022499998391,
"count": 1,
"self": 0.0013451669992718962,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08534505800071202,
"count": 1,
"self": 0.08534505800071202
}
}
}
}
}
}
}