{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.7421780824661255,
"min": 0.7410928010940552,
"max": 1.5356957912445068,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 22502.83984375,
"min": 22257.6015625,
"max": 46586.8671875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989989.0,
"min": 29952.0,
"max": 989989.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989989.0,
"min": 29952.0,
"max": 989989.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.01910206861793995,
"min": -0.10067018121480942,
"max": 0.06902420520782471,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -4.660904884338379,
"min": -24.160842895507812,
"max": 16.358736038208008,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.013224447146058083,
"min": 0.007502286229282618,
"max": 0.23830826580524445,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.2267651557922363,
"min": 1.8380601406097412,
"max": 56.47905731201172,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0686320709943832,
"min": 0.0637754577364247,
"max": 0.0722533695646342,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9608489939213649,
"min": 0.4856038780219923,
"max": 1.0586661310665602,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0032969407652952325,
"min": 5.543365844056117e-05,
"max": 0.004552297944195787,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.04615717071413326,
"min": 0.0007760712181678564,
"max": 0.06373217121874102,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.741425990985712e-06,
"min": 7.741425990985712e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010837996387379998,
"min": 0.00010837996387379998,
"max": 0.0035069333310222997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10258044285714286,
"min": 0.10258044285714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4361262000000001,
"min": 1.3691136000000002,
"max": 2.5689777,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002677862414285714,
"min": 0.0002677862414285714,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00374900738,
"min": 0.00374900738,
"max": 0.11692087223000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010056658647954464,
"min": 0.009605438448488712,
"max": 0.3359794616699219,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14079321920871735,
"min": 0.13447614014148712,
"max": 2.351856231689453,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 915.1212121212121,
"min": 866.2647058823529,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30199.0,
"min": 15984.0,
"max": 33111.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.4307394409269998,
"min": -1.0000000521540642,
"max": -0.21959416721673572,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -14.214401550590992,
"min": -32.000001668930054,
"max": -7.466201685369015,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.4307394409269998,
"min": -1.0000000521540642,
"max": -0.21959416721673572,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -14.214401550590992,
"min": -32.000001668930054,
"max": -7.466201685369015,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.09495396871881728,
"min": 0.09074013905859936,
"max": 6.979924153536558,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1334809677209705,
"min": 2.6644747401587665,
"max": 111.67878645658493,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677163630",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677165637"
},
"total": 2007.2853203290006,
"count": 1,
"self": 0.4251379639999868,
"children": {
"run_training.setup": {
"total": 0.10730800600003931,
"count": 1,
"self": 0.10730800600003931
},
"TrainerController.start_learning": {
"total": 2006.7528743590005,
"count": 1,
"self": 1.174819435047084,
"children": {
"TrainerController._reset_env": {
"total": 6.070760311999948,
"count": 1,
"self": 6.070760311999948
},
"TrainerController.advance": {
"total": 1999.4226062759535,
"count": 63182,
"self": 1.3013329139839698,
"children": {
"env_step": {
"total": 1290.1449385780134,
"count": 63182,
"self": 1179.948758292,
"children": {
"SubprocessEnvManager._take_step": {
"total": 109.44989321404137,
"count": 63182,
"self": 4.570359166961225,
"children": {
"TorchPolicy.evaluate": {
"total": 104.87953404708014,
"count": 62562,
"self": 35.792288428126085,
"children": {
"TorchPolicy.sample_actions": {
"total": 69.08724561895406,
"count": 62562,
"self": 69.08724561895406
}
}
}
}
},
"workers": {
"total": 0.7462870719718921,
"count": 63182,
"self": 0.0,
"children": {
"worker_root": {
"total": 2002.7802467120182,
"count": 63182,
"is_parallel": true,
"self": 930.5987186399761,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019465889999992214,
"count": 1,
"is_parallel": true,
"self": 0.0007252869995681976,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012213020004310238,
"count": 8,
"is_parallel": true,
"self": 0.0012213020004310238
}
}
},
"UnityEnvironment.step": {
"total": 0.045318216000396205,
"count": 1,
"is_parallel": true,
"self": 0.0005317750001267996,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006004040001243993,
"count": 1,
"is_parallel": true,
"self": 0.0006004040001243993
},
"communicator.exchange": {
"total": 0.04225714099993638,
"count": 1,
"is_parallel": true,
"self": 0.04225714099993638
},
"steps_from_proto": {
"total": 0.0019288960002086242,
"count": 1,
"is_parallel": true,
"self": 0.00047382300090248464,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014550729993061395,
"count": 8,
"is_parallel": true,
"self": 0.0014550729993061395
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1072.181528072042,
"count": 63181,
"is_parallel": true,
"self": 30.681191433087406,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.627788190978663,
"count": 63181,
"is_parallel": true,
"self": 23.627788190978663
},
"communicator.exchange": {
"total": 925.3367838889576,
"count": 63181,
"is_parallel": true,
"self": 925.3367838889576
},
"steps_from_proto": {
"total": 92.53576455901839,
"count": 63181,
"is_parallel": true,
"self": 21.82361187023298,
"children": {
"_process_rank_one_or_two_observation": {
"total": 70.71215268878541,
"count": 505448,
"is_parallel": true,
"self": 70.71215268878541
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 707.9763347839562,
"count": 63182,
"self": 2.3028546179571094,
"children": {
"process_trajectory": {
"total": 157.39933632800467,
"count": 63182,
"self": 157.21129393200408,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1880423960005828,
"count": 2,
"self": 0.1880423960005828
}
}
},
"_update_policy": {
"total": 548.2741438379944,
"count": 441,
"self": 208.31531062805516,
"children": {
"TorchPPOOptimizer.update": {
"total": 339.95883320993926,
"count": 22773,
"self": 339.95883320993926
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.600000001024455e-07,
"count": 1,
"self": 8.600000001024455e-07
},
"TrainerController._save_models": {
"total": 0.08468747599999915,
"count": 1,
"self": 0.0015221200001178659,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08316535599988129,
"count": 1,
"self": 0.08316535599988129
}
}
}
}
}
}
}