ppo-Pyramids / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.9556476473808289,
"min": 0.9556476473808289,
"max": 1.4422943592071533,
"count": 3
},
"Pyramids.Policy.Entropy.sum": {
"value": 28623.55859375,
"min": 28623.55859375,
"max": 43753.44140625,
"count": 3
},
"Pyramids.Step.mean": {
"value": 89874.0,
"min": 29947.0,
"max": 89874.0,
"count": 3
},
"Pyramids.Step.sum": {
"value": 89874.0,
"min": 29947.0,
"max": 89874.0,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.07488056272268295,
"min": -0.07488056272268295,
"max": 0.1123107448220253,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -17.97133445739746,
"min": -17.97133445739746,
"max": 26.617647171020508,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.25155574083328247,
"min": 0.25155574083328247,
"max": 0.5610938668251038,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 60.37337875366211,
"min": 60.37337875366211,
"max": 132.979248046875,
"count": 3
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0710034301075805,
"min": 0.07016470195710733,
"max": 0.07166829964202696,
"count": 3
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.7100343010758051,
"min": 0.5016780974941888,
"max": 0.7718117215281807,
"count": 3
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.001017838796200628,
"min": 0.0007846690836464002,
"max": 0.017596790535433636,
"count": 3
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.01017838796200628,
"min": 0.008631359920110403,
"max": 0.12317753374803546,
"count": 3
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.3974075342e-05,
"min": 7.3974075342e-05,
"max": 0.0002523313016038571,
"count": 3
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0007397407534199999,
"min": 0.0007397407534199999,
"max": 0.0018188854937050002,
"count": 3
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.124658,
"min": 0.124658,
"max": 0.1841104285714286,
"count": 3
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.24658,
"min": 1.24658,
"max": 1.7062950000000001,
"count": 3
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0024733342,
"min": 0.0024733342,
"max": 0.008412631814285714,
"count": 3
},
"Pyramids.Policy.Beta.sum": {
"value": 0.024733342,
"min": 0.024733342,
"max": 0.060678870499999996,
"count": 3
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.17118653655052185,
"min": 0.17118653655052185,
"max": 0.7077566981315613,
"count": 3
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 1.7118654251098633,
"min": 1.7118654251098633,
"max": 4.954297065734863,
"count": 3
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 979.71875,
"min": 979.71875,
"max": 999.0,
"count": 3
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31351.0,
"min": 16746.0,
"max": 31968.0,
"count": 3
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.918168802279979,
"min": -0.999962551984936,
"max": -0.8683529908166212,
"count": 3
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -29.381401672959328,
"min": -31.998801663517952,
"max": -14.76200084388256,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.918168802279979,
"min": -0.999962551984936,
"max": -0.8683529908166212,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -29.381401672959328,
"min": -31.998801663517952,
"max": -14.76200084388256,
"count": 3
},
"Pyramids.Policy.RndReward.mean": {
"value": 1.9300726288929582,
"min": 1.9300726288929582,
"max": 14.233673143036226,
"count": 3
},
"Pyramids.Policy.RndReward.sum": {
"value": 61.76232412457466,
"min": 61.76232412457466,
"max": 241.97244343161583,
"count": 3
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704477861",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1704478166"
},
"total": 305.7676713290002,
"count": 1,
"self": 0.5986219640003583,
"children": {
"run_training.setup": {
"total": 0.0989554739999221,
"count": 1,
"self": 0.0989554739999221
},
"TrainerController.start_learning": {
"total": 305.07009389099994,
"count": 1,
"self": 0.23151778401279444,
"children": {
"TrainerController._reset_env": {
"total": 4.216463011999622,
"count": 1,
"self": 4.216463011999622
},
"TrainerController.advance": {
"total": 300.5260965539869,
"count": 6287,
"self": 0.26794113095593275,
"children": {
"env_step": {
"total": 184.0304310240249,
"count": 6287,
"self": 167.02696551102827,
"children": {
"SubprocessEnvManager._take_step": {
"total": 16.87099482900294,
"count": 6287,
"self": 0.7223682099997859,
"children": {
"TorchPolicy.evaluate": {
"total": 16.148626619003153,
"count": 6279,
"self": 16.148626619003153
}
}
},
"workers": {
"total": 0.132470683993688,
"count": 6287,
"self": 0.0,
"children": {
"worker_root": {
"total": 304.50560149399416,
"count": 6287,
"is_parallel": true,
"self": 154.14383546399404,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007514500000070257,
"count": 1,
"is_parallel": true,
"self": 0.004867936000209738,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0026465639998605184,
"count": 8,
"is_parallel": true,
"self": 0.0026465639998605184
}
}
},
"UnityEnvironment.step": {
"total": 0.06601605700006985,
"count": 1,
"is_parallel": true,
"self": 0.000723966999885306,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00052602100004151,
"count": 1,
"is_parallel": true,
"self": 0.00052602100004151
},
"communicator.exchange": {
"total": 0.06277542399993763,
"count": 1,
"is_parallel": true,
"self": 0.06277542399993763
},
"steps_from_proto": {
"total": 0.0019906450002054044,
"count": 1,
"is_parallel": true,
"self": 0.0004215439994368353,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001569101000768569,
"count": 8,
"is_parallel": true,
"self": 0.001569101000768569
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 150.36176603000013,
"count": 6286,
"is_parallel": true,
"self": 4.671901029020319,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.76358246400423,
"count": 6286,
"is_parallel": true,
"self": 2.76358246400423
},
"communicator.exchange": {
"total": 129.9208846049819,
"count": 6286,
"is_parallel": true,
"self": 129.9208846049819
},
"steps_from_proto": {
"total": 13.005397931993684,
"count": 6286,
"is_parallel": true,
"self": 2.858863502987788,
"children": {
"_process_rank_one_or_two_observation": {
"total": 10.146534429005897,
"count": 50288,
"is_parallel": true,
"self": 10.146534429005897
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 116.22772439900609,
"count": 6287,
"self": 0.3249029690041425,
"children": {
"process_trajectory": {
"total": 16.562552188004247,
"count": 6287,
"self": 16.562552188004247
},
"_update_policy": {
"total": 99.3402692419977,
"count": 33,
"self": 39.61665144399376,
"children": {
"TorchPPOOptimizer.update": {
"total": 59.72361779800394,
"count": 2319,
"self": 59.72361779800394
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0430003385408781e-06,
"count": 1,
"self": 1.0430003385408781e-06
},
"TrainerController._save_models": {
"total": 0.09601549800026987,
"count": 1,
"self": 0.0020089009999537666,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0940065970003161,
"count": 1,
"self": 0.0940065970003161
}
}
}
}
}
}
}
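For reference, the snippet below is a minimal sketch (not part of the run output) of how this timers.json can be inspected with plain Python: it loads the file, prints each gauge's value/min/max/count, and walks the nested timer tree to show where the roughly 305.8 s of wall-clock time was spent. The file path and the helper name `walk` are assumptions for illustration; adjust the path to wherever the file sits in your run directory.

import json

# Assumed location of this file relative to the working directory.
with open("run_logs/timers.json") as f:
    timers = json.load(f)

# Each gauge records the last value plus min/max/count for one ML-Agents stat.
for name, gauge in timers["gauges"].items():
    print(f"{name}: last={gauge['value']:.4g} "
          f"min={gauge['min']:.4g} max={gauge['max']:.4g} (n={gauge['count']})")

def walk(name, node, depth=0):
    """Recursively print the timer tree: total seconds and call count per block."""
    print(f"{'  ' * depth}{name}: {node.get('total', 0.0):.3f}s "
          f"(count={node.get('count', 0)})")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1)

# The root object itself carries 'total', 'count', 'self' and 'children'.
walk(timers.get("name", "root"), timers)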