ppo-Pyramids / run_logs / timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3641301393508911,
"min": 0.3641301393508911,
"max": 1.4050543308258057,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10906.42578125,
"min": 10906.42578125,
"max": 42623.7265625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989970.0,
"min": 29952.0,
"max": 989970.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989970.0,
"min": 29952.0,
"max": 989970.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5595448613166809,
"min": -0.2719196081161499,
"max": 0.6096700429916382,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 154.43438720703125,
"min": -64.4449462890625,
"max": 176.19464111328125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.013961268588900566,
"min": 8.119442645693198e-05,
"max": 0.3795502781867981,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.8533101081848145,
"min": 0.021760106086730957,
"max": 91.47161865234375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06765645422315009,
"min": 0.06560947123934993,
"max": 0.07286513396751843,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9471903591241012,
"min": 0.5032966676447139,
"max": 1.0929770095127764,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014631277903655534,
"min": 0.000304046325296338,
"max": 0.01726568201168751,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20483789065117747,
"min": 0.0036485559035560563,
"max": 0.24171954816362515,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.654983162657142e-06,
"min": 7.654983162657142e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010716976427719999,
"min": 0.00010716976427719999,
"max": 0.0032561888146037995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10255162857142856,
"min": 0.10255162857142856,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4357228,
"min": 1.3886848,
"max": 2.4426691000000007,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002649076942857143,
"min": 0.0002649076942857143,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00370870772,
"min": 0.00370870772,
"max": 0.10856108038000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012917337007820606,
"min": 0.012917337007820606,
"max": 0.5144421458244324,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18084271252155304,
"min": 0.18084271252155304,
"max": 3.601094961166382,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 337.7125,
"min": 311.8349514563107,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27017.0,
"min": 15984.0,
"max": 33958.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.612269982881844,
"min": -1.0000000521540642,
"max": 1.649322317786587,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 128.98159863054752,
"min": -29.997201673686504,
"max": 169.88019873201847,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.612269982881844,
"min": -1.0000000521540642,
"max": 1.649322317786587,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 128.98159863054752,
"min": -29.997201673686504,
"max": 169.88019873201847,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.045305376526084726,
"min": 0.04458843751525669,
"max": 10.155065756291151,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.6244301220867783,
"min": 3.6244301220867783,
"max": 162.48105210065842,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1698683297",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1698685432"
},
"total": 2134.288449399,
"count": 1,
"self": 0.4819775200003278,
"children": {
"run_training.setup": {
"total": 0.04327120899998249,
"count": 1,
"self": 0.04327120899998249
},
"TrainerController.start_learning": {
"total": 2133.7632006699996,
"count": 1,
"self": 1.2120368389651048,
"children": {
"TrainerController._reset_env": {
"total": 8.621683996000002,
"count": 1,
"self": 8.621683996000002
},
"TrainerController.advance": {
"total": 2123.8573723470354,
"count": 63757,
"self": 1.2793689040736353,
"children": {
"env_step": {
"total": 1508.4827701549607,
"count": 63757,
"self": 1383.059610814998,
"children": {
"SubprocessEnvManager._take_step": {
"total": 124.67727112799625,
"count": 63757,
"self": 4.3879760519782565,
"children": {
"TorchPolicy.evaluate": {
"total": 120.28929507601799,
"count": 62556,
"self": 120.28929507601799
}
}
},
"workers": {
"total": 0.7458882119664167,
"count": 63757,
"self": 0.0,
"children": {
"worker_root": {
"total": 2129.605143176998,
"count": 63757,
"is_parallel": true,
"self": 851.7905133649972,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004156932000000779,
"count": 1,
"is_parallel": true,
"self": 0.003022574000027589,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00113435799997319,
"count": 8,
"is_parallel": true,
"self": 0.00113435799997319
}
}
},
"UnityEnvironment.step": {
"total": 0.07669269299998405,
"count": 1,
"is_parallel": true,
"self": 0.0006133820000115975,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00046281900000622045,
"count": 1,
"is_parallel": true,
"self": 0.00046281900000622045
},
"communicator.exchange": {
"total": 0.07399142699998151,
"count": 1,
"is_parallel": true,
"self": 0.07399142699998151
},
"steps_from_proto": {
"total": 0.0016250649999847155,
"count": 1,
"is_parallel": true,
"self": 0.00034524800000212963,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001279816999982586,
"count": 8,
"is_parallel": true,
"self": 0.001279816999982586
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1277.8146298120007,
"count": 63756,
"is_parallel": true,
"self": 33.952900739011284,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.034607150987455,
"count": 63756,
"is_parallel": true,
"self": 23.034607150987455
},
"communicator.exchange": {
"total": 1125.8635872029836,
"count": 63756,
"is_parallel": true,
"self": 1125.8635872029836
},
"steps_from_proto": {
"total": 94.96353471901841,
"count": 63756,
"is_parallel": true,
"self": 18.37560034100818,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.58793437801023,
"count": 510048,
"is_parallel": true,
"self": 76.58793437801023
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 614.0952332880013,
"count": 63757,
"self": 2.3368958170063934,
"children": {
"process_trajectory": {
"total": 117.68973525699593,
"count": 63757,
"self": 117.48647625099588,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20325900600005298,
"count": 2,
"self": 0.20325900600005298
}
}
},
"_update_policy": {
"total": 494.06860221399893,
"count": 443,
"self": 296.291460360993,
"children": {
"TorchPPOOptimizer.update": {
"total": 197.77714185300587,
"count": 22854,
"self": 197.77714185300587
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0649996511347126e-06,
"count": 1,
"self": 1.0649996511347126e-06
},
"TrainerController._save_models": {
"total": 0.0721064229996955,
"count": 1,
"self": 0.0013564509999923757,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07074997199970312,
"count": 1,
"self": 0.07074997199970312
}
}
}
}
}
}
}
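
For reference, a minimal sketch of how this timer tree and the gauges can be inspected, assuming the file is saved locally as run_logs/timers.json; the path and the walk helper below are illustrative, not part of the ML-Agents API:

import json

def walk(node, name="root", depth=0):
    # Recursively print each timer block's name, total seconds, and call count.
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.3f}s (count={count})")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

with open("run_logs/timers.json") as f:
    root = json.load(f)

walk(root)

# Gauges such as the cumulative reward live under "gauges",
# each with value / min / max / count fields.
reward = root["gauges"]["Pyramids.Environment.CumulativeReward.mean"]
print("final mean reward:", reward["value"], "max:", reward["max"])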