{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5536358952522278,
"min": 0.5536358952522278,
"max": 1.4719994068145752,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16600.21875,
"min": 16600.21875,
"max": 44654.57421875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989929.0,
"min": 29952.0,
"max": 989929.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989929.0,
"min": 29952.0,
"max": 989929.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.40102291107177734,
"min": -0.10054679214954376,
"max": 0.46341672539711,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 106.27107238769531,
"min": -24.131229400634766,
"max": 124.19568634033203,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.07217061519622803,
"min": -0.013855259865522385,
"max": 0.36753836274147034,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 19.125213623046875,
"min": -3.505380630493164,
"max": 87.1065902709961,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07057982016842633,
"min": 0.06544992738571806,
"max": 0.07327290778491823,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9881174823579687,
"min": 0.5026322048642182,
"max": 1.0467391969577875,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01480424762467876,
"min": 0.00011187936353033155,
"max": 0.015655174725947695,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20725946674550264,
"min": 0.0014544317258943101,
"max": 0.23482762088921544,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.280519001764287e-06,
"min": 7.280519001764287e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010192726602470002,
"min": 0.00010192726602470002,
"max": 0.0031183489605503996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10242680714285717,
"min": 0.10242680714285717,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4339753000000004,
"min": 1.3691136000000002,
"max": 2.3068781,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002524380335714287,
"min": 0.0002524380335714287,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035341324700000018,
"min": 0.0035341324700000018,
"max": 0.10396101504000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010832107625901699,
"min": 0.0103886304423213,
"max": 0.405208021402359,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15164950489997864,
"min": 0.14915169775485992,
"max": 2.836456060409546,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 467.43939393939394,
"min": 421.77142857142854,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30851.0,
"min": 15984.0,
"max": 32501.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3203848219956413,
"min": -1.0000000521540642,
"max": 1.512190876133514,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 87.14539825171232,
"min": -32.000001668930054,
"max": 99.80459782481194,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3203848219956413,
"min": -1.0000000521540642,
"max": 1.512190876133514,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 87.14539825171232,
"min": -32.000001668930054,
"max": 99.80459782481194,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.052024480067219964,
"min": 0.045472223973566935,
"max": 8.650074330158532,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.4336156844365178,
"min": 3.1830556781496853,
"max": 138.4011892825365,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677764328",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=PyramidsTraining --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677766751"
},
"total": 2423.354337842,
"count": 1,
"self": 0.47528475399985837,
"children": {
"run_training.setup": {
"total": 0.12194868599999609,
"count": 1,
"self": 0.12194868599999609
},
"TrainerController.start_learning": {
"total": 2422.757104402,
"count": 1,
"self": 1.722994369051321,
"children": {
"TrainerController._reset_env": {
"total": 7.761283838000054,
"count": 1,
"self": 7.761283838000054
},
"TrainerController.advance": {
"total": 2413.1758931339486,
"count": 63456,
"self": 1.8854746888787304,
"children": {
"env_step": {
"total": 1598.5845002260514,
"count": 63456,
"self": 1460.346743278019,
"children": {
"SubprocessEnvManager._take_step": {
"total": 137.1061632100966,
"count": 63456,
"self": 5.537586506103025,
"children": {
"TorchPolicy.evaluate": {
"total": 131.56857670399359,
"count": 62579,
"self": 44.77032392993965,
"children": {
"TorchPolicy.sample_actions": {
"total": 86.79825277405394,
"count": 62579,
"self": 86.79825277405394
}
}
}
}
},
"workers": {
"total": 1.131593737935873,
"count": 63456,
"self": 0.0,
"children": {
"worker_root": {
"total": 2416.7842942949533,
"count": 63456,
"is_parallel": true,
"self": 1092.647140573956,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002190231999975367,
"count": 1,
"is_parallel": true,
"self": 0.0007956629999625875,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013945690000127797,
"count": 8,
"is_parallel": true,
"self": 0.0013945690000127797
}
}
},
"UnityEnvironment.step": {
"total": 0.052954654999894046,
"count": 1,
"is_parallel": true,
"self": 0.0006355399998483335,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005366660000163392,
"count": 1,
"is_parallel": true,
"self": 0.0005366660000163392
},
"communicator.exchange": {
"total": 0.049987310999995316,
"count": 1,
"is_parallel": true,
"self": 0.049987310999995316
},
"steps_from_proto": {
"total": 0.0017951380000340578,
"count": 1,
"is_parallel": true,
"self": 0.0004531850001967541,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013419529998373037,
"count": 8,
"is_parallel": true,
"self": 0.0013419529998373037
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1324.1371537209973,
"count": 63455,
"is_parallel": true,
"self": 36.157067858092205,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.939201159963886,
"count": 63455,
"is_parallel": true,
"self": 25.939201159963886
},
"communicator.exchange": {
"total": 1156.3458138989974,
"count": 63455,
"is_parallel": true,
"self": 1156.3458138989974
},
"steps_from_proto": {
"total": 105.69507080394374,
"count": 63455,
"is_parallel": true,
"self": 26.214072903965643,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.48099789997809,
"count": 507640,
"is_parallel": true,
"self": 79.48099789997809
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 812.7059182190185,
"count": 63456,
"self": 3.0978785180509476,
"children": {
"process_trajectory": {
"total": 175.09665278697048,
"count": 63456,
"self": 174.83607694197053,
"children": {
"RLTrainer._checkpoint": {
"total": 0.26057584499994846,
"count": 2,
"self": 0.26057584499994846
}
}
},
"_update_policy": {
"total": 634.5113869139971,
"count": 432,
"self": 242.11269413500577,
"children": {
"TorchPPOOptimizer.update": {
"total": 392.3986927789913,
"count": 22884,
"self": 392.3986927789913
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.88000010693213e-07,
"count": 1,
"self": 9.88000010693213e-07
},
"TrainerController._save_models": {
"total": 0.09693207300006179,
"count": 1,
"self": 0.0018300320002708759,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09510204099979092,
"count": 1,
"self": 0.09510204099979092
}
}
}
}
}
}
}