{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3690907955169678,
"min": 0.3690907955169678,
"max": 0.46054497361183167,
"count": 7
},
"Pyramids.Policy.Entropy.sum": {
"value": 11078.62890625,
"min": 11078.62890625,
"max": 13823.7177734375,
"count": 7
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 301.4742268041237,
"min": 278.1636363636364,
"max": 332.9625,
"count": 7
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29243.0,
"min": 26637.0,
"max": 30793.0,
"count": 7
},
"Pyramids.Step.mean": {
"value": 989899.0,
"min": 809943.0,
"max": 989899.0,
"count": 7
},
"Pyramids.Step.sum": {
"value": 989899.0,
"min": 809943.0,
"max": 989899.0,
"count": 7
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6117339730262756,
"min": 0.5805515646934509,
"max": 0.652145504951477,
"count": 7
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 175.56764221191406,
"min": 156.11248779296875,
"max": 185.86146545410156,
"count": 7
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.005026842933148146,
"min": -0.017886633053421974,
"max": 0.005214397795498371,
"count": 7
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.4427039623260498,
"min": -5.097690582275391,
"max": 1.4427039623260498,
"count": 7
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6561877459895855,
"min": 1.6482744545061538,
"max": 1.723315299631239,
"count": 7
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 162.30639910697937,
"min": 133.3629986345768,
"max": 191.28799825906754,
"count": 7
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6561877459895855,
"min": 1.6482744545061538,
"max": 1.723315299631239,
"count": 7
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 162.30639910697937,
"min": 133.3629986345768,
"max": 191.28799825906754,
"count": 7
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03455416573602373,
"min": 0.03330021994193672,
"max": 0.041914856773655626,
"count": 7
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.386308242130326,
"min": 3.3531885418924503,
"max": 3.8358080207544845,
"count": 7
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06926667617969977,
"min": 0.0656328608889626,
"max": 0.07174140105967243,
"count": 7
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9697334665157968,
"min": 0.9188600524454764,
"max": 1.0351087275679067,
"count": 7
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01623379010372079,
"min": 0.014796004913827905,
"max": 0.016680958969421532,
"count": 7
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22727306145209109,
"min": 0.19234806387976278,
"max": 0.23698257621920976,
"count": 7
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.77710455052143e-06,
"min": 7.77710455052143e-06,
"max": 6.129377187646153e-05,
"count": 7
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010887946370730003,
"min": 0.00010887946370730003,
"max": 0.000796819034394,
"count": 7
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025923357142857,
"min": 0.1025923357142857,
"max": 0.12043123076923078,
"count": 7
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4362926999999999,
"min": 1.4362926999999999,
"max": 1.7173414,
"count": 7
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026897433785714304,
"min": 0.00026897433785714304,
"max": 0.002051079953846154,
"count": 7
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037656407300000025,
"min": 0.0037656407300000025,
"max": 0.026664039400000002,
"count": 7
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011005385778844357,
"min": 0.011005385778844357,
"max": 0.012201069854199886,
"count": 7
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15407539904117584,
"min": 0.15407539904117584,
"max": 0.18288809061050415,
"count": 7
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 7
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 7
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673946849",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673947632"
},
"total": 782.9304163909992,
"count": 1,
"self": 1.513138227998752,
"children": {
"run_training.setup": {
"total": 0.1443556410004021,
"count": 1,
"self": 0.1443556410004021
},
"TrainerController.start_learning": {
"total": 781.272922522,
"count": 1,
"self": 0.47133834608575853,
"children": {
"TrainerController._reset_env": {
"total": 5.516701714999726,
"count": 1,
"self": 5.516701714999726
},
"TrainerController.advance": {
"total": 775.1072918309155,
"count": 14322,
"self": 0.5090716710265042,
"children": {
"env_step": {
"total": 521.0917065080475,
"count": 14322,
"self": 492.06629796500874,
"children": {
"SubprocessEnvManager._take_step": {
"total": 28.704852470091282,
"count": 14322,
"self": 1.390311448074499,
"children": {
"TorchPolicy.evaluate": {
"total": 27.314541022016783,
"count": 13757,
"self": 6.127063125147288,
"children": {
"TorchPolicy.sample_actions": {
"total": 21.187477896869495,
"count": 13757,
"self": 21.187477896869495
}
}
}
}
},
"workers": {
"total": 0.320556072947511,
"count": 14322,
"self": 0.0,
"children": {
"worker_root": {
"total": 779.7088484159231,
"count": 14322,
"is_parallel": true,
"self": 322.68404762594946,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021394810000856523,
"count": 1,
"is_parallel": true,
"self": 0.0006972569999561529,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014422240001294995,
"count": 8,
"is_parallel": true,
"self": 0.0014422240001294995
}
}
},
"UnityEnvironment.step": {
"total": 0.06311001700032648,
"count": 1,
"is_parallel": true,
"self": 0.0006312870018518879,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048145399978238856,
"count": 1,
"is_parallel": true,
"self": 0.00048145399978238856
},
"communicator.exchange": {
"total": 0.06000535999919521,
"count": 1,
"is_parallel": true,
"self": 0.06000535999919521
},
"steps_from_proto": {
"total": 0.001991915999496996,
"count": 1,
"is_parallel": true,
"self": 0.0004883079973296844,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015036080021673115,
"count": 8,
"is_parallel": true,
"self": 0.0015036080021673115
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 457.0248007899736,
"count": 14321,
"is_parallel": true,
"self": 8.895754788175509,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.460690289960439,
"count": 14321,
"is_parallel": true,
"self": 5.460690289960439
},
"communicator.exchange": {
"total": 411.88394966486885,
"count": 14321,
"is_parallel": true,
"self": 411.88394966486885
},
"steps_from_proto": {
"total": 30.784406046968797,
"count": 14321,
"is_parallel": true,
"self": 7.581577122730778,
"children": {
"_process_rank_one_or_two_observation": {
"total": 23.20282892423802,
"count": 114568,
"is_parallel": true,
"self": 23.20282892423802
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 253.5065136518415,
"count": 14322,
"self": 0.9878365628892425,
"children": {
"process_trajectory": {
"total": 44.71119632395403,
"count": 14322,
"self": 44.50845595095507,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20274037299896008,
"count": 1,
"self": 0.20274037299896008
}
}
},
"_update_policy": {
"total": 207.80748076499822,
"count": 103,
"self": 54.752951763956844,
"children": {
"TorchPPOOptimizer.update": {
"total": 153.05452900104137,
"count": 4962,
"self": 153.05452900104137
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.2549993445863947e-06,
"count": 1,
"self": 2.2549993445863947e-06
},
"TrainerController._save_models": {
"total": 0.1775883749996865,
"count": 1,
"self": 0.007343440000113333,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17024493499957316,
"count": 1,
"self": 0.17024493499957316
}
}
}
}
}
}
}