{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6560380458831787,
"min": 0.6560380458831787,
"max": 1.4017127752304077,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 19492.203125,
"min": 19492.203125,
"max": 42522.359375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989881.0,
"min": 29948.0,
"max": 989881.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989881.0,
"min": 29948.0,
"max": 989881.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.09013891965150833,
"min": -0.11666567623615265,
"max": 0.14326931536197662,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 22.084035873413086,
"min": -27.9997615814209,
"max": 36.247135162353516,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.012801336124539375,
"min": 0.006338948849588633,
"max": 0.4162593483924866,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.1363272666931152,
"min": 1.5720592737197876,
"max": 98.6534652709961,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06629849642547628,
"min": 0.06518678693594113,
"max": 0.07516176229089458,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9281789499566679,
"min": 0.6012940983271566,
"max": 1.057573446746338,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0063609028096381194,
"min": 0.00011181893915493667,
"max": 0.03125639053501501,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.08905263933493367,
"min": 0.0014536462090141767,
"max": 0.2500511242801201,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.604026036785717e-06,
"min": 7.604026036785717e-06,
"max": 0.00029484045171985,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010645636451500004,
"min": 0.00010645636451500004,
"max": 0.003608721097093,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10253464285714285,
"min": 0.10253464285714285,
"max": 0.19828015,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.435485,
"min": 1.435485,
"max": 2.5274802000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026321082142857153,
"min": 0.00026321082142857153,
"max": 0.009828186985000001,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036849515000000016,
"min": 0.0036849515000000016,
"max": 0.12030040930000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010480760596692562,
"min": 0.010480760596692562,
"max": 0.5303505659103394,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14673064649105072,
"min": 0.14673064649105072,
"max": 4.242804527282715,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 773.3333333333334,
"min": 714.1333333333333,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30160.0,
"min": 16491.0,
"max": 34685.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.30331791115876955,
"min": -0.9999467189113299,
"max": 0.6416420660128719,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 11.829398535192013,
"min": -30.994801610708237,
"max": 24.382398508489132,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.30331791115876955,
"min": -0.9999467189113299,
"max": 0.6416420660128719,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 11.829398535192013,
"min": -30.994801610708237,
"max": 24.382398508489132,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08351557242134824,
"min": 0.08269581587002095,
"max": 12.424250781536102,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2571073244325817,
"min": 3.2137569491169415,
"max": 211.21226328611374,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1680807173",
"python_version": "3.9.0 (default, Nov 15 2020, 14:28:56) \n[GCC 7.3.0]",
"command_line_arguments": "/home/dung/.conda/envs/ml_agent/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu111",
"numpy_version": "1.23.2",
"end_time_seconds": "1680808177"
},
"total": 1003.3268506469994,
"count": 1,
"self": 0.27065846899859025,
"children": {
"run_training.setup": {
"total": 0.024129262999849743,
"count": 1,
"self": 0.024129262999849743
},
"TrainerController.start_learning": {
"total": 1003.032062915001,
"count": 1,
"self": 1.069122301905736,
"children": {
"TrainerController._reset_env": {
"total": 3.2398313869998674,
"count": 1,
"self": 3.2398313869998674
},
"TrainerController.advance": {
"total": 998.6700169320957,
"count": 63132,
"self": 1.0230685939604882,
"children": {
"env_step": {
"total": 572.5134690010145,
"count": 63132,
"self": 494.338611009689,
"children": {
"SubprocessEnvManager._take_step": {
"total": 77.52929931002836,
"count": 63132,
"self": 3.5194005421471957,
"children": {
"TorchPolicy.evaluate": {
"total": 74.00989876788117,
"count": 62553,
"self": 23.585047056900294,
"children": {
"TorchPolicy.sample_actions": {
"total": 50.424851710980874,
"count": 62553,
"self": 50.424851710980874
}
}
}
}
},
"workers": {
"total": 0.6455586812971887,
"count": 63132,
"self": 0.0,
"children": {
"worker_root": {
"total": 1001.6000257607193,
"count": 63132,
"is_parallel": true,
"self": 570.8210547665876,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0011372600001777755,
"count": 1,
"is_parallel": true,
"self": 0.0004160550051892642,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007212049949885113,
"count": 8,
"is_parallel": true,
"self": 0.0007212049949885113
}
}
},
"UnityEnvironment.step": {
"total": 0.02163860200016643,
"count": 1,
"is_parallel": true,
"self": 0.00036406599974725395,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00036105300023336895,
"count": 1,
"is_parallel": true,
"self": 0.00036105300023336895
},
"communicator.exchange": {
"total": 0.019904210999811767,
"count": 1,
"is_parallel": true,
"self": 0.019904210999811767
},
"steps_from_proto": {
"total": 0.0010092720003740396,
"count": 1,
"is_parallel": true,
"self": 0.00026374600020062644,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007455260001734132,
"count": 8,
"is_parallel": true,
"self": 0.0007455260001734132
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 430.77897099413167,
"count": 63131,
"is_parallel": true,
"self": 13.727648365413188,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 9.873472600898822,
"count": 63131,
"is_parallel": true,
"self": 9.873472600898822
},
"communicator.exchange": {
"total": 370.6416153721566,
"count": 63131,
"is_parallel": true,
"self": 370.6416153721566
},
"steps_from_proto": {
"total": 36.536234655663065,
"count": 63131,
"is_parallel": true,
"self": 9.839611810943097,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.69662284471997,
"count": 505048,
"is_parallel": true,
"self": 26.69662284471997
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 425.13347933712066,
"count": 63132,
"self": 1.792394934080221,
"children": {
"process_trajectory": {
"total": 95.29200338601731,
"count": 63132,
"self": 95.11291203101791,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1790913549994002,
"count": 2,
"self": 0.1790913549994002
}
}
},
"_update_policy": {
"total": 328.0490810170231,
"count": 448,
"self": 132.46654273384775,
"children": {
"TorchPPOOptimizer.update": {
"total": 195.58253828317538,
"count": 22833,
"self": 195.58253828317538
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.569986675865948e-07,
"count": 1,
"self": 8.569986675865948e-07
},
"TrainerController._save_models": {
"total": 0.053091437001057784,
"count": 1,
"self": 0.0008610670010966714,
"children": {
"RLTrainer._checkpoint": {
"total": 0.05223036999996111,
"count": 1,
"self": 0.05223036999996111
}
}
}
}
}
}
}