{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.32625505328178406,
"min": 0.32625505328178406,
"max": 1.4632600545883179,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9855.5126953125,
"min": 9815.5048828125,
"max": 44389.45703125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989999.0,
"min": 29952.0,
"max": 989999.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989999.0,
"min": 29952.0,
"max": 989999.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.24641400575637817,
"min": -0.14500002562999725,
"max": 0.2841465473175049,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 62.8355712890625,
"min": -34.94500732421875,
"max": 72.74151611328125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.27104660868644714,
"min": -0.27104660868644714,
"max": 0.38836491107940674,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -69.11688232421875,
"min": -69.11688232421875,
"max": 92.04248046875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06710461822097256,
"min": 0.06407377053519654,
"max": 0.07282707416466326,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9394646550936159,
"min": 0.47663762959890504,
"max": 1.0195790383052856,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.030380749334220865,
"min": 0.0003874038965476482,
"max": 0.030380749334220865,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.4253304906790921,
"min": 0.004702409028595866,
"max": 0.4253304906790921,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.63372602688571e-06,
"min": 7.63372602688571e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010687216437639996,
"min": 0.00010687216437639996,
"max": 0.0035072330309224003,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254454285714286,
"min": 0.10254454285714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356236,
"min": 1.3691136000000002,
"max": 2.5690776,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002641998314285714,
"min": 0.0002641998314285714,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00369879764,
"min": 0.00369879764,
"max": 0.11693085224,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010915083810687065,
"min": 0.010915083810687065,
"max": 0.528436541557312,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1528111696243286,
"min": 0.1528111696243286,
"max": 3.6990559101104736,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 578.0,
"min": 568.98,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29478.0,
"min": 15984.0,
"max": 32333.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.029729378281855,
"min": -1.0000000521540642,
"max": 1.123003896106692,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 52.51619829237461,
"min": -32.000001668930054,
"max": 57.27319870144129,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.029729378281855,
"min": -1.0000000521540642,
"max": 1.123003896106692,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 52.51619829237461,
"min": -32.000001668930054,
"max": 57.27319870144129,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06601920983224523,
"min": 0.06601920983224523,
"max": 10.769413832575083,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.3669797014445066,
"min": 3.3669797014445066,
"max": 172.31062132120132,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1711512114",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1711514479"
},
"total": 2365.5501715929995,
"count": 1,
"self": 0.48919144999945274,
"children": {
"run_training.setup": {
"total": 0.07516258900000139,
"count": 1,
"self": 0.07516258900000139
},
"TrainerController.start_learning": {
"total": 2364.9858175540003,
"count": 1,
"self": 1.9399390259532083,
"children": {
"TrainerController._reset_env": {
"total": 2.7314399370000046,
"count": 1,
"self": 2.7314399370000046
},
"TrainerController.advance": {
"total": 2360.2219544790464,
"count": 63413,
"self": 1.9792104879811632,
"children": {
"env_step": {
"total": 1693.7878072830213,
"count": 63413,
"self": 1527.9226353090914,
"children": {
"SubprocessEnvManager._take_step": {
"total": 164.7072971670134,
"count": 63413,
"self": 5.740313942974126,
"children": {
"TorchPolicy.evaluate": {
"total": 158.96698322403927,
"count": 62561,
"self": 158.96698322403927
}
}
},
"workers": {
"total": 1.1578748069164249,
"count": 63413,
"self": 0.0,
"children": {
"worker_root": {
"total": 2358.716707561015,
"count": 63413,
"is_parallel": true,
"self": 980.7405752680345,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003184166999972149,
"count": 1,
"is_parallel": true,
"self": 0.0008626329995422566,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0023215340004298923,
"count": 8,
"is_parallel": true,
"self": 0.0023215340004298923
}
}
},
"UnityEnvironment.step": {
"total": 0.05512594199990417,
"count": 1,
"is_parallel": true,
"self": 0.0006765329999325331,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004719139999451727,
"count": 1,
"is_parallel": true,
"self": 0.0004719139999451727
},
"communicator.exchange": {
"total": 0.052132164000113335,
"count": 1,
"is_parallel": true,
"self": 0.052132164000113335
},
"steps_from_proto": {
"total": 0.0018453309999131307,
"count": 1,
"is_parallel": true,
"self": 0.0004080879996308795,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014372430002822512,
"count": 8,
"is_parallel": true,
"self": 0.0014372430002822512
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1377.9761322929805,
"count": 63412,
"is_parallel": true,
"self": 40.03888499999903,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.913946678012735,
"count": 63412,
"is_parallel": true,
"self": 26.913946678012735
},
"communicator.exchange": {
"total": 1194.470912977984,
"count": 63412,
"is_parallel": true,
"self": 1194.470912977984
},
"steps_from_proto": {
"total": 116.5523876369848,
"count": 63412,
"is_parallel": true,
"self": 24.943769079838376,
"children": {
"_process_rank_one_or_two_observation": {
"total": 91.60861855714643,
"count": 507296,
"is_parallel": true,
"self": 91.60861855714643
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 664.4549367080442,
"count": 63413,
"self": 3.4356319379678553,
"children": {
"process_trajectory": {
"total": 136.30736966207587,
"count": 63413,
"self": 136.04871037207567,
"children": {
"RLTrainer._checkpoint": {
"total": 0.25865929000019605,
"count": 2,
"self": 0.25865929000019605
}
}
},
"_update_policy": {
"total": 524.7119351080005,
"count": 439,
"self": 308.45282259501914,
"children": {
"TorchPPOOptimizer.update": {
"total": 216.25911251298135,
"count": 22821,
"self": 216.25911251298135
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2159998732386157e-06,
"count": 1,
"self": 1.2159998732386157e-06
},
"TrainerController._save_models": {
"total": 0.09248289600054704,
"count": 1,
"self": 0.0015630940006303717,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09091980199991667,
"count": 1,
"self": 0.09091980199991667
}
}
}
}
}
}
}