{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6218467354774475,
"min": 0.6218467354774475,
"max": 1.3934314250946045,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 18754.8984375,
"min": 18754.8984375,
"max": 42271.13671875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989919.0,
"min": 29952.0,
"max": 989919.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989919.0,
"min": 29952.0,
"max": 989919.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2671336531639099,
"min": -0.10059799998998642,
"max": 0.28871965408325195,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 68.65335083007812,
"min": -24.14352035522461,
"max": 74.2009506225586,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.019110295921564102,
"min": 0.00884211715310812,
"max": 0.5354399681091309,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.911345958709717,
"min": 2.2901084423065186,
"max": 126.89927673339844,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06866910484539057,
"min": 0.06478401754509282,
"max": 0.07494942640564757,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.961367467835468,
"min": 0.524645984839533,
"max": 1.0499233095176224,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012429652231990726,
"min": 0.00022306238610147016,
"max": 0.015563268748289175,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.17401513124787016,
"min": 0.002676748633217642,
"max": 0.21788576247604846,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.377868969314289e-06,
"min": 7.377868969314289e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010329016557040004,
"min": 0.00010329016557040004,
"max": 0.003632338089220699,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10245925714285715,
"min": 0.10245925714285715,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4344296,
"min": 1.3886848,
"max": 2.6107793000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002556797885714287,
"min": 0.0002556797885714287,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035795170400000024,
"min": 0.0035795170400000024,
"max": 0.12109685207000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01767260581254959,
"min": 0.01767260581254959,
"max": 0.6830230951309204,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.24741648137569427,
"min": 0.24741648137569427,
"max": 4.781161785125732,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 567.62,
"min": 542.7222222222222,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28381.0,
"min": 15984.0,
"max": 32722.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.0322639763355255,
"min": -1.0000000521540642,
"max": 1.2720444177587826,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 51.613198816776276,
"min": -31.99640166759491,
"max": 68.69039855897427,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.0322639763355255,
"min": -1.0000000521540642,
"max": 1.2720444177587826,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 51.613198816776276,
"min": -31.99640166759491,
"max": 68.69039855897427,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.10348837595316582,
"min": 0.09968006030518424,
"max": 13.661747723817825,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.174418797658291,
"min": 5.174418797658291,
"max": 218.5879635810852,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679035655",
"python_version": "3.7.12 | packaged by conda-forge | (default, Oct 26 2021, 06:08:53) \n[GCC 9.4.0]",
"command_line_arguments": "/opt/conda/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1679037922"
},
"total": 2266.742272972,
"count": 1,
"self": 0.49110327000016696,
"children": {
"run_training.setup": {
"total": 0.7491374790000123,
"count": 1,
"self": 0.7491374790000123
},
"TrainerController.start_learning": {
"total": 2265.502032223,
"count": 1,
"self": 1.547266790023059,
"children": {
"TrainerController._reset_env": {
"total": 11.262588026999993,
"count": 1,
"self": 11.262588026999993
},
"TrainerController.advance": {
"total": 2252.591634759977,
"count": 63418,
"self": 1.7106322279632877,
"children": {
"env_step": {
"total": 1452.5071269659986,
"count": 63418,
"self": 1343.1635985379844,
"children": {
"SubprocessEnvManager._take_step": {
"total": 108.46301962599773,
"count": 63418,
"self": 5.038896621969286,
"children": {
"TorchPolicy.evaluate": {
"total": 103.42412300402844,
"count": 62565,
"self": 36.92165672204845,
"children": {
"TorchPolicy.sample_actions": {
"total": 66.50246628197999,
"count": 62565,
"self": 66.50246628197999
}
}
}
}
},
"workers": {
"total": 0.8805088020165499,
"count": 63418,
"self": 0.0,
"children": {
"worker_root": {
"total": 2260.6228037560104,
"count": 63418,
"is_parallel": true,
"self": 1040.2899536489786,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005989557000020795,
"count": 1,
"is_parallel": true,
"self": 0.0034594349999679253,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00253012200005287,
"count": 8,
"is_parallel": true,
"self": 0.00253012200005287
}
}
},
"UnityEnvironment.step": {
"total": 0.06255764099995531,
"count": 1,
"is_parallel": true,
"self": 0.000869822000026943,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006132119999620045,
"count": 1,
"is_parallel": true,
"self": 0.0006132119999620045
},
"communicator.exchange": {
"total": 0.05893959499996981,
"count": 1,
"is_parallel": true,
"self": 0.05893959499996981
},
"steps_from_proto": {
"total": 0.00213501199999655,
"count": 1,
"is_parallel": true,
"self": 0.00062163600000531,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00151337599999124,
"count": 8,
"is_parallel": true,
"self": 0.00151337599999124
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1220.3328501070318,
"count": 63417,
"is_parallel": true,
"self": 35.83505944296962,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 28.09149548701089,
"count": 63417,
"is_parallel": true,
"self": 28.09149548701089
},
"communicator.exchange": {
"total": 1046.4950268890375,
"count": 63417,
"is_parallel": true,
"self": 1046.4950268890375
},
"steps_from_proto": {
"total": 109.91126828801384,
"count": 63417,
"is_parallel": true,
"self": 27.19845230907532,
"children": {
"_process_rank_one_or_two_observation": {
"total": 82.71281597893852,
"count": 507336,
"is_parallel": true,
"self": 82.71281597893852
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 798.3738755660149,
"count": 63418,
"self": 2.8450402530359042,
"children": {
"process_trajectory": {
"total": 190.42466757797757,
"count": 63418,
"self": 190.20282282997738,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22184474800019416,
"count": 2,
"self": 0.22184474800019416
}
}
},
"_update_policy": {
"total": 605.1041677350015,
"count": 450,
"self": 258.9587556690187,
"children": {
"TorchPPOOptimizer.update": {
"total": 346.14541206598284,
"count": 22818,
"self": 346.14541206598284
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.247000000148546e-06,
"count": 1,
"self": 1.247000000148546e-06
},
"TrainerController._save_models": {
"total": 0.1005413990001216,
"count": 1,
"self": 0.0019420700000409852,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09859932900008062,
"count": 1,
"self": 0.09859932900008062
}
}
}
}
}
}
}