{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.43518364429473877,
"min": 0.43518364429473877,
"max": 1.4008835554122925,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13097.287109375,
"min": 13097.287109375,
"max": 42497.203125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989927.0,
"min": 29952.0,
"max": 989927.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989927.0,
"min": 29952.0,
"max": 989927.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6261143088340759,
"min": -0.07144560664892197,
"max": 0.6623799800872803,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 179.06869506835938,
"min": -17.21839141845703,
"max": 189.440673828125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.022230735048651695,
"min": -0.07831678539514542,
"max": 0.49758803844451904,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.357990264892578,
"min": -19.970779418945312,
"max": 117.9283676147461,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06751873690081041,
"min": 0.06463483799606332,
"max": 0.0752032546532722,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0127810535121562,
"min": 0.5062746874367198,
"max": 1.0692180487676524,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014756779075414552,
"min": 0.0013953093179874855,
"max": 0.016469633328337556,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22135168613121828,
"min": 0.019362754656232748,
"max": 0.23057486659672577,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.519157493646671e-06,
"min": 7.519157493646671e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011278736240470007,
"min": 0.00011278736240470007,
"max": 0.003633582788805799,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250635333333333,
"min": 0.10250635333333333,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5375953,
"min": 1.3886848,
"max": 2.6111941999999995,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002603846980000002,
"min": 0.0002603846980000002,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003905770470000003,
"min": 0.003905770470000003,
"max": 0.12113830058,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013969703577458858,
"min": 0.013969703577458858,
"max": 0.6064023375511169,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2095455527305603,
"min": 0.1996900886297226,
"max": 4.244816303253174,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 297.1326530612245,
"min": 288.95327102803736,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29119.0,
"min": 15984.0,
"max": 33373.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6416244739476515,
"min": -1.0000000521540642,
"max": 1.6777312988885726,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 160.87919844686985,
"min": -29.404801733791828,
"max": 175.07899793982506,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6416244739476515,
"min": -1.0000000521540642,
"max": 1.6777312988885726,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 160.87919844686985,
"min": -29.404801733791828,
"max": 175.07899793982506,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.042875321748267324,
"min": 0.042875321748267324,
"max": 13.174597572535276,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.201781531330198,
"min": 4.201781531330198,
"max": 210.79356116056442,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678517802",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0",
"mlagents_envs_version": "0.29.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678520316"
},
"total": 2514.3380332139996,
"count": 1,
"self": 0.6724436009999408,
"children": {
"run_training.setup": {
"total": 0.12496005599950877,
"count": 1,
"self": 0.12496005599950877
},
"TrainerController.start_learning": {
"total": 2513.540629557,
"count": 1,
"self": 1.5455442210659385,
"children": {
"TrainerController._reset_env": {
"total": 6.757858976000534,
"count": 1,
"self": 6.757858976000534
},
"TrainerController.advance": {
"total": 2505.1529272579337,
"count": 64028,
"self": 1.6249412708812088,
"children": {
"env_step": {
"total": 1647.9095378310503,
"count": 64028,
"self": 1515.5989324909842,
"children": {
"SubprocessEnvManager._take_step": {
"total": 131.39720295402367,
"count": 64028,
"self": 6.590383829986422,
"children": {
"TorchPolicy.evaluate": {
"total": 124.80681912403725,
"count": 62561,
"self": 41.04265436314745,
"children": {
"TorchPolicy.sample_actions": {
"total": 83.7641647608898,
"count": 62561,
"self": 83.7641647608898
}
}
}
}
},
"workers": {
"total": 0.913402386042435,
"count": 64028,
"self": 0.0,
"children": {
"worker_root": {
"total": 2507.9848587318174,
"count": 64028,
"is_parallel": true,
"self": 1116.5606273633903,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001878887000202667,
"count": 1,
"is_parallel": true,
"self": 0.0007088290003594011,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011700579998432659,
"count": 8,
"is_parallel": true,
"self": 0.0011700579998432659
}
}
},
"UnityEnvironment.step": {
"total": 0.10364535700045963,
"count": 1,
"is_parallel": true,
"self": 0.0005326320015228703,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048392999997304287,
"count": 1,
"is_parallel": true,
"self": 0.00048392999997304287
},
"communicator.exchange": {
"total": 0.1008737829997699,
"count": 1,
"is_parallel": true,
"self": 0.1008737829997699
},
"steps_from_proto": {
"total": 0.0017550119991938118,
"count": 1,
"is_parallel": true,
"self": 0.00041607699949963717,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013389349996941746,
"count": 8,
"is_parallel": true,
"self": 0.0013389349996941746
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1391.4242313684272,
"count": 64027,
"is_parallel": true,
"self": 31.564497749066504,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.458877300233326,
"count": 64027,
"is_parallel": true,
"self": 22.458877300233326
},
"communicator.exchange": {
"total": 1240.6487317851634,
"count": 64027,
"is_parallel": true,
"self": 1240.6487317851634
},
"steps_from_proto": {
"total": 96.75212453396398,
"count": 64027,
"is_parallel": true,
"self": 22.838512297434136,
"children": {
"_process_rank_one_or_two_observation": {
"total": 73.91361223652984,
"count": 512216,
"is_parallel": true,
"self": 73.91361223652984
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 855.6184481560022,
"count": 64028,
"self": 3.008347457097443,
"children": {
"process_trajectory": {
"total": 188.74315739190297,
"count": 64028,
"self": 188.54180956590153,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20134782600143808,
"count": 2,
"self": 0.20134782600143808
}
}
},
"_update_policy": {
"total": 663.8669433070017,
"count": 456,
"self": 264.6856807590075,
"children": {
"TorchPPOOptimizer.update": {
"total": 399.18126254799427,
"count": 22764,
"self": 399.18126254799427
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1499996617203578e-06,
"count": 1,
"self": 1.1499996617203578e-06
},
"TrainerController._save_models": {
"total": 0.08429795200027002,
"count": 1,
"self": 0.0013486190000548959,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08294933300021512,
"count": 1,
"self": 0.08294933300021512
}
}
}
}
}
}
}