{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6470933556556702,
"min": 0.5685675740242004,
"max": 1.507969617843628,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 19236.791015625,
"min": 16984.25,
"max": 45745.765625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989910.0,
"min": 29952.0,
"max": 989910.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989910.0,
"min": 29952.0,
"max": 989910.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2979535162448883,
"min": -0.16834713518619537,
"max": 0.2979535162448883,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 76.57405090332031,
"min": -39.89826965332031,
"max": 76.57405090332031,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.025349557399749756,
"min": -0.025349557399749756,
"max": 0.25741860270500183,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -6.514836311340332,
"min": -6.514836311340332,
"max": 61.78046417236328,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07188226819990258,
"min": 0.06446526338164833,
"max": 0.07341709901906122,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.006351754798636,
"min": 0.5062338405368373,
"max": 1.0471331936241055,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012349764262318317,
"min": 4.5875234415147196e-05,
"max": 0.012349764262318317,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.17289669967245644,
"min": 0.0006422532818120607,
"max": 0.17289669967245644,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.517904636921427e-06,
"min": 7.517904636921427e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010525066491689998,
"min": 0.00010525066491689998,
"max": 0.0032538437153854993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025059357142857,
"min": 0.1025059357142857,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4350831,
"min": 1.3691136000000002,
"max": 2.4003799000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002603429778571428,
"min": 0.0002603429778571428,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036448016899999994,
"min": 0.0036448016899999994,
"max": 0.10848298855,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011332578957080841,
"min": 0.011332578957080841,
"max": 0.42844635248184204,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15865610539913177,
"min": 0.15865610539913177,
"max": 2.999124526977539,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 565.25,
"min": 552.7636363636364,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31654.0,
"min": 15984.0,
"max": 32727.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.1379017878662456,
"min": -1.0000000521540642,
"max": 1.1379017878662456,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 62.58459833264351,
"min": -32.000001668930054,
"max": 62.58459833264351,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.1379017878662456,
"min": -1.0000000521540642,
"max": 1.1379017878662456,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 62.58459833264351,
"min": -32.000001668930054,
"max": 62.58459833264351,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06665856012735855,
"min": 0.06665856012735855,
"max": 8.341605023480952,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.66622080700472,
"min": 3.524839514604537,
"max": 133.46568037569523,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686600770",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1686602789"
},
"total": 2019.2556495639997,
"count": 1,
"self": 0.4744284409998727,
"children": {
"run_training.setup": {
"total": 0.03717722200008211,
"count": 1,
"self": 0.03717722200008211
},
"TrainerController.start_learning": {
"total": 2018.7440439009997,
"count": 1,
"self": 1.2269298569117382,
"children": {
"TrainerController._reset_env": {
"total": 5.353851383999881,
"count": 1,
"self": 5.353851383999881
},
"TrainerController.advance": {
"total": 2012.0727420220887,
"count": 63347,
"self": 1.296532454025055,
"children": {
"env_step": {
"total": 1396.8224617259589,
"count": 63347,
"self": 1293.7689100669077,
"children": {
"SubprocessEnvManager._take_step": {
"total": 102.32463929802088,
"count": 63347,
"self": 4.5050548930967125,
"children": {
"TorchPolicy.evaluate": {
"total": 97.81958440492417,
"count": 62545,
"self": 97.81958440492417
}
}
},
"workers": {
"total": 0.728912361030325,
"count": 63347,
"self": 0.0,
"children": {
"worker_root": {
"total": 2014.6325147479033,
"count": 63347,
"is_parallel": true,
"self": 824.8980773329586,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025093300000662566,
"count": 1,
"is_parallel": true,
"self": 0.0007020000002739835,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018073299997922732,
"count": 8,
"is_parallel": true,
"self": 0.0018073299997922732
}
}
},
"UnityEnvironment.step": {
"total": 0.0628960859999097,
"count": 1,
"is_parallel": true,
"self": 0.0005667399996127642,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005437660001916811,
"count": 1,
"is_parallel": true,
"self": 0.0005437660001916811
},
"communicator.exchange": {
"total": 0.05997727900012251,
"count": 1,
"is_parallel": true,
"self": 0.05997727900012251
},
"steps_from_proto": {
"total": 0.0018083009999827482,
"count": 1,
"is_parallel": true,
"self": 0.00034545700009402935,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014628439998887188,
"count": 8,
"is_parallel": true,
"self": 0.0014628439998887188
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1189.7344374149448,
"count": 63346,
"is_parallel": true,
"self": 31.702082403872282,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.925279990086437,
"count": 63346,
"is_parallel": true,
"self": 21.925279990086437
},
"communicator.exchange": {
"total": 1040.5873098739567,
"count": 63346,
"is_parallel": true,
"self": 1040.5873098739567
},
"steps_from_proto": {
"total": 95.51976514702937,
"count": 63346,
"is_parallel": true,
"self": 18.343360087977544,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.17640505905183,
"count": 506768,
"is_parallel": true,
"self": 77.17640505905183
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 613.9537478421048,
"count": 63347,
"self": 2.2691725380814205,
"children": {
"process_trajectory": {
"total": 101.6078569140218,
"count": 63347,
"self": 101.40587221002215,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20198470399964208,
"count": 2,
"self": 0.20198470399964208
}
}
},
"_update_policy": {
"total": 510.0767183900016,
"count": 438,
"self": 325.00647013302796,
"children": {
"TorchPPOOptimizer.update": {
"total": 185.07024825697363,
"count": 22884,
"self": 185.07024825697363
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.319993867189623e-07,
"count": 1,
"self": 8.319993867189623e-07
},
"TrainerController._save_models": {
"total": 0.09051980599997478,
"count": 1,
"self": 0.001350204999653215,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08916960100032156,
"count": 1,
"self": 0.08916960100032156
}
}
}
}
}
}
}