{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.16183078289031982,
"min": 0.16183078289031982,
"max": 0.19563491642475128,
"count": 11
},
"Pyramids.Policy.Entropy.sum": {
"value": 4857.5126953125,
"min": 4857.5126953125,
"max": 5840.8759765625,
"count": 11
},
"Pyramids.Step.mean": {
"value": 3329990.0,
"min": 3029892.0,
"max": 3329990.0,
"count": 11
},
"Pyramids.Step.sum": {
"value": 3329990.0,
"min": 3029892.0,
"max": 3329990.0,
"count": 11
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7569361329078674,
"min": 0.6572807431221008,
"max": 0.8241286277770996,
"count": 11
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 223.29615783691406,
"min": 184.69589233398438,
"max": 249.71096801757812,
"count": 11
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.008521034382283688,
"min": -0.002375907264649868,
"max": 0.010143999010324478,
"count": 11
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.513705253601074,
"min": -0.7198998928070068,
"max": 2.9924798011779785,
"count": 11
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 242.45378151260505,
"min": 226.7923076923077,
"max": 298.35353535353534,
"count": 11
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28852.0,
"min": 26690.0,
"max": 31380.0,
"count": 11
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7085299838334322,
"min": 1.641022202643481,
"max": 1.7736259428144412,
"count": 11
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 205.02359806001186,
"min": 162.46119806170464,
"max": 232.3449985086918,
"count": 11
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7085299838334322,
"min": 1.641022202643481,
"max": 1.7736259428144412,
"count": 11
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 205.02359806001186,
"min": 162.46119806170464,
"max": 232.3449985086918,
"count": 11
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02120728931058693,
"min": 0.020747624346282145,
"max": 0.027397025733888667,
"count": 11
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.5448747172704316,
"min": 2.4354529173870105,
"max": 2.9356366415158845,
"count": 11
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07088167101336534,
"min": 0.06472716507241351,
"max": 0.072778828392449,
"count": 11
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0632250652004802,
"min": 0.9061803110137892,
"max": 1.0632250652004802,
"count": 11
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015138761240424396,
"min": 0.01235295686984457,
"max": 0.015479897946490727,
"count": 11
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22708141860636594,
"min": 0.17294139617782398,
"max": 0.22708141860636594,
"count": 11
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.584155186237143e-05,
"min": 1.584155186237143e-05,
"max": 4.150687391950612e-05,
"count": 11
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00023762327793557143,
"min": 0.00023762327793557143,
"max": 0.0005810962348730857,
"count": 11
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10528048571428572,
"min": 0.10528048571428572,
"max": 0.11383559591836735,
"count": 11
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5792072857142858,
"min": 1.4862316000000002,
"max": 1.643657685714286,
"count": 11
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000537520522857143,
"min": 0.000537520522857143,
"max": 0.0013921760322448981,
"count": 11
},
"Pyramids.Policy.Beta.sum": {
"value": 0.008062807842857145,
"min": 0.008062807842857145,
"max": 0.019490464451428574,
"count": 11
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008326766081154346,
"min": 0.008326766081154346,
"max": 0.009064115583896637,
"count": 11
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12490149587392807,
"min": 0.12157151103019714,
"max": 0.13421687483787537,
"count": 11
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 11
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 11
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688730910",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688731874"
},
"total": 963.2737938869996,
"count": 1,
"self": 0.28846379599963257,
"children": {
"run_training.setup": {
"total": 0.0411267620002036,
"count": 1,
"self": 0.0411267620002036
},
"TrainerController.start_learning": {
"total": 962.9442033289997,
"count": 1,
"self": 0.6229140661216661,
"children": {
"TrainerController._reset_env": {
"total": 4.1584555089993955,
"count": 1,
"self": 4.1584555089993955
},
"TrainerController.advance": {
"total": 957.9727760298774,
"count": 22002,
"self": 0.6274250227652374,
"children": {
"env_step": {
"total": 730.2751400471425,
"count": 22002,
"self": 685.6130119412119,
"children": {
"SubprocessEnvManager._take_step": {
"total": 44.30485008307005,
"count": 22002,
"self": 1.9146209049231402,
"children": {
"TorchPolicy.evaluate": {
"total": 42.39022917814691,
"count": 20933,
"self": 42.39022917814691
}
}
},
"workers": {
"total": 0.3572780228605552,
"count": 22002,
"self": 0.0,
"children": {
"worker_root": {
"total": 959.8345618770909,
"count": 22002,
"is_parallel": true,
"self": 321.63205124029446,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001999586000238196,
"count": 1,
"is_parallel": true,
"self": 0.0006654319986409973,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013341540015971987,
"count": 8,
"is_parallel": true,
"self": 0.0013341540015971987
}
}
},
"UnityEnvironment.step": {
"total": 0.055308815999524086,
"count": 1,
"is_parallel": true,
"self": 0.0006113979998190189,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005261489986878587,
"count": 1,
"is_parallel": true,
"self": 0.0005261489986878587
},
"communicator.exchange": {
"total": 0.052172805000736844,
"count": 1,
"is_parallel": true,
"self": 0.052172805000736844
},
"steps_from_proto": {
"total": 0.0019984640002803644,
"count": 1,
"is_parallel": true,
"self": 0.00040776100286166184,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015907029974187026,
"count": 8,
"is_parallel": true,
"self": 0.0015907029974187026
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 638.2025106367964,
"count": 22001,
"is_parallel": true,
"self": 12.518055500813716,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.825988795009835,
"count": 22001,
"is_parallel": true,
"self": 8.825988795009835
},
"communicator.exchange": {
"total": 576.9878752659479,
"count": 22001,
"is_parallel": true,
"self": 576.9878752659479
},
"steps_from_proto": {
"total": 39.870591075025004,
"count": 22001,
"is_parallel": true,
"self": 8.422388295182827,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.448202779842177,
"count": 176008,
"is_parallel": true,
"self": 31.448202779842177
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 227.0702109599697,
"count": 22002,
"self": 1.132208885936052,
"children": {
"process_trajectory": {
"total": 42.091502506016695,
"count": 22002,
"self": 42.091502506016695
},
"_update_policy": {
"total": 183.84649956801695,
"count": 158,
"self": 116.91646065898931,
"children": {
"TorchPPOOptimizer.update": {
"total": 66.93003890902764,
"count": 7590,
"self": 66.93003890902764
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3980006769998e-06,
"count": 1,
"self": 1.3980006769998e-06
},
"TrainerController._save_models": {
"total": 0.19005632600055833,
"count": 1,
"self": 0.003046071000426309,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18701025500013202,
"count": 1,
"self": 0.18701025500013202
}
}
}
}
}
}
}