{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6166240572929382,
"min": 0.6166240572929382,
"max": 1.4610559940338135,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 18745.37109375,
"min": 18745.37109375,
"max": 44322.59375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989879.0,
"min": 29982.0,
"max": 989879.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989879.0,
"min": 29982.0,
"max": 989879.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.31459179520606995,
"min": -0.11372945457696915,
"max": 0.31459179520606995,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 81.47927856445312,
"min": -27.295068740844727,
"max": 81.47927856445312,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.1749652922153473,
"min": -0.1749652922153473,
"max": 0.28640449047088623,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -45.316009521484375,
"min": -45.316009521484375,
"max": 69.02348327636719,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06792416374914216,
"min": 0.06606042983353085,
"max": 0.07335995397165261,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9509382924879902,
"min": 0.5602740814976843,
"max": 1.039693661331423,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014265168372644224,
"min": 0.0005642948541346257,
"max": 0.015190601559870212,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19971235721701913,
"min": 0.006207243395480882,
"max": 0.2278590233980532,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.406347531249999e-06,
"min": 7.406347531249999e-06,
"max": 0.00029506995164334997,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010368886543749998,
"min": 0.00010368886543749998,
"max": 0.0035070065309978997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10246875000000001,
"min": 0.10246875000000001,
"max": 0.19835665,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4345625000000002,
"min": 1.4345625000000002,
"max": 2.5690020999999996,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025662812500000005,
"min": 0.00025662812500000005,
"max": 0.009835829335,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035927937500000004,
"min": 0.0035927937500000004,
"max": 0.11692330979000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.016714874655008316,
"min": 0.016714874655008316,
"max": 0.4590266942977905,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.23400825262069702,
"min": 0.23400825262069702,
"max": 3.672213554382324,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 533.0566037735849,
"min": 533.0566037735849,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28252.0,
"min": 16780.0,
"max": 33208.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.0611925654941134,
"min": -0.9999375520274043,
"max": 1.183499964988894,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 57.30439853668213,
"min": -31.998001664876938,
"max": 63.90899810940027,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.0611925654941134,
"min": -0.9999375520274043,
"max": 1.183499964988894,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 57.30439853668213,
"min": -31.998001664876938,
"max": 63.90899810940027,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.09229153331101837,
"min": 0.09229153331101837,
"max": 9.013456904639801,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.983742798794992,
"min": 4.983742798794992,
"max": 162.2422242835164,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688480122",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688482225"
},
"total": 2102.786997524,
"count": 1,
"self": 0.47526047799965454,
"children": {
"run_training.setup": {
"total": 0.03715892300010637,
"count": 1,
"self": 0.03715892300010637
},
"TrainerController.start_learning": {
"total": 2102.274578123,
"count": 1,
"self": 1.327990988948386,
"children": {
"TrainerController._reset_env": {
"total": 3.8829866920000313,
"count": 1,
"self": 3.8829866920000313
},
"TrainerController.advance": {
"total": 2096.970990815051,
"count": 63407,
"self": 1.3446355780829435,
"children": {
"env_step": {
"total": 1473.3729054049966,
"count": 63407,
"self": 1364.1995590339188,
"children": {
"SubprocessEnvManager._take_step": {
"total": 108.38056753504998,
"count": 63407,
"self": 4.792143905035346,
"children": {
"TorchPolicy.evaluate": {
"total": 103.58842363001463,
"count": 62584,
"self": 103.58842363001463
}
}
},
"workers": {
"total": 0.7927788360277646,
"count": 63407,
"self": 0.0,
"children": {
"worker_root": {
"total": 2097.321575851027,
"count": 63407,
"is_parallel": true,
"self": 845.507585564063,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021120379999501893,
"count": 1,
"is_parallel": true,
"self": 0.0005927930003508664,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001519244999599323,
"count": 8,
"is_parallel": true,
"self": 0.001519244999599323
}
}
},
"UnityEnvironment.step": {
"total": 0.04814240299992889,
"count": 1,
"is_parallel": true,
"self": 0.0005378449998261203,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005014900000333,
"count": 1,
"is_parallel": true,
"self": 0.0005014900000333
},
"communicator.exchange": {
"total": 0.04516348900006051,
"count": 1,
"is_parallel": true,
"self": 0.04516348900006051
},
"steps_from_proto": {
"total": 0.0019395790000089619,
"count": 1,
"is_parallel": true,
"self": 0.000391335000131221,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015482439998777409,
"count": 8,
"is_parallel": true,
"self": 0.0015482439998777409
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1251.8139902869639,
"count": 63406,
"is_parallel": true,
"self": 32.632003283051745,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.36172030198577,
"count": 63406,
"is_parallel": true,
"self": 23.36172030198577
},
"communicator.exchange": {
"total": 1092.3348011259834,
"count": 63406,
"is_parallel": true,
"self": 1092.3348011259834
},
"steps_from_proto": {
"total": 103.48546557594295,
"count": 63406,
"is_parallel": true,
"self": 20.465110925843874,
"children": {
"_process_rank_one_or_two_observation": {
"total": 83.02035465009908,
"count": 507248,
"is_parallel": true,
"self": 83.02035465009908
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 622.2534498319715,
"count": 63407,
"self": 2.459630503976541,
"children": {
"process_trajectory": {
"total": 109.25240635399632,
"count": 63407,
"self": 108.98354772799621,
"children": {
"RLTrainer._checkpoint": {
"total": 0.26885862600011023,
"count": 2,
"self": 0.26885862600011023
}
}
},
"_update_policy": {
"total": 510.5414129739986,
"count": 450,
"self": 328.13241796701664,
"children": {
"TorchPPOOptimizer.update": {
"total": 182.40899500698197,
"count": 22854,
"self": 182.40899500698197
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.19000285648508e-07,
"count": 1,
"self": 9.19000285648508e-07
},
"TrainerController._save_models": {
"total": 0.09260870800017074,
"count": 1,
"self": 0.001377406000301562,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09123130199986917,
"count": 1,
"self": 0.09123130199986917
}
}
}
}
}
}
}