{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3213263750076294,
"min": 0.3213263750076294,
"max": 1.435050368309021,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9680.9208984375,
"min": 9680.9208984375,
"max": 43533.6875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989893.0,
"min": 29952.0,
"max": 989893.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989893.0,
"min": 29952.0,
"max": 989893.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6463717818260193,
"min": -0.07456464320421219,
"max": 0.6684888601303101,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 186.80145263671875,
"min": -17.97007942199707,
"max": 189.850830078125,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.005671907681971788,
"min": -0.03081030398607254,
"max": 0.3506130576133728,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.6391812562942505,
"min": -8.472833633422852,
"max": 83.09529113769531,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06963807962061505,
"min": 0.06502761516305267,
"max": 0.0726671888732975,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9749331146886107,
"min": 0.49046742006160865,
"max": 1.06762196754183,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016189691360479975,
"min": 0.0011747353933613215,
"max": 0.016189691360479975,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22665567904671963,
"min": 0.014096824720335858,
"max": 0.2372592464283419,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.689268865514286e-06,
"min": 7.689268865514286e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001076497641172,
"min": 0.0001076497641172,
"max": 0.0036328099890633996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10256305714285716,
"min": 0.10256305714285716,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4358828000000001,
"min": 1.3886848,
"max": 2.7072714,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026604940857142856,
"min": 0.00026604940857142856,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00372469172,
"min": 0.00372469172,
"max": 0.12111256634,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011386283673346043,
"min": 0.011386283673346043,
"max": 0.6463755965232849,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15940797328948975,
"min": 0.15940797328948975,
"max": 4.52462911605835,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 289.4,
"min": 287.11650485436894,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30387.0,
"min": 15984.0,
"max": 33485.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6343828366625877,
"min": -1.0000000521540642,
"max": 1.6934582304607317,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 171.6101978495717,
"min": -28.85040158778429,
"max": 174.42619773745537,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6343828366625877,
"min": -1.0000000521540642,
"max": 1.6934582304607317,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 171.6101978495717,
"min": -28.85040158778429,
"max": 174.42619773745537,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03397896363056221,
"min": 0.03397896363056221,
"max": 13.59286229684949,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.5677911812090315,
"min": 3.5219399792258628,
"max": 217.48579674959183,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1720681642",
"python_version": "3.10.12 (main, Mar 22 2024, 16:50:05) [GCC 11.4.0]",
"command_line_arguments": "/home/sfchan/.local/bin/mlagents-learn --force ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1720682980"
},
"total": 1337.6181873519672,
"count": 1,
"self": 0.26914471783675253,
"children": {
"run_training.setup": {
"total": 0.03835906705353409,
"count": 1,
"self": 0.03835906705353409
},
"TrainerController.start_learning": {
"total": 1337.310683567077,
"count": 1,
"self": 0.7084168187575415,
"children": {
"TrainerController._reset_env": {
"total": 1.592584381927736,
"count": 1,
"self": 1.592584381927736
},
"TrainerController.advance": {
"total": 1334.93809969828,
"count": 64168,
"self": 0.6729919499484822,
"children": {
"env_step": {
"total": 937.7902666016016,
"count": 64168,
"self": 865.7689560323488,
"children": {
"SubprocessEnvManager._take_step": {
"total": 71.56108480656985,
"count": 64168,
"self": 2.3197027374990284,
"children": {
"TorchPolicy.evaluate": {
"total": 69.24138206907082,
"count": 62563,
"self": 69.24138206907082
}
}
},
"workers": {
"total": 0.4602257626829669,
"count": 64168,
"self": 0.0,
"children": {
"worker_root": {
"total": 1335.9129113269737,
"count": 64168,
"is_parallel": true,
"self": 531.0445204823045,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001213249983265996,
"count": 1,
"is_parallel": true,
"self": 0.0003221639199182391,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008910860633477569,
"count": 8,
"is_parallel": true,
"self": 0.0008910860633477569
}
}
},
"UnityEnvironment.step": {
"total": 0.030274133081547916,
"count": 1,
"is_parallel": true,
"self": 0.00048752513248473406,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00037480995524674654,
"count": 1,
"is_parallel": true,
"self": 0.00037480995524674654
},
"communicator.exchange": {
"total": 0.02809698600322008,
"count": 1,
"is_parallel": true,
"self": 0.02809698600322008
},
"steps_from_proto": {
"total": 0.001314811990596354,
"count": 1,
"is_parallel": true,
"self": 0.0002782170195132494,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010365949710831046,
"count": 8,
"is_parallel": true,
"self": 0.0010365949710831046
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 804.8683908446692,
"count": 64167,
"is_parallel": true,
"self": 24.98480334552005,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 16.69777997583151,
"count": 64167,
"is_parallel": true,
"self": 16.69777997583151
},
"communicator.exchange": {
"total": 698.5365367666818,
"count": 64167,
"is_parallel": true,
"self": 698.5365367666818
},
"steps_from_proto": {
"total": 64.64927075663581,
"count": 64167,
"is_parallel": true,
"self": 12.492868118686602,
"children": {
"_process_rank_one_or_two_observation": {
"total": 52.15640263794921,
"count": 513336,
"is_parallel": true,
"self": 52.15640263794921
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 396.4748411467299,
"count": 64168,
"self": 1.302483921404928,
"children": {
"process_trajectory": {
"total": 69.73687639774289,
"count": 64168,
"self": 69.54055128467735,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1963251130655408,
"count": 2,
"self": 0.1963251130655408
}
}
},
"_update_policy": {
"total": 325.43548082758207,
"count": 456,
"self": 178.87009435950313,
"children": {
"TorchPPOOptimizer.update": {
"total": 146.56538646807894,
"count": 22791,
"self": 146.56538646807894
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.910405889153481e-07,
"count": 1,
"self": 5.910405889153481e-07
},
"TrainerController._save_models": {
"total": 0.07158207707107067,
"count": 1,
"self": 0.002209675032645464,
"children": {
"RLTrainer._checkpoint": {
"total": 0.06937240203842521,
"count": 1,
"self": 0.06937240203842521
}
}
}
}
}
}
}