{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6655287742614746,
"min": 0.5389033555984497,
"max": 1.494441270828247,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 19816.78515625,
"min": 16141.232421875,
"max": 45335.37109375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989913.0,
"min": 29952.0,
"max": 989913.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989913.0,
"min": 29952.0,
"max": 989913.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.12512804567813873,
"min": -0.10939469933509827,
"max": 0.15140748023986816,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 31.031753540039062,
"min": -26.36412239074707,
"max": 37.851871490478516,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.007220172788947821,
"min": 0.0069918641820549965,
"max": 0.19987423717975616,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.7906028032302856,
"min": 1.7479660511016846,
"max": 47.969818115234375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06403479703816328,
"min": 0.06400217306680347,
"max": 0.07422540338763103,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.896487158534286,
"min": 0.4815144617141936,
"max": 1.0391556474268344,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.007908610623554762,
"min": 0.00014044934850734138,
"max": 0.00872699326453799,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.11072054872976665,
"min": 0.0018258415305954377,
"max": 0.13090489896806984,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.3432404094285764e-06,
"min": 7.3432404094285764e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010280536573200007,
"min": 0.00010280536573200007,
"max": 0.0033758569747144,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024477142857143,
"min": 0.1024477142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.434268,
"min": 1.3691136000000002,
"max": 2.4252856,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002545266571428573,
"min": 0.0002545266571428573,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003563373200000002,
"min": 0.003563373200000002,
"max": 0.11254603144,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009860271587967873,
"min": 0.009860271587967873,
"max": 0.39873525500297546,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13804380595684052,
"min": 0.13804380595684052,
"max": 2.791146755218506,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 794.9,
"min": 664.8571428571429,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31796.0,
"min": 15984.0,
"max": 32505.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.3047899706289172,
"min": -1.0000000521540642,
"max": 0.6682904422992751,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 12.191598825156689,
"min": -32.000001668930054,
"max": 30.113798573613167,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.3047899706289172,
"min": -1.0000000521540642,
"max": 0.6682904422992751,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 12.191598825156689,
"min": -32.000001668930054,
"max": 30.113798573613167,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08095270719204564,
"min": 0.07146937660789783,
"max": 8.318164428696036,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2381082876818255,
"min": 3.0017138175317086,
"max": 133.09063085913658,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690083424",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690085638"
},
"total": 2214.217931763,
"count": 1,
"self": 0.4816712550000375,
"children": {
"run_training.setup": {
"total": 0.059857391999685206,
"count": 1,
"self": 0.059857391999685206
},
"TrainerController.start_learning": {
"total": 2213.676403116,
"count": 1,
"self": 1.3156799130711079,
"children": {
"TrainerController._reset_env": {
"total": 5.467596311000307,
"count": 1,
"self": 5.467596311000307
},
"TrainerController.advance": {
"total": 2206.798115811929,
"count": 63222,
"self": 1.3679226088256655,
"children": {
"env_step": {
"total": 1539.0037825990457,
"count": 63222,
"self": 1429.8563797829042,
"children": {
"SubprocessEnvManager._take_step": {
"total": 108.32972032699854,
"count": 63222,
"self": 4.718130756014034,
"children": {
"TorchPolicy.evaluate": {
"total": 103.6115895709845,
"count": 62567,
"self": 103.6115895709845
}
}
},
"workers": {
"total": 0.8176824891429533,
"count": 63222,
"self": 0.0,
"children": {
"worker_root": {
"total": 2208.821470108045,
"count": 63222,
"is_parallel": true,
"self": 892.0103971990834,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002487773000211746,
"count": 1,
"is_parallel": true,
"self": 0.0006824430015512917,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018053299986604543,
"count": 8,
"is_parallel": true,
"self": 0.0018053299986604543
}
}
},
"UnityEnvironment.step": {
"total": 0.04890659600005165,
"count": 1,
"is_parallel": true,
"self": 0.000560612000299443,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005013229997530289,
"count": 1,
"is_parallel": true,
"self": 0.0005013229997530289
},
"communicator.exchange": {
"total": 0.04604842100025053,
"count": 1,
"is_parallel": true,
"self": 0.04604842100025053
},
"steps_from_proto": {
"total": 0.00179623999974865,
"count": 1,
"is_parallel": true,
"self": 0.00036348100047689513,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014327589992717549,
"count": 8,
"is_parallel": true,
"self": 0.0014327589992717549
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1316.8110729089617,
"count": 63221,
"is_parallel": true,
"self": 34.00328447793299,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.173003021091063,
"count": 63221,
"is_parallel": true,
"self": 23.173003021091063
},
"communicator.exchange": {
"total": 1158.9635395780097,
"count": 63221,
"is_parallel": true,
"self": 1158.9635395780097
},
"steps_from_proto": {
"total": 100.67124583192799,
"count": 63221,
"is_parallel": true,
"self": 19.964180266268613,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.70706556565938,
"count": 505768,
"is_parallel": true,
"self": 80.70706556565938
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 666.4264106040578,
"count": 63222,
"self": 2.2878873740382915,
"children": {
"process_trajectory": {
"total": 108.91775579602108,
"count": 63222,
"self": 108.64933674702161,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2684190489994762,
"count": 2,
"self": 0.2684190489994762
}
}
},
"_update_policy": {
"total": 555.2207674339984,
"count": 440,
"self": 360.9715234320406,
"children": {
"TorchPPOOptimizer.update": {
"total": 194.24924400195778,
"count": 22830,
"self": 194.24924400195778
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.030000001075678e-07,
"count": 1,
"self": 9.030000001075678e-07
},
"TrainerController._save_models": {
"total": 0.09501017699949443,
"count": 1,
"self": 0.0014990039990152582,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09351117300047918,
"count": 1,
"self": 0.09351117300047918
}
}
}
}
}
}
}