{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3736235797405243,
"min": 0.36460861563682556,
"max": 1.4195291996002197,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11232.619140625,
"min": 10949.92578125,
"max": 30851.2734375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989905.0,
"min": 29952.0,
"max": 989905.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989905.0,
"min": 29952.0,
"max": 989905.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6039798855781555,
"min": -0.28652840852737427,
"max": 0.6600787043571472,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 170.92630004882812,
"min": -44.984962463378906,
"max": 188.1224365234375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.021872790530323982,
"min": -0.005482618696987629,
"max": 0.36503687500953674,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.189999580383301,
"min": -1.4583765268325806,
"max": 75.03805541992188,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06779669387736756,
"min": 0.06529612526176426,
"max": 0.07426509082067462,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9491537142831458,
"min": 0.29706036328269847,
"max": 1.0538714728851724,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016674932191356774,
"min": 0.0014756244655713424,
"max": 0.01771732931125111,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23344905067899482,
"min": 0.011804995724570739,
"max": 0.24804261035751551,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.296783282057142e-06,
"min": 7.296783282057142e-06,
"max": 0.00029388480203839995,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010215496594879999,
"min": 0.00010215496594879999,
"max": 0.0036308656897114998,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243222857142856,
"min": 0.10243222857142856,
"max": 0.1979616,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4340511999999999,
"min": 0.7918464,
"max": 2.6102885000000007,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002529796342857143,
"min": 0.0002529796342857143,
"max": 0.00979636384,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00354171488,
"min": 0.00354171488,
"max": 0.12104782115000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.016214115545153618,
"min": 0.016141671687364578,
"max": 0.27473512291908264,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.22699762880802155,
"min": 0.2259833961725235,
"max": 1.5110154151916504,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 315.63829787234044,
"min": 289.35185185185185,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29670.0,
"min": 15984.0,
"max": 33826.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6612589304384433,
"min": -1.0000000521540642,
"max": 1.6753121345678224,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 157.8195983916521,
"min": -32.000001668930054,
"max": 179.25839839875698,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6612589304384433,
"min": -1.0000000521540642,
"max": 1.6753121345678224,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 157.8195983916521,
"min": -32.000001668930054,
"max": 179.25839839875698,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05213109296915048,
"min": 0.0485315383749902,
"max": 3.5907402615994215,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.9524538320692955,
"min": 4.924939605945838,
"max": 66.36549647897482,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1723833297",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1723836772"
},
"total": 3474.8708987400005,
"count": 1,
"self": 0.6339558580007179,
"children": {
"run_training.setup": {
"total": 0.07300747599992974,
"count": 1,
"self": 0.07300747599992974
},
"TrainerController.start_learning": {
"total": 3474.163935406,
"count": 1,
"self": 2.3437207039269197,
"children": {
"TrainerController._reset_env": {
"total": 2.5242195130003893,
"count": 1,
"self": 2.5242195130003893
},
"TrainerController.advance": {
"total": 3469.211450492072,
"count": 63487,
"self": 2.550986286089028,
"children": {
"env_step": {
"total": 2326.8957188870763,
"count": 63487,
"self": 2155.709764848056,
"children": {
"SubprocessEnvManager._take_step": {
"total": 169.69631938297107,
"count": 63487,
"self": 7.281243020009242,
"children": {
"TorchPolicy.evaluate": {
"total": 162.41507636296183,
"count": 61912,
"self": 162.41507636296183
}
}
},
"workers": {
"total": 1.4896346560494749,
"count": 63487,
"self": 0.0,
"children": {
"worker_root": {
"total": 3466.6556297000297,
"count": 63487,
"is_parallel": true,
"self": 1502.3325688270706,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00238238999963869,
"count": 1,
"is_parallel": true,
"self": 0.000771109999732289,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001611279999906401,
"count": 8,
"is_parallel": true,
"self": 0.001611279999906401
}
}
},
"UnityEnvironment.step": {
"total": 0.0642541520001032,
"count": 1,
"is_parallel": true,
"self": 0.0007831699999769626,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005168220000086876,
"count": 1,
"is_parallel": true,
"self": 0.0005168220000086876
},
"communicator.exchange": {
"total": 0.060853226000290306,
"count": 1,
"is_parallel": true,
"self": 0.060853226000290306
},
"steps_from_proto": {
"total": 0.002100933999827248,
"count": 1,
"is_parallel": true,
"self": 0.0005758269994657894,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015251070003614586,
"count": 8,
"is_parallel": true,
"self": 0.0015251070003614586
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1964.323060872959,
"count": 63486,
"is_parallel": true,
"self": 50.896630410177295,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 31.042400072904456,
"count": 63486,
"is_parallel": true,
"self": 31.042400072904456
},
"communicator.exchange": {
"total": 1751.4426359069498,
"count": 63486,
"is_parallel": true,
"self": 1751.4426359069498
},
"steps_from_proto": {
"total": 130.94139448292754,
"count": 63486,
"is_parallel": true,
"self": 28.458836095132483,
"children": {
"_process_rank_one_or_two_observation": {
"total": 102.48255838779505,
"count": 507888,
"is_parallel": true,
"self": 102.48255838779505
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1139.7647453189065,
"count": 63487,
"self": 4.860655228983433,
"children": {
"process_trajectory": {
"total": 173.993510690917,
"count": 63487,
"self": 173.80143805491662,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1920726360003755,
"count": 2,
"self": 0.1920726360003755
}
}
},
"_update_policy": {
"total": 960.9105793990061,
"count": 449,
"self": 387.4049755339938,
"children": {
"TorchPPOOptimizer.update": {
"total": 573.5056038650123,
"count": 22548,
"self": 573.5056038650123
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.088000317395199e-06,
"count": 1,
"self": 1.088000317395199e-06
},
"TrainerController._save_models": {
"total": 0.08454360900032043,
"count": 1,
"self": 0.002645734000907396,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08189787499941303,
"count": 1,
"self": 0.08189787499941303
}
}
}
}
}
}
}