{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.39060187339782715,
"min": 0.37997695803642273,
"max": 1.4455169439315796,
"count": 37
},
"Pyramids.Policy.Entropy.sum": {
"value": 11655.5595703125,
"min": 11460.10546875,
"max": 43851.203125,
"count": 37
},
"Pyramids.Step.mean": {
"value": 1109931.0,
"min": 29938.0,
"max": 1109931.0,
"count": 37
},
"Pyramids.Step.sum": {
"value": 1109931.0,
"min": 29938.0,
"max": 1109931.0,
"count": 37
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6187918782234192,
"min": -0.10366135835647583,
"max": 0.6187918782234192,
"count": 37
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 175.7368927001953,
"min": -24.56774139404297,
"max": 175.7368927001953,
"count": 37
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.05420574173331261,
"min": -0.0045932866632938385,
"max": 0.4629976153373718,
"count": 37
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 15.394431114196777,
"min": -1.2172210216522217,
"max": 109.73043823242188,
"count": 37
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07081320491273967,
"min": 0.06555370301616716,
"max": 0.07302032052423157,
"count": 37
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.062198073691095,
"min": 0.5026800625806342,
"max": 1.088038160397749,
"count": 37
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01402736656865968,
"min": 0.00016224068144858391,
"max": 0.016054073384212744,
"count": 37
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21041049852989518,
"min": 0.002271369540280175,
"max": 0.24081110076319118,
"count": 37
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00019050270316577775,
"min": 0.00019050270316577775,
"max": 0.000298411171958181,
"count": 37
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0028575405474866662,
"min": 0.002088878203707267,
"max": 0.003927354290881932,
"count": 37
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.16350088888888886,
"min": 0.16350088888888886,
"max": 0.1994703904761905,
"count": 37
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.452513333333333,
"min": 1.3962927333333335,
"max": 2.797389933333333,
"count": 37
},
"Pyramids.Policy.Beta.mean": {
"value": 0.006353738799999999,
"min": 0.006353738799999999,
"max": 0.009947092008571428,
"count": 37
},
"Pyramids.Policy.Beta.sum": {
"value": 0.09530608199999999,
"min": 0.06962964406,
"max": 0.13092089486000003,
"count": 37
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011954035609960556,
"min": 0.011251576244831085,
"max": 0.4659583270549774,
"count": 37
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17931053042411804,
"min": 0.1575220674276352,
"max": 3.2617082595825195,
"count": 37
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 339.83505154639175,
"min": 325.4791666666667,
"max": 999.0,
"count": 37
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32964.0,
"min": 16609.0,
"max": 32964.0,
"count": 37
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5776639023853332,
"min": -0.9999467186629772,
"max": 1.6415325128529445,
"count": 37
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 153.03339853137732,
"min": -31.99760165810585,
"max": 154.58959813416004,
"count": 37
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5776639023853332,
"min": -0.9999467186629772,
"max": 1.6415325128529445,
"count": 37
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 153.03339853137732,
"min": -31.99760165810585,
"max": 154.58959813416004,
"count": 37
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04255740825847732,
"min": 0.04255740825847732,
"max": 8.036914406453862,
"count": 37
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.1280686010723,
"min": 3.1762158229539637,
"max": 136.62754490971565,
"count": 37
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 37
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 37
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1748441057",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1748443592"
},
"total": 2535.08233905,
"count": 1,
"self": 0.3378771490001782,
"children": {
"run_training.setup": {
"total": 0.023104725999928633,
"count": 1,
"self": 0.023104725999928633
},
"TrainerController.start_learning": {
"total": 2534.721357175,
"count": 1,
"self": 1.6298409209052807,
"children": {
"TrainerController._reset_env": {
"total": 2.9527891780001028,
"count": 1,
"self": 2.9527891780001028
},
"TrainerController.advance": {
"total": 2530.0028804160947,
"count": 72127,
"self": 1.6560944241227844,
"children": {
"env_step": {
"total": 1755.5562422859548,
"count": 72127,
"self": 1580.6781455539833,
"children": {
"SubprocessEnvManager._take_step": {
"total": 173.96225348203916,
"count": 72127,
"self": 5.266552919092646,
"children": {
"TorchPolicy.evaluate": {
"total": 168.6957005629465,
"count": 70602,
"self": 168.6957005629465
}
}
},
"workers": {
"total": 0.9158432499323226,
"count": 72127,
"self": 0.0,
"children": {
"worker_root": {
"total": 2527.6631967460526,
"count": 72127,
"is_parallel": true,
"self": 1076.515179005974,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0028029379998315562,
"count": 1,
"is_parallel": true,
"self": 0.0008541049994619243,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001948833000369632,
"count": 8,
"is_parallel": true,
"self": 0.001948833000369632
}
}
},
"UnityEnvironment.step": {
"total": 0.07799153899986777,
"count": 1,
"is_parallel": true,
"self": 0.0005560260001402639,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048350099996241624,
"count": 1,
"is_parallel": true,
"self": 0.00048350099996241624
},
"communicator.exchange": {
"total": 0.07539057399981175,
"count": 1,
"is_parallel": true,
"self": 0.07539057399981175
},
"steps_from_proto": {
"total": 0.0015614379999533412,
"count": 1,
"is_parallel": true,
"self": 0.00032423199991171714,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001237206000041624,
"count": 8,
"is_parallel": true,
"self": 0.001237206000041624
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1451.1480177400786,
"count": 72126,
"is_parallel": true,
"self": 36.24167097401573,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.582295143015017,
"count": 72126,
"is_parallel": true,
"self": 25.582295143015017
},
"communicator.exchange": {
"total": 1282.111661399016,
"count": 72126,
"is_parallel": true,
"self": 1282.111661399016
},
"steps_from_proto": {
"total": 107.21239022403188,
"count": 72126,
"is_parallel": true,
"self": 21.931795288123567,
"children": {
"_process_rank_one_or_two_observation": {
"total": 85.28059493590831,
"count": 577008,
"is_parallel": true,
"self": 85.28059493590831
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 772.7905437060169,
"count": 72127,
"self": 3.123921530001553,
"children": {
"process_trajectory": {
"total": 145.87630857501335,
"count": 72127,
"self": 145.636189474013,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24011910100034584,
"count": 2,
"self": 0.24011910100034584
}
}
},
"_update_policy": {
"total": 623.790313601002,
"count": 512,
"self": 345.999247104975,
"children": {
"TorchPPOOptimizer.update": {
"total": 277.79106649602704,
"count": 25728,
"self": 277.79106649602704
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3769999895885121e-06,
"count": 1,
"self": 1.3769999895885121e-06
},
"TrainerController._save_models": {
"total": 0.13584528299998055,
"count": 1,
"self": 0.001602483000169741,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1342427999998108,
"count": 1,
"self": 0.1342427999998108
}
}
}
}
}
}
}