{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3281592130661011,
"min": 0.32165196537971497,
"max": 1.4837335348129272,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9876.279296875,
"min": 9695.876953125,
"max": 45010.5390625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989992.0,
"min": 29909.0,
"max": 989992.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989992.0,
"min": 29909.0,
"max": 989992.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5952149629592896,
"min": -0.08172186464071274,
"max": 0.6661695241928101,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 167.85061645507812,
"min": -19.694969177246094,
"max": 188.52597045898438,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.004132273141294718,
"min": -0.03298287093639374,
"max": 0.38849225640296936,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1.165300965309143,
"min": -8.575546264648438,
"max": 92.07266235351562,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0715828786044204,
"min": 0.0652579231204332,
"max": 0.07393912137125706,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0021603004618858,
"min": 0.5329930424354318,
"max": 1.0695751500681188,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01757991781293337,
"min": 0.0008882467414076393,
"max": 0.01757991781293337,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24611884938106718,
"min": 0.009770714155484032,
"max": 0.2558342598301048,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.503461784592861e-06,
"min": 7.503461784592861e-06,
"max": 0.00029484483921838754,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010504846498430005,
"min": 0.00010504846498430005,
"max": 0.0036336073887976,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250112142857142,
"min": 0.10250112142857142,
"max": 0.19828161249999998,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4350157,
"min": 1.4350157,
"max": 2.6112024000000007,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025986203071428585,
"min": 0.00025986203071428585,
"max": 0.009828333088749999,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003638068430000002,
"min": 0.003638068430000002,
"max": 0.12113911976,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011744825169444084,
"min": 0.011744825169444084,
"max": 0.3779070973396301,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16442754864692688,
"min": 0.16442754864692688,
"max": 3.023256778717041,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 318.15625,
"min": 276.921568627451,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30543.0,
"min": 16452.0,
"max": 31975.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6610083114355803,
"min": -0.999962551984936,
"max": 1.6706117447830884,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 159.4567978978157,
"min": -31.998801663517952,
"max": 170.402397967875,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6610083114355803,
"min": -0.999962551984936,
"max": 1.6706117447830884,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 159.4567978978157,
"min": -31.998801663517952,
"max": 170.402397967875,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03877655189292758,
"min": 0.03522995833191089,
"max": 7.24382467304959,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.7225489817210473,
"min": 3.593455749854911,
"max": 123.14501944184303,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1742626661",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1742628827"
},
"total": 2165.96559194,
"count": 1,
"self": 0.47555142100009107,
"children": {
"run_training.setup": {
"total": 0.020156775000032212,
"count": 1,
"self": 0.020156775000032212
},
"TrainerController.start_learning": {
"total": 2165.469883744,
"count": 1,
"self": 1.2346847710150541,
"children": {
"TrainerController._reset_env": {
"total": 2.1805495979999705,
"count": 1,
"self": 2.1805495979999705
},
"TrainerController.advance": {
"total": 2161.963951279985,
"count": 64131,
"self": 1.268858972968701,
"children": {
"env_step": {
"total": 1491.0688534659992,
"count": 64131,
"self": 1349.07041189803,
"children": {
"SubprocessEnvManager._take_step": {
"total": 141.2985489579405,
"count": 64131,
"self": 4.325191567955471,
"children": {
"TorchPolicy.evaluate": {
"total": 136.97335738998504,
"count": 62553,
"self": 136.97335738998504
}
}
},
"workers": {
"total": 0.6998926100286553,
"count": 64131,
"self": 0.0,
"children": {
"worker_root": {
"total": 2160.6979003250676,
"count": 64131,
"is_parallel": true,
"self": 915.1134561240403,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020501799999692594,
"count": 1,
"is_parallel": true,
"self": 0.00066730499975165,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013828750002176093,
"count": 8,
"is_parallel": true,
"self": 0.0013828750002176093
}
}
},
"UnityEnvironment.step": {
"total": 0.051317749000190815,
"count": 1,
"is_parallel": true,
"self": 0.0005264070000521315,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047331900009339734,
"count": 1,
"is_parallel": true,
"self": 0.00047331900009339734
},
"communicator.exchange": {
"total": 0.04872638699998788,
"count": 1,
"is_parallel": true,
"self": 0.04872638699998788
},
"steps_from_proto": {
"total": 0.001591636000057406,
"count": 1,
"is_parallel": true,
"self": 0.00035558500007937255,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012360509999780334,
"count": 8,
"is_parallel": true,
"self": 0.0012360509999780334
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1245.5844442010273,
"count": 64130,
"is_parallel": true,
"self": 30.381393043998514,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.52377113901298,
"count": 64130,
"is_parallel": true,
"self": 22.52377113901298
},
"communicator.exchange": {
"total": 1101.9132272720574,
"count": 64130,
"is_parallel": true,
"self": 1101.9132272720574
},
"steps_from_proto": {
"total": 90.7660527459584,
"count": 64130,
"is_parallel": true,
"self": 17.874588251943806,
"children": {
"_process_rank_one_or_two_observation": {
"total": 72.8914644940146,
"count": 513040,
"is_parallel": true,
"self": 72.8914644940146
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 669.6262388410171,
"count": 64131,
"self": 2.413667776028433,
"children": {
"process_trajectory": {
"total": 123.741688839988,
"count": 64131,
"self": 123.50259761898815,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23909122099985325,
"count": 2,
"self": 0.23909122099985325
}
}
},
"_update_policy": {
"total": 543.4708822250007,
"count": 457,
"self": 300.0239822970468,
"children": {
"TorchPPOOptimizer.update": {
"total": 243.44689992795384,
"count": 22812,
"self": 243.44689992795384
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3789999684377108e-06,
"count": 1,
"self": 1.3789999684377108e-06
},
"TrainerController._save_models": {
"total": 0.090696716000366,
"count": 1,
"self": 0.001467031000629504,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08922968499973649,
"count": 1,
"self": 0.08922968499973649
}
}
}
}
}
}
}