{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6310732364654541,
"min": 0.6310732364654541,
"max": 1.3959141969680786,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 18952.390625,
"min": 18952.390625,
"max": 42346.453125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989927.0,
"min": 29952.0,
"max": 989927.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989927.0,
"min": 29952.0,
"max": 989927.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.032886993139982224,
"min": -0.11363182961940765,
"max": 0.14573942124843597,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 8.090200424194336,
"min": -26.930744171142578,
"max": 36.58059310913086,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.012540374882519245,
"min": -0.008996750228106976,
"max": 0.37704503536224365,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.084932327270508,
"min": -2.2581841945648193,
"max": 89.35967254638672,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07299235814605796,
"min": 0.0663074805212213,
"max": 0.07392256023000274,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0218930140448115,
"min": 0.47847403233687535,
"max": 1.1073005621523513,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.005172306961840684,
"min": 7.009385213736068e-05,
"max": 0.008751396156690638,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.07241229746576958,
"min": 0.0009813139299230495,
"max": 0.13127094235035958,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.294983282657142e-06,
"min": 7.294983282657142e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001021297659572,
"min": 0.0001021297659572,
"max": 0.0035079632306789996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243162857142858,
"min": 0.10243162857142858,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4340428,
"min": 1.3886848,
"max": 2.569321,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025291969428571436,
"min": 0.00025291969428571436,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003540875720000001,
"min": 0.003540875720000001,
"max": 0.1169551679,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.006385961081832647,
"min": 0.006385961081832647,
"max": 0.41979971528053284,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.08940345793962479,
"min": 0.08940345793962479,
"max": 2.9385979175567627,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 790.6944444444445,
"min": 699.5714285714286,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28465.0,
"min": 15984.0,
"max": 33141.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.20904995728698042,
"min": -1.0000000521540642,
"max": 0.6335571007359595,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 7.525798462331295,
"min": -31.998801663517952,
"max": 26.6093982309103,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.20904995728698042,
"min": -1.0000000521540642,
"max": 0.6335571007359595,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 7.525798462331295,
"min": -31.998801663517952,
"max": 26.6093982309103,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.052445290076623984,
"min": 0.05149908337487085,
"max": 8.643005074933171,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.8880304427584633,
"min": 1.8880304427584633,
"max": 138.28808119893074,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1710191480",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1710194531"
},
"total": 3050.9345805410003,
"count": 1,
"self": 1.0403221040000972,
"children": {
"run_training.setup": {
"total": 0.06504595500001642,
"count": 1,
"self": 0.06504595500001642
},
"TrainerController.start_learning": {
"total": 3049.829212482,
"count": 1,
"self": 2.230497359996207,
"children": {
"TrainerController._reset_env": {
"total": 3.059294367999996,
"count": 1,
"self": 3.059294367999996
},
"TrainerController.advance": {
"total": 3044.394948254004,
"count": 63232,
"self": 2.485702009943452,
"children": {
"env_step": {
"total": 1999.5822363420393,
"count": 63232,
"self": 1843.661012953981,
"children": {
"SubprocessEnvManager._take_step": {
"total": 154.53515758499776,
"count": 63232,
"self": 6.838763572977484,
"children": {
"TorchPolicy.evaluate": {
"total": 147.69639401202028,
"count": 62562,
"self": 147.69639401202028
}
}
},
"workers": {
"total": 1.3860658030604327,
"count": 63232,
"self": 0.0,
"children": {
"worker_root": {
"total": 3043.761343642002,
"count": 63232,
"is_parallel": true,
"self": 1379.3353660060393,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0057007320000366235,
"count": 1,
"is_parallel": true,
"self": 0.003970366999965336,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017303650000712878,
"count": 8,
"is_parallel": true,
"self": 0.0017303650000712878
}
}
},
"UnityEnvironment.step": {
"total": 0.09822481600002675,
"count": 1,
"is_parallel": true,
"self": 0.0007344379999949524,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004952660000299147,
"count": 1,
"is_parallel": true,
"self": 0.0004952660000299147
},
"communicator.exchange": {
"total": 0.09512028500000724,
"count": 1,
"is_parallel": true,
"self": 0.09512028500000724
},
"steps_from_proto": {
"total": 0.0018748269999946388,
"count": 1,
"is_parallel": true,
"self": 0.00038232200006405037,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014925049999305884,
"count": 8,
"is_parallel": true,
"self": 0.0014925049999305884
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1664.4259776359627,
"count": 63231,
"is_parallel": true,
"self": 50.12638596799343,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 28.3000757210325,
"count": 63231,
"is_parallel": true,
"self": 28.3000757210325
},
"communicator.exchange": {
"total": 1457.3324657969724,
"count": 63231,
"is_parallel": true,
"self": 1457.3324657969724
},
"steps_from_proto": {
"total": 128.66705014996444,
"count": 63231,
"is_parallel": true,
"self": 27.291105369009642,
"children": {
"_process_rank_one_or_two_observation": {
"total": 101.3759447809548,
"count": 505848,
"is_parallel": true,
"self": 101.3759447809548
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1042.327009902021,
"count": 63232,
"self": 4.468622044008725,
"children": {
"process_trajectory": {
"total": 157.63803292100351,
"count": 63232,
"self": 157.36351337500332,
"children": {
"RLTrainer._checkpoint": {
"total": 0.27451954600019235,
"count": 2,
"self": 0.27451954600019235
}
}
},
"_update_policy": {
"total": 880.2203549370088,
"count": 444,
"self": 363.4256263960556,
"children": {
"TorchPPOOptimizer.update": {
"total": 516.7947285409532,
"count": 22779,
"self": 516.7947285409532
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3829999261361081e-06,
"count": 1,
"self": 1.3829999261361081e-06
},
"TrainerController._save_models": {
"total": 0.14447111700019377,
"count": 1,
"self": 0.0031317930001932837,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1413393240000005,
"count": 1,
"self": 0.1413393240000005
}
}
}
}
}
}
}