{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3373328149318695,
"min": 0.32857373356819153,
"max": 1.4600567817687988,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10119.984375,
"min": 9899.26953125,
"max": 44292.28125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989886.0,
"min": 29950.0,
"max": 989886.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989886.0,
"min": 29950.0,
"max": 989886.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.611911416053772,
"min": -0.11074133962392807,
"max": 0.640921413898468,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 174.39476013183594,
"min": -26.688663482666016,
"max": 180.73983764648438,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.008956316858530045,
"min": -0.021521279588341713,
"max": 0.232005313038826,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -2.5525503158569336,
"min": -5.918352127075195,
"max": 55.6812744140625,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06914377712204847,
"min": 0.0636854866581202,
"max": 0.07308053427320015,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.037156656830727,
"min": 0.5612782330735666,
"max": 1.037156656830727,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017442305651881422,
"min": 0.0007936022857048034,
"max": 0.017442305651881422,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2616345847782213,
"min": 0.007936022857048034,
"max": 0.2616345847782213,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.538217487293336e-06,
"min": 7.538217487293336e-06,
"max": 0.000294840226719925,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011307326230940004,
"min": 0.00011307326230940004,
"max": 0.0032530313156562993,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251270666666666,
"min": 0.10251270666666666,
"max": 0.198280075,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5376906,
"min": 1.4781118,
"max": 2.4438562000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026101939600000013,
"min": 0.00026101939600000013,
"max": 0.009828179492499999,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0039152909400000015,
"min": 0.0039152909400000015,
"max": 0.10845593562999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012433080933988094,
"min": 0.012433080933988094,
"max": 0.414697527885437,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18649621307849884,
"min": 0.17469522356987,
"max": 3.317580223083496,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 314.4901960784314,
"min": 292.25773195876286,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32078.0,
"min": 16493.0,
"max": 32572.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6286873617797222,
"min": -0.999987552408129,
"max": 1.6857687323354185,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 167.7547982633114,
"min": -31.999601677060127,
"max": 168.2327984124422,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6286873617797222,
"min": -0.999987552408129,
"max": 1.6857687323354185,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 167.7547982633114,
"min": -31.999601677060127,
"max": 168.2327984124422,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.039982945033523155,
"min": 0.038587199181468655,
"max": 8.104677708709941,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.118243338452885,
"min": 3.7043711214209907,
"max": 137.779521048069,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1746813587",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1746815952"
},
"total": 2365.5742822409998,
"count": 1,
"self": 0.49562586999991254,
"children": {
"run_training.setup": {
"total": 0.02237075499999719,
"count": 1,
"self": 0.02237075499999719
},
"TrainerController.start_learning": {
"total": 2365.056285616,
"count": 1,
"self": 1.2463083490233657,
"children": {
"TrainerController._reset_env": {
"total": 3.854604786999971,
"count": 1,
"self": 3.854604786999971
},
"TrainerController.advance": {
"total": 2359.870832491977,
"count": 64053,
"self": 1.3448385249507737,
"children": {
"env_step": {
"total": 1686.5702209010185,
"count": 64053,
"self": 1540.3555918460397,
"children": {
"SubprocessEnvManager._take_step": {
"total": 145.4636821109715,
"count": 64053,
"self": 4.43476926795546,
"children": {
"TorchPolicy.evaluate": {
"total": 141.02891284301603,
"count": 62566,
"self": 141.02891284301603
}
}
},
"workers": {
"total": 0.7509469440071825,
"count": 64053,
"self": 0.0,
"children": {
"worker_root": {
"total": 2360.1627328080162,
"count": 64053,
"is_parallel": true,
"self": 928.8497772090443,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0059021569999799794,
"count": 1,
"is_parallel": true,
"self": 0.004497625000112748,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014045319998672312,
"count": 8,
"is_parallel": true,
"self": 0.0014045319998672312
}
}
},
"UnityEnvironment.step": {
"total": 0.04981738399999358,
"count": 1,
"is_parallel": true,
"self": 0.0005565999999248561,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000524062000010872,
"count": 1,
"is_parallel": true,
"self": 0.000524062000010872
},
"communicator.exchange": {
"total": 0.04508341900003643,
"count": 1,
"is_parallel": true,
"self": 0.04508341900003643
},
"steps_from_proto": {
"total": 0.003653303000021424,
"count": 1,
"is_parallel": true,
"self": 0.002381232999994154,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00127207000002727,
"count": 8,
"is_parallel": true,
"self": 0.00127207000002727
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1431.312955598972,
"count": 64052,
"is_parallel": true,
"self": 31.936476959950824,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.77027140102689,
"count": 64052,
"is_parallel": true,
"self": 22.77027140102689
},
"communicator.exchange": {
"total": 1283.9125824290077,
"count": 64052,
"is_parallel": true,
"self": 1283.9125824290077
},
"steps_from_proto": {
"total": 92.69362480898639,
"count": 64052,
"is_parallel": true,
"self": 18.3089070679909,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.38471774099548,
"count": 512416,
"is_parallel": true,
"self": 74.38471774099548
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 671.9557730660077,
"count": 64053,
"self": 2.472374998085911,
"children": {
"process_trajectory": {
"total": 127.52677588692023,
"count": 64053,
"self": 127.2531284449206,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2736474419996284,
"count": 2,
"self": 0.2736474419996284
}
}
},
"_update_policy": {
"total": 541.9566221810016,
"count": 449,
"self": 300.79980091801514,
"children": {
"TorchPPOOptimizer.update": {
"total": 241.1568212629865,
"count": 22764,
"self": 241.1568212629865
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.679997674538754e-07,
"count": 1,
"self": 9.679997674538754e-07
},
"TrainerController._save_models": {
"total": 0.08453901999973823,
"count": 1,
"self": 0.0011170359998686763,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08342198399986955,
"count": 1,
"self": 0.08342198399986955
}
}
}
}
}
}
}