{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.837311327457428,
"min": 0.7887104749679565,
"max": 1.469686508178711,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 25065.751953125,
"min": 23623.45703125,
"max": 44584.41015625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989982.0,
"min": 29952.0,
"max": 989982.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989982.0,
"min": 29952.0,
"max": 989982.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.06642565131187439,
"min": -0.16104307770729065,
"max": 0.14297114312648773,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 16.27428436279297,
"min": -38.16720962524414,
"max": 36.17169952392578,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.025431759655475616,
"min": -0.06733652204275131,
"max": 0.30287840962409973,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.230781078338623,
"min": -16.96880340576172,
"max": 72.99369812011719,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06552543547955565,
"min": 0.06552543547955565,
"max": 0.07366455763008571,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9828815321933349,
"min": 0.5156519034106,
"max": 1.060121506780121,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.006884446493345689,
"min": 0.0006709692115277738,
"max": 0.010788520641220939,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.10326669740018533,
"min": 0.00890061046903283,
"max": 0.15103928897709315,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4177975274333314e-06,
"min": 7.4177975274333314e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011126696291149997,
"min": 0.00011126696291149997,
"max": 0.0032261200246267003,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247256666666667,
"min": 0.10247256666666667,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5370885,
"min": 1.3886848,
"max": 2.3598060000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025700941,
"min": 0.00025700941,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038551411499999997,
"min": 0.0038551411499999997,
"max": 0.10754979267000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014785146340727806,
"min": 0.014499468728899956,
"max": 0.43704846501350403,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.22177720069885254,
"min": 0.20299255847930908,
"max": 3.0593392848968506,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 817.7428571428571,
"min": 664.9111111111112,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28621.0,
"min": 15984.0,
"max": 33374.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.09615995628493172,
"min": -1.0000000521540642,
"max": 0.7126532935433918,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 3.3655984699726105,
"min": -29.720401614904404,
"max": 32.06939820945263,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.09615995628493172,
"min": -1.0000000521540642,
"max": 0.7126532935433918,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 3.3655984699726105,
"min": -29.720401614904404,
"max": 32.06939820945263,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.12650357860007455,
"min": 0.09950008938580544,
"max": 8.075279124081135,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.42762525100261,
"min": 4.42762525100261,
"max": 129.20446598529816,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1718814642",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1718816674"
},
"total": 2031.802959925,
"count": 1,
"self": 0.7300454520002404,
"children": {
"run_training.setup": {
"total": 0.05360960799998793,
"count": 1,
"self": 0.05360960799998793
},
"TrainerController.start_learning": {
"total": 2031.0193048649999,
"count": 1,
"self": 1.2550726110050618,
"children": {
"TrainerController._reset_env": {
"total": 2.635279113000024,
"count": 1,
"self": 2.635279113000024
},
"TrainerController.advance": {
"total": 2026.9848146969948,
"count": 63097,
"self": 1.3615411590353688,
"children": {
"env_step": {
"total": 1404.3245475619829,
"count": 63097,
"self": 1277.7633800289802,
"children": {
"SubprocessEnvManager._take_step": {
"total": 125.80361632400809,
"count": 63097,
"self": 4.5362445650430345,
"children": {
"TorchPolicy.evaluate": {
"total": 121.26737175896506,
"count": 62574,
"self": 121.26737175896506
}
}
},
"workers": {
"total": 0.7575512089945278,
"count": 63097,
"self": 0.0,
"children": {
"worker_root": {
"total": 2026.2169061950049,
"count": 63097,
"is_parallel": true,
"self": 863.3552926260238,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020610029998806567,
"count": 1,
"is_parallel": true,
"self": 0.0005763550000210671,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014846479998595896,
"count": 8,
"is_parallel": true,
"self": 0.0014846479998595896
}
}
},
"UnityEnvironment.step": {
"total": 0.06994037999993452,
"count": 1,
"is_parallel": true,
"self": 0.0006256759997995687,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004221180001877656,
"count": 1,
"is_parallel": true,
"self": 0.0004221180001877656
},
"communicator.exchange": {
"total": 0.06734133499980999,
"count": 1,
"is_parallel": true,
"self": 0.06734133499980999
},
"steps_from_proto": {
"total": 0.0015512510001371993,
"count": 1,
"is_parallel": true,
"self": 0.0003161659999477706,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012350850001894287,
"count": 8,
"is_parallel": true,
"self": 0.0012350850001894287
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1162.861613568981,
"count": 63096,
"is_parallel": true,
"self": 32.84048656204368,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.549250934976726,
"count": 63096,
"is_parallel": true,
"self": 22.549250934976726
},
"communicator.exchange": {
"total": 1012.9280660169338,
"count": 63096,
"is_parallel": true,
"self": 1012.9280660169338
},
"steps_from_proto": {
"total": 94.54381005502682,
"count": 63096,
"is_parallel": true,
"self": 18.501942053943367,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.04186800108346,
"count": 504768,
"is_parallel": true,
"self": 76.04186800108346
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 621.2987259759766,
"count": 63097,
"self": 2.424844886013261,
"children": {
"process_trajectory": {
"total": 123.41343956696073,
"count": 63097,
"self": 123.17311942696142,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24032013999931223,
"count": 2,
"self": 0.24032013999931223
}
}
},
"_update_policy": {
"total": 495.4604415230026,
"count": 441,
"self": 293.34131712902195,
"children": {
"TorchPPOOptimizer.update": {
"total": 202.11912439398066,
"count": 22806,
"self": 202.11912439398066
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.238000095327152e-06,
"count": 1,
"self": 1.238000095327152e-06
},
"TrainerController._save_models": {
"total": 0.14413720599986846,
"count": 1,
"self": 0.0018808379995789437,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1422563680002895,
"count": 1,
"self": 0.1422563680002895
}
}
}
}
}
}
}