{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.31244075298309326,
"min": 0.30735117197036743,
"max": 1.494243860244751,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9558.1875,
"min": 9112.34765625,
"max": 45329.3828125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989975.0,
"min": 29952.0,
"max": 989975.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989975.0,
"min": 29952.0,
"max": 989975.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6163113713264465,
"min": -0.0742742121219635,
"max": 0.668003499507904,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 178.11398315429688,
"min": -17.9743595123291,
"max": 189.04498291015625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.019648682326078415,
"min": 0.009604482911527157,
"max": 0.2914630174636841,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.678469181060791,
"min": 2.7084641456604004,
"max": 69.07673645019531,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07058728970100288,
"min": 0.0644643053894116,
"max": 0.0738104028326537,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0588093455150431,
"min": 0.4965206905508658,
"max": 1.0795290299643323,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.018715627555046046,
"min": 0.000497202369253022,
"max": 0.018715627555046046,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2807344133256907,
"min": 0.005966428431036264,
"max": 0.2807344133256907,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.240943085146667e-05,
"min": 1.240943085146667e-05,
"max": 0.0004919177159021714,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00018614146277200005,
"min": 0.00018614146277200005,
"max": 0.005421855215628999,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248186666666668,
"min": 0.10248186666666668,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5372280000000003,
"min": 1.3886848,
"max": 2.4427157999999998,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025793848000000007,
"min": 0.00025793848000000007,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003869077200000001,
"min": 0.003869077200000001,
"max": 0.1084586629,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014859267510473728,
"min": 0.014859267510473728,
"max": 0.46216633915901184,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2228890061378479,
"min": 0.2105051875114441,
"max": 3.2351644039154053,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 290.7196261682243,
"min": 286.1333333333333,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31107.0,
"min": 15984.0,
"max": 32279.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.670020543387003,
"min": -1.0000000521540642,
"max": 1.6948094997377623,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 178.69219814240932,
"min": -29.945001646876335,
"max": 178.69219814240932,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.670020543387003,
"min": -1.0000000521540642,
"max": 1.6948094997377623,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 178.69219814240932,
"min": -29.945001646876335,
"max": 178.69219814240932,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04484530939083007,
"min": 0.04484530939083007,
"max": 9.45072509534657,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.798448104818817,
"min": 4.471678686488303,
"max": 151.21160152554512,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1747652934",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training v2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1747655355"
},
"total": 2420.8599100310003,
"count": 1,
"self": 0.8321350750002239,
"children": {
"run_training.setup": {
"total": 0.020975700999770197,
"count": 1,
"self": 0.020975700999770197
},
"TrainerController.start_learning": {
"total": 2420.0067992550003,
"count": 1,
"self": 1.5446296361124041,
"children": {
"TrainerController._reset_env": {
"total": 2.433267133999834,
"count": 1,
"self": 2.433267133999834
},
"TrainerController.advance": {
"total": 2415.905521082889,
"count": 64020,
"self": 1.6320995739652062,
"children": {
"env_step": {
"total": 1716.8344552880208,
"count": 64020,
"self": 1557.055329250772,
"children": {
"SubprocessEnvManager._take_step": {
"total": 158.89160658711262,
"count": 64020,
"self": 5.053982813109542,
"children": {
"TorchPolicy.evaluate": {
"total": 153.83762377400308,
"count": 62564,
"self": 153.83762377400308
}
}
},
"workers": {
"total": 0.8875194501360966,
"count": 64020,
"self": 0.0,
"children": {
"worker_root": {
"total": 2414.223065369906,
"count": 64020,
"is_parallel": true,
"self": 978.4718187139833,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021586550001302385,
"count": 1,
"is_parallel": true,
"self": 0.0007011620009507169,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014574929991795216,
"count": 8,
"is_parallel": true,
"self": 0.0014574929991795216
}
}
},
"UnityEnvironment.step": {
"total": 0.050109202999919944,
"count": 1,
"is_parallel": true,
"self": 0.0005504600003405358,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048208999942289665,
"count": 1,
"is_parallel": true,
"self": 0.00048208999942289665
},
"communicator.exchange": {
"total": 0.047460402000069735,
"count": 1,
"is_parallel": true,
"self": 0.047460402000069735
},
"steps_from_proto": {
"total": 0.001616251000086777,
"count": 1,
"is_parallel": true,
"self": 0.0003638439993665088,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012524070007202681,
"count": 8,
"is_parallel": true,
"self": 0.0012524070007202681
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1435.7512466559228,
"count": 64019,
"is_parallel": true,
"self": 32.86834825801361,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.2808961050132,
"count": 64019,
"is_parallel": true,
"self": 23.2808961050132
},
"communicator.exchange": {
"total": 1281.274131544913,
"count": 64019,
"is_parallel": true,
"self": 1281.274131544913
},
"steps_from_proto": {
"total": 98.32787074798307,
"count": 64019,
"is_parallel": true,
"self": 20.38281090655437,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.9450598414287,
"count": 512152,
"is_parallel": true,
"self": 77.9450598414287
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 697.4389662209032,
"count": 64020,
"self": 2.943188407879461,
"children": {
"process_trajectory": {
"total": 132.7225719260241,
"count": 64020,
"self": 132.47865304002426,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24391888599984668,
"count": 2,
"self": 0.24391888599984668
}
}
},
"_update_policy": {
"total": 561.7732058869997,
"count": 448,
"self": 313.599070995072,
"children": {
"TorchPPOOptimizer.update": {
"total": 248.17413489192768,
"count": 22812,
"self": 248.17413489192768
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1129995982628316e-06,
"count": 1,
"self": 1.1129995982628316e-06
},
"TrainerController._save_models": {
"total": 0.12338028899921483,
"count": 1,
"self": 0.002047890999165247,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12133239800004958,
"count": 1,
"self": 0.12133239800004958
}
}
}
}
}
}
}