{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.21720464527606964,
"min": 0.20768827199935913,
"max": 0.29807475209236145,
"count": 17
},
"Pyramids.Policy.Entropy.sum": {
"value": 6460.53515625,
"min": 6210.7099609375,
"max": 8664.7734375,
"count": 17
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 279.6574074074074,
"min": 260.4086956521739,
"max": 345.0444444444444,
"count": 17
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30203.0,
"min": 16795.0,
"max": 31320.0,
"count": 17
},
"Pyramids.Step.mean": {
"value": 1499980.0,
"min": 1019934.0,
"max": 1499980.0,
"count": 17
},
"Pyramids.Step.sum": {
"value": 1499980.0,
"min": 1019934.0,
"max": 1499980.0,
"count": 17
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6857998371124268,
"min": 0.5569564700126648,
"max": 0.6999236941337585,
"count": 17
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 197.51034545898438,
"min": 106.35494995117188,
"max": 207.87733459472656,
"count": 17
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.011466632597148418,
"min": -0.003921444062143564,
"max": 0.08421960473060608,
"count": 17
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.3023900985717773,
"min": -1.0901614427566528,
"max": 24.002588272094727,
"count": 17
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7042280218311558,
"min": 1.502714267716958,
"max": 1.7395912886961646,
"count": 17
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 182.35239833593369,
"min": 99.20499943196774,
"max": 200.05299820005894,
"count": 17
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7042280218311558,
"min": 1.502714267716958,
"max": 1.7395912886961646,
"count": 17
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 182.35239833593369,
"min": 99.20499943196774,
"max": 200.05299820005894,
"count": 17
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.024416481507300164,
"min": 0.022409672142715532,
"max": 0.03160340793149042,
"count": 17
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.6125635212811176,
"min": 1.6711126518785022,
"max": 3.0019219083333155,
"count": 17
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06975638308323964,
"min": 0.06622975742485485,
"max": 0.07178942280422364,
"count": 17
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9765893631653549,
"min": 0.6101926534708279,
"max": 1.01275189212916,
"count": 17
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016813369541307025,
"min": 0.01431408897596633,
"max": 0.017236776922973582,
"count": 17
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23538717357829836,
"min": 0.1338397917764572,
"max": 0.24501954557490538,
"count": 17
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.9738561516047628e-06,
"min": 2.9738561516047628e-06,
"max": 9.789306736899999e-05,
"count": 17
},
"Pyramids.Policy.LearningRate.sum": {
"value": 4.1633986122466676e-05,
"min": 4.1633986122466676e-05,
"max": 0.0013018851660385998,
"count": 17
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1009912523809524,
"min": 0.1009912523809524,
"max": 0.13263100000000005,
"count": 17
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4138775333333335,
"min": 1.1936790000000004,
"max": 1.9048246000000002,
"count": 17
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0001090261128571429,
"min": 0.0001090261128571429,
"max": 0.0032698369000000002,
"count": 17
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0015263655800000005,
"min": 0.0015263655800000005,
"max": 0.04349274386,
"count": 17
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00849104393273592,
"min": 0.0080970274284482,
"max": 0.009547818452119827,
"count": 17
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11887460947036743,
"min": 0.08593036234378815,
"max": 0.13323572278022766,
"count": 17
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 17
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 17
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1726704575",
"python_version": "3.10.14 (main, Apr 17 2024, 00:00:00) [GCC 13.2.1 20240316 (Red Hat 13.2.1-7)]",
"command_line_arguments": "/home/michael/Repo/DeepRLCourse/venv/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.0+rocm6.1",
"numpy_version": "1.23.5",
"end_time_seconds": "1726705048"
},
"total": 473.71785410499433,
"count": 1,
"self": 0.21809501701500267,
"children": {
"run_training.setup": {
"total": 0.01654465199680999,
"count": 1,
"self": 0.01654465199680999
},
"TrainerController.start_learning": {
"total": 473.4832144359825,
"count": 1,
"self": 0.43410689767915756,
"children": {
"TrainerController._reset_env": {
"total": 1.1233860200154595,
"count": 1,
"self": 1.1233860200154595
},
"TrainerController.advance": {
"total": 471.8779994753131,
"count": 32645,
"self": 0.3900848307821434,
"children": {
"env_step": {
"total": 285.53448172067874,
"count": 32645,
"self": 236.0893001385557,
"children": {
"SubprocessEnvManager._take_step": {
"total": 49.193108400155324,
"count": 32645,
"self": 1.0925407735048793,
"children": {
"TorchPolicy.evaluate": {
"total": 48.100567626650445,
"count": 31290,
"self": 48.100567626650445
}
}
},
"workers": {
"total": 0.25207318196771666,
"count": 32645,
"self": 0.0,
"children": {
"worker_root": {
"total": 472.8673126748181,
"count": 32645,
"is_parallel": true,
"self": 262.82770341748255,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0011030320019926876,
"count": 1,
"is_parallel": true,
"self": 0.0003565579536370933,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007464740483555943,
"count": 8,
"is_parallel": true,
"self": 0.0007464740483555943
}
}
},
"UnityEnvironment.step": {
"total": 0.016810662986245006,
"count": 1,
"is_parallel": true,
"self": 0.00019693400827236474,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001981329987756908,
"count": 1,
"is_parallel": true,
"self": 0.0001981329987756908
},
"communicator.exchange": {
"total": 0.015795077983057126,
"count": 1,
"is_parallel": true,
"self": 0.015795077983057126
},
"steps_from_proto": {
"total": 0.0006205179961398244,
"count": 1,
"is_parallel": true,
"self": 0.0001533749746158719,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0004671430215239525,
"count": 8,
"is_parallel": true,
"self": 0.0004671430215239525
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 210.03960925733554,
"count": 32644,
"is_parallel": true,
"self": 5.756008195952745,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.313196824630722,
"count": 32644,
"is_parallel": true,
"self": 4.313196824630722
},
"communicator.exchange": {
"total": 182.56862023012945,
"count": 32644,
"is_parallel": true,
"self": 182.56862023012945
},
"steps_from_proto": {
"total": 17.40178400662262,
"count": 32644,
"is_parallel": true,
"self": 4.037765364308143,
"children": {
"_process_rank_one_or_two_observation": {
"total": 13.364018642314477,
"count": 261152,
"is_parallel": true,
"self": 13.364018642314477
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 185.95343292385223,
"count": 32645,
"self": 0.8427105708105955,
"children": {
"process_trajectory": {
"total": 36.69104316688026,
"count": 32645,
"self": 36.632464149879524,
"children": {
"RLTrainer._checkpoint": {
"total": 0.05857901700073853,
"count": 1,
"self": 0.05857901700073853
}
}
},
"_update_policy": {
"total": 148.41967918616137,
"count": 236,
"self": 82.20424556016224,
"children": {
"TorchPPOOptimizer.update": {
"total": 66.21543362599914,
"count": 11352,
"self": 66.21543362599914
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.599868018180132e-07,
"count": 1,
"self": 5.599868018180132e-07
},
"TrainerController._save_models": {
"total": 0.04772148298798129,
"count": 1,
"self": 0.0014789959823247045,
"children": {
"RLTrainer._checkpoint": {
"total": 0.046242487005656585,
"count": 1,
"self": 0.046242487005656585
}
}
}
}
}
}
}