{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4563881456851959,
"min": 0.4563881456851959,
"max": 1.44721519947052,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13757.3642578125,
"min": 13757.3642578125,
"max": 43902.71875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989968.0,
"min": 29903.0,
"max": 989968.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989968.0,
"min": 29903.0,
"max": 989968.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.28028613328933716,
"min": -0.10673309117555618,
"max": 0.3540996015071869,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 72.03353881835938,
"min": -25.722675323486328,
"max": 94.19049072265625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -3.7643654346466064,
"min": -3.7643654346466064,
"max": 0.3398297131061554,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -967.44189453125,
"min": -967.44189453125,
"max": 81.55912780761719,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06917446291620201,
"min": 0.06577435550845342,
"max": 0.07360288635268364,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9684424808268282,
"min": 0.5888230908214691,
"max": 1.030876987960701,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 2.21124804902467,
"min": 0.00021858882359409603,
"max": 2.21124804902467,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 30.95747268634538,
"min": 0.0028416547067232484,
"max": 30.95747268634538,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.63344745555e-06,
"min": 7.63344745555e-06,
"max": 0.0002950140391619875,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010686826437770001,
"min": 0.00010686826437770001,
"max": 0.0033757867747378,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254445000000001,
"min": 0.10254445000000001,
"max": 0.1983380125,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356223000000001,
"min": 1.4356223000000001,
"max": 2.4252622,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002641905550000001,
"min": 0.0002641905550000001,
"max": 0.00983396744875,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036986677700000014,
"min": 0.0036986677700000014,
"max": 0.11254369378000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.019229937344789505,
"min": 0.019229937344789505,
"max": 0.5228409767150879,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.26921913027763367,
"min": 0.26921913027763367,
"max": 4.182727813720703,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 615.1346153846154,
"min": 457.4852941176471,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31987.0,
"min": 17725.0,
"max": 33651.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.9616615065874962,
"min": -0.9999867188433806,
"max": 1.3071470317595146,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 50.0063983425498,
"min": -31.99480165541172,
"max": 88.88599815964699,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.9616615065874962,
"min": -0.9999867188433806,
"max": 1.3071470317595146,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 50.0063983425498,
"min": -31.99480165541172,
"max": 88.88599815964699,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.12207683166679299,
"min": 0.09240759628643419,
"max": 10.130403281913864,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 6.347995246673236,
"min": 5.7659988483646885,
"max": 182.34725907444954,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1725291168",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1725293616"
},
"total": 2448.0873914760004,
"count": 1,
"self": 0.8617520630004947,
"children": {
"run_training.setup": {
"total": 0.05131528299989441,
"count": 1,
"self": 0.05131528299989441
},
"TrainerController.start_learning": {
"total": 2447.17432413,
"count": 1,
"self": 1.7349560708867102,
"children": {
"TrainerController._reset_env": {
"total": 1.8402889809999579,
"count": 1,
"self": 1.8402889809999579
},
"TrainerController.advance": {
"total": 2443.4658142671133,
"count": 63401,
"self": 1.7134043972237123,
"children": {
"env_step": {
"total": 1724.426871784899,
"count": 63401,
"self": 1543.309353418973,
"children": {
"SubprocessEnvManager._take_step": {
"total": 180.0822541440166,
"count": 63401,
"self": 5.460628124933919,
"children": {
"TorchPolicy.evaluate": {
"total": 174.62162601908267,
"count": 62549,
"self": 174.62162601908267
}
}
},
"workers": {
"total": 1.0352642219095287,
"count": 63401,
"self": 0.0,
"children": {
"worker_root": {
"total": 2440.933938096043,
"count": 63401,
"is_parallel": true,
"self": 1039.5990222800424,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020940360000167857,
"count": 1,
"is_parallel": true,
"self": 0.0006395569998858264,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014544790001309593,
"count": 8,
"is_parallel": true,
"self": 0.0014544790001309593
}
}
},
"UnityEnvironment.step": {
"total": 0.053829552000024705,
"count": 1,
"is_parallel": true,
"self": 0.0006610780005757988,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005251039997347107,
"count": 1,
"is_parallel": true,
"self": 0.0005251039997347107
},
"communicator.exchange": {
"total": 0.050836751999668195,
"count": 1,
"is_parallel": true,
"self": 0.050836751999668195
},
"steps_from_proto": {
"total": 0.0018066180000460008,
"count": 1,
"is_parallel": true,
"self": 0.0003727560010702291,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014338619989757717,
"count": 8,
"is_parallel": true,
"self": 0.0014338619989757717
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1401.3349158160008,
"count": 63400,
"is_parallel": true,
"self": 37.504137657034335,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.411732322994794,
"count": 63400,
"is_parallel": true,
"self": 25.411732322994794
},
"communicator.exchange": {
"total": 1227.4459511028986,
"count": 63400,
"is_parallel": true,
"self": 1227.4459511028986
},
"steps_from_proto": {
"total": 110.97309473307314,
"count": 63400,
"is_parallel": true,
"self": 23.928493201397032,
"children": {
"_process_rank_one_or_two_observation": {
"total": 87.04460153167611,
"count": 507200,
"is_parallel": true,
"self": 87.04460153167611
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 717.3255380849905,
"count": 63401,
"self": 3.183504046019607,
"children": {
"process_trajectory": {
"total": 141.20301701297603,
"count": 63401,
"self": 140.94942152797603,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2535954850000053,
"count": 2,
"self": 0.2535954850000053
}
}
},
"_update_policy": {
"total": 572.9390170259949,
"count": 451,
"self": 323.7157642169914,
"children": {
"TorchPPOOptimizer.update": {
"total": 249.2232528090035,
"count": 22827,
"self": 249.2232528090035
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4620000001741573e-06,
"count": 1,
"self": 1.4620000001741573e-06
},
"TrainerController._save_models": {
"total": 0.13326334900011716,
"count": 1,
"self": 0.002368839000155276,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13089450999996188,
"count": 1,
"self": 0.13089450999996188
}
}
}
}
}
}
}