{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5846028923988342,
"min": 0.5846028923988342,
"max": 1.4821498394012451,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17481.96484375,
"min": 17481.96484375,
"max": 44962.49609375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989989.0,
"min": 29884.0,
"max": 989989.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989989.0,
"min": 29884.0,
"max": 989989.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3721213638782501,
"min": -0.11277265846729279,
"max": 0.4167194962501526,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 96.75155639648438,
"min": -27.06543731689453,
"max": 112.09754180908203,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.003546916414052248,
"min": -0.0064742472022771835,
"max": 0.17866326868534088,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.9221982955932617,
"min": -1.7415724992752075,
"max": 42.34319305419922,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07173565085191431,
"min": 0.06528372986092823,
"max": 0.07273674955026113,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0042991119268003,
"min": 0.4960903806150918,
"max": 1.090359085588716,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013970981003763547,
"min": 0.00030457366726757647,
"max": 0.0166651769267666,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19559373405268965,
"min": 0.0033503103399433413,
"max": 0.249977653901499,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.322797559099996e-06,
"min": 7.322797559099996e-06,
"max": 0.0002952358301594857,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010251916582739995,
"min": 0.00010251916582739995,
"max": 0.0032562134145955996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10244089999999999,
"min": 0.10244089999999999,
"max": 0.19841194285714286,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4341726,
"min": 1.3888836,
"max": 2.4431753,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025384590999999993,
"min": 0.00025384590999999993,
"max": 0.00984135309142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003553842739999999,
"min": 0.003553842739999999,
"max": 0.10856189956,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.006191181484609842,
"min": 0.006157424300909042,
"max": 0.29541710019111633,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.08667653799057007,
"min": 0.08620394021272659,
"max": 2.0679197311401367,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 481.41379310344826,
"min": 422.0416666666667,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27922.0,
"min": 16555.0,
"max": 34387.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.2771413545927097,
"min": -0.9999750521965325,
"max": 1.4390277495193813,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 74.07419856637716,
"min": -31.99920167028904,
"max": 103.60999796539545,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.2771413545927097,
"min": -0.9999750521965325,
"max": 1.4390277495193813,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 74.07419856637716,
"min": -31.99920167028904,
"max": 103.60999796539545,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03143329427257413,
"min": 0.027785963575802777,
"max": 5.425608523628291,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.8231310678092996,
"min": 1.8231310678092996,
"max": 92.23534490168095,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1766479011",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1766481209"
},
"total": 2197.9439772759997,
"count": 1,
"self": 0.47825325400026486,
"children": {
"run_training.setup": {
"total": 0.023167831999899136,
"count": 1,
"self": 0.023167831999899136
},
"TrainerController.start_learning": {
"total": 2197.44255619,
"count": 1,
"self": 1.3448346859554476,
"children": {
"TrainerController._reset_env": {
"total": 2.0236905210001623,
"count": 1,
"self": 2.0236905210001623
},
"TrainerController.advance": {
"total": 2193.998152698044,
"count": 63450,
"self": 1.3721305240319452,
"children": {
"env_step": {
"total": 1542.542601257043,
"count": 63450,
"self": 1393.8587554351163,
"children": {
"SubprocessEnvManager._take_step": {
"total": 147.87780798197946,
"count": 63450,
"self": 4.620466329935198,
"children": {
"TorchPolicy.evaluate": {
"total": 143.25734165204426,
"count": 62558,
"self": 143.25734165204426
}
}
},
"workers": {
"total": 0.8060378399472938,
"count": 63450,
"self": 0.0,
"children": {
"worker_root": {
"total": 2191.225857128939,
"count": 63450,
"is_parallel": true,
"self": 913.5161572529353,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0026047249998555344,
"count": 1,
"is_parallel": true,
"self": 0.0005853910004134377,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020193339994420967,
"count": 8,
"is_parallel": true,
"self": 0.0020193339994420967
}
}
},
"UnityEnvironment.step": {
"total": 0.05367206999994778,
"count": 1,
"is_parallel": true,
"self": 0.0006147719998352841,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004665520000344259,
"count": 1,
"is_parallel": true,
"self": 0.0004665520000344259
},
"communicator.exchange": {
"total": 0.05076658600000883,
"count": 1,
"is_parallel": true,
"self": 0.05076658600000883
},
"steps_from_proto": {
"total": 0.0018241600000692415,
"count": 1,
"is_parallel": true,
"self": 0.0005462059998535551,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012779540002156864,
"count": 8,
"is_parallel": true,
"self": 0.0012779540002156864
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1277.7096998760037,
"count": 63449,
"is_parallel": true,
"self": 34.24955844488977,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.65027151205095,
"count": 63449,
"is_parallel": true,
"self": 23.65027151205095
},
"communicator.exchange": {
"total": 1110.0538563030195,
"count": 63449,
"is_parallel": true,
"self": 1110.0538563030195
},
"steps_from_proto": {
"total": 109.75601361604345,
"count": 63449,
"is_parallel": true,
"self": 22.656720485984124,
"children": {
"_process_rank_one_or_two_observation": {
"total": 87.09929313005932,
"count": 507592,
"is_parallel": true,
"self": 87.09929313005932
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 650.0834209169689,
"count": 63450,
"self": 2.617431091952767,
"children": {
"process_trajectory": {
"total": 120.79282126502017,
"count": 63450,
"self": 120.60044500301979,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19237626200037994,
"count": 2,
"self": 0.19237626200037994
}
}
},
"_update_policy": {
"total": 526.6731685599959,
"count": 445,
"self": 294.5492278100203,
"children": {
"TorchPPOOptimizer.update": {
"total": 232.12394074997565,
"count": 22851,
"self": 232.12394074997565
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3019998732488602e-06,
"count": 1,
"self": 1.3019998732488602e-06
},
"TrainerController._save_models": {
"total": 0.07587698300039847,
"count": 1,
"self": 0.0010460390003572684,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0748309440000412,
"count": 1,
"self": 0.0748309440000412
}
}
}
}
}
}
}