{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4642966687679291,
"min": 0.4642966687679291,
"max": 1.484009861946106,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13943.7578125,
"min": 13943.7578125,
"max": 45018.921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989965.0,
"min": 29898.0,
"max": 989965.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989965.0,
"min": 29898.0,
"max": 989965.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.021825531497597694,
"min": -0.09786876291036606,
"max": 0.09173505008220673,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 5.369080543518066,
"min": -23.58637237548828,
"max": 22.62625503540039,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.3118276000022888,
"min": -0.43863627314567566,
"max": 0.3407142162322998,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -76.70958709716797,
"min": -108.78179931640625,
"max": 80.749267578125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06967869530473013,
"min": 0.06509801640693934,
"max": 0.0739830561770861,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.045180429570952,
"min": 0.4945994608832494,
"max": 1.045180429570952,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.067979579320287,
"min": 0.0003506637609488277,
"max": 0.08851575417997164,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 1.019693689804305,
"min": 0.004909292653283588,
"max": 1.239220558519603,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.530097490000003e-06,
"min": 7.530097490000003e-06,
"max": 0.0002952352301596857,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011295146235000004,
"min": 0.00011295146235000004,
"max": 0.0033829793723402996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251000000000002,
"min": 0.10251000000000002,
"max": 0.19841174285714283,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5376500000000002,
"min": 1.3888821999999998,
"max": 2.5276597000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002607490000000001,
"min": 0.0002607490000000001,
"max": 0.00984133311142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003911235000000002,
"min": 0.003911235000000002,
"max": 0.11279320403,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009491994976997375,
"min": 0.008725428022444248,
"max": 0.3398001492023468,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14237992465496063,
"min": 0.12215598672628403,
"max": 2.37860107421875,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 825.6857142857143,
"min": 748.4102564102565,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28899.0,
"min": 16569.0,
"max": 32554.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.14031432356153214,
"min": -1.0000000521540642,
"max": 0.3282409902566519,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -4.9110013246536255,
"min": -32.000001668930054,
"max": 12.801398620009422,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.14031432356153214,
"min": -1.0000000521540642,
"max": 0.3282409902566519,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -4.9110013246536255,
"min": -32.000001668930054,
"max": 12.801398620009422,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08124714892557157,
"min": 0.07665323025975267,
"max": 6.185329051578746,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.843650212395005,
"min": 2.8361695196108485,
"max": 105.15059387683868,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1694363475",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1694365519"
},
"total": 2044.0806629070003,
"count": 1,
"self": 0.4927108330005012,
"children": {
"run_training.setup": {
"total": 0.04433304600001975,
"count": 1,
"self": 0.04433304600001975
},
"TrainerController.start_learning": {
"total": 2043.5436190279997,
"count": 1,
"self": 1.3771754659851467,
"children": {
"TrainerController._reset_env": {
"total": 4.156572452999853,
"count": 1,
"self": 4.156572452999853
},
"TrainerController.advance": {
"total": 2037.915302879015,
"count": 63180,
"self": 1.3317532609969476,
"children": {
"env_step": {
"total": 1379.314795601962,
"count": 63180,
"self": 1271.876302146006,
"children": {
"SubprocessEnvManager._take_step": {
"total": 106.62875964598447,
"count": 63180,
"self": 4.687830542979555,
"children": {
"TorchPolicy.evaluate": {
"total": 101.94092910300492,
"count": 62557,
"self": 101.94092910300492
}
}
},
"workers": {
"total": 0.8097338099714761,
"count": 63180,
"self": 0.0,
"children": {
"worker_root": {
"total": 2039.2797932900069,
"count": 63180,
"is_parallel": true,
"self": 876.2746777990403,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017246040001737128,
"count": 1,
"is_parallel": true,
"self": 0.0005186970001886948,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001205906999985018,
"count": 8,
"is_parallel": true,
"self": 0.001205906999985018
}
}
},
"UnityEnvironment.step": {
"total": 0.04870490199982669,
"count": 1,
"is_parallel": true,
"self": 0.0005617659994641144,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005240820000835811,
"count": 1,
"is_parallel": true,
"self": 0.0005240820000835811
},
"communicator.exchange": {
"total": 0.04570557000010922,
"count": 1,
"is_parallel": true,
"self": 0.04570557000010922
},
"steps_from_proto": {
"total": 0.001913484000169774,
"count": 1,
"is_parallel": true,
"self": 0.0003808619999290386,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015326220002407354,
"count": 8,
"is_parallel": true,
"self": 0.0015326220002407354
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1163.0051154909665,
"count": 63179,
"is_parallel": true,
"self": 33.56994190689488,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.235461105013655,
"count": 63179,
"is_parallel": true,
"self": 23.235461105013655
},
"communicator.exchange": {
"total": 1002.60224397604,
"count": 63179,
"is_parallel": true,
"self": 1002.60224397604
},
"steps_from_proto": {
"total": 103.59746850301804,
"count": 63179,
"is_parallel": true,
"self": 20.00788754392397,
"children": {
"_process_rank_one_or_two_observation": {
"total": 83.58958095909406,
"count": 505432,
"is_parallel": true,
"self": 83.58958095909406
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 657.2687540160559,
"count": 63180,
"self": 2.4335144220012808,
"children": {
"process_trajectory": {
"total": 108.73936469205478,
"count": 63180,
"self": 108.52997547605469,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20938921600009053,
"count": 2,
"self": 0.20938921600009053
}
}
},
"_update_policy": {
"total": 546.0958749019999,
"count": 442,
"self": 355.6355490569813,
"children": {
"TorchPPOOptimizer.update": {
"total": 190.4603258450186,
"count": 22767,
"self": 190.4603258450186
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0489998203411233e-06,
"count": 1,
"self": 1.0489998203411233e-06
},
"TrainerController._save_models": {
"total": 0.09456718100000217,
"count": 1,
"self": 0.0015239540002767171,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09304322699972545,
"count": 1,
"self": 0.09304322699972545
}
}
}
}
}
}
}