{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3095568120479584,
"min": 0.3032616674900055,
"max": 1.398167371749878,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9286.7041015625,
"min": 9141.51953125,
"max": 42414.8046875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989937.0,
"min": 29952.0,
"max": 989937.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989937.0,
"min": 29952.0,
"max": 989937.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6718105673789978,
"min": -0.0989900454878807,
"max": 0.6718105673789978,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 197.51231384277344,
"min": -23.757610321044922,
"max": 197.51231384277344,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.012169343419373035,
"min": -0.034034181386232376,
"max": 0.340353399515152,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -3.577786922454834,
"min": -8.88292121887207,
"max": 80.66375732421875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0664614228333535,
"min": 0.06537212099667636,
"max": 0.07339101404936736,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9304599196669491,
"min": 0.4961760442671513,
"max": 1.0946579658581563,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017766982927111843,
"min": 0.0006795215624811844,
"max": 0.017766982927111843,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2487377609795658,
"min": 0.004756650937368291,
"max": 0.2487377609795658,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.312811848142857e-06,
"min": 7.312811848142857e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010237936587399999,
"min": 0.00010237936587399999,
"max": 0.0033794447735185,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243757142857142,
"min": 0.10243757142857142,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4341259999999998,
"min": 1.3691136000000002,
"max": 2.5264815,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025351338571428573,
"min": 0.00025351338571428573,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035491874000000002,
"min": 0.0035491874000000002,
"max": 0.11267550185,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.0102896299213171,
"min": 0.01018145214766264,
"max": 0.4057447612285614,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1440548151731491,
"min": 0.1440548151731491,
"max": 2.8402132987976074,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 282.6371681415929,
"min": 282.6371681415929,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31938.0,
"min": 15984.0,
"max": 32969.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6812803447246552,
"min": -1.0000000521540642,
"max": 1.6887821707837651,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 188.30339860916138,
"min": -32.000001668930054,
"max": 188.30339860916138,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6812803447246552,
"min": -1.0000000521540642,
"max": 1.6887821707837651,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 188.30339860916138,
"min": -32.000001668930054,
"max": 188.30339860916138,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.029473348894628413,
"min": 0.029473348894628413,
"max": 7.22762886621058,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.3010150761983823,
"min": 3.181157567509217,
"max": 115.64206185936928,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692192555",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692194992"
},
"total": 2436.992459815,
"count": 1,
"self": 0.5258408330000748,
"children": {
"run_training.setup": {
"total": 0.0404048390000753,
"count": 1,
"self": 0.0404048390000753
},
"TrainerController.start_learning": {
"total": 2436.426214143,
"count": 1,
"self": 1.5273461460278668,
"children": {
"TrainerController._reset_env": {
"total": 4.160288271999889,
"count": 1,
"self": 4.160288271999889
},
"TrainerController.advance": {
"total": 2430.6400292709723,
"count": 64203,
"self": 1.5485170009023932,
"children": {
"env_step": {
"total": 1738.9232867290466,
"count": 64203,
"self": 1622.211074631048,
"children": {
"SubprocessEnvManager._take_step": {
"total": 115.77913239503141,
"count": 64203,
"self": 5.180402942013643,
"children": {
"TorchPolicy.evaluate": {
"total": 110.59872945301777,
"count": 62549,
"self": 110.59872945301777
}
}
},
"workers": {
"total": 0.933079702967234,
"count": 64203,
"self": 0.0,
"children": {
"worker_root": {
"total": 2430.515098886929,
"count": 64203,
"is_parallel": true,
"self": 932.6499854109318,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001753835000045001,
"count": 1,
"is_parallel": true,
"self": 0.0005451980002817436,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012086369997632573,
"count": 8,
"is_parallel": true,
"self": 0.0012086369997632573
}
}
},
"UnityEnvironment.step": {
"total": 0.06420976100002918,
"count": 1,
"is_parallel": true,
"self": 0.0005740160002005723,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005153820000032283,
"count": 1,
"is_parallel": true,
"self": 0.0005153820000032283
},
"communicator.exchange": {
"total": 0.06112837099999524,
"count": 1,
"is_parallel": true,
"self": 0.06112837099999524
},
"steps_from_proto": {
"total": 0.001991991999830134,
"count": 1,
"is_parallel": true,
"self": 0.00040325199961444014,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015887400002156937,
"count": 8,
"is_parallel": true,
"self": 0.0015887400002156937
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1497.8651134759973,
"count": 64202,
"is_parallel": true,
"self": 35.78623093093188,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.708459393006706,
"count": 64202,
"is_parallel": true,
"self": 23.708459393006706
},
"communicator.exchange": {
"total": 1327.306213158031,
"count": 64202,
"is_parallel": true,
"self": 1327.306213158031
},
"steps_from_proto": {
"total": 111.0642099940278,
"count": 64202,
"is_parallel": true,
"self": 22.16578573504171,
"children": {
"_process_rank_one_or_two_observation": {
"total": 88.8984242589861,
"count": 513616,
"is_parallel": true,
"self": 88.8984242589861
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 690.1682255410235,
"count": 64203,
"self": 2.965053096094607,
"children": {
"process_trajectory": {
"total": 117.5684420469297,
"count": 64203,
"self": 117.28993616593016,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2785058809995462,
"count": 2,
"self": 0.2785058809995462
}
}
},
"_update_policy": {
"total": 569.6347303979992,
"count": 450,
"self": 367.41860711900154,
"children": {
"TorchPPOOptimizer.update": {
"total": 202.21612327899766,
"count": 22869,
"self": 202.21612327899766
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.259997568733525e-07,
"count": 1,
"self": 9.259997568733525e-07
},
"TrainerController._save_models": {
"total": 0.09854952800014871,
"count": 1,
"self": 0.0015566450001642806,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09699288299998443,
"count": 1,
"self": 0.09699288299998443
}
}
}
}
}
}
}