{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4460318386554718,
"min": 0.44211599230766296,
"max": 1.4537209272384644,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13252.498046875,
"min": 13252.498046875,
"max": 44100.078125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989878.0,
"min": 29952.0,
"max": 989878.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989878.0,
"min": 29952.0,
"max": 989878.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5842247009277344,
"min": -0.08802789449691772,
"max": 0.5842247009277344,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 164.75137329101562,
"min": -21.2147216796875,
"max": 164.75137329101562,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 1.3349430561065674,
"min": -0.0015548632945865393,
"max": 1.3349430561065674,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 376.4539489746094,
"min": -0.42603254318237305,
"max": 376.4539489746094,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07265878431748465,
"min": 0.06419499818740591,
"max": 0.07265878431748465,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0172229804447852,
"min": 0.4893843500375488,
"max": 1.067990642523749,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.1155083061062864,
"min": 9.186684581824325e-05,
"max": 0.1155083061062864,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 1.6171162854880097,
"min": 0.0011942689956371622,
"max": 1.6171162854880097,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.693511721242856e-06,
"min": 7.693511721242856e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010770916409739998,
"min": 0.00010770916409739998,
"max": 0.0032552750149084002,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10256447142857143,
"min": 0.10256447142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4359026000000001,
"min": 1.3691136000000002,
"max": 2.4017749,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002661906957142857,
"min": 0.0002661906957142857,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00372666974,
"min": 0.00372666974,
"max": 0.10853065083999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008882294408977032,
"min": 0.008882294408977032,
"max": 0.350123792886734,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1243521198630333,
"min": 0.1243521198630333,
"max": 2.450866460800171,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 336.20212765957444,
"min": 336.20212765957444,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31603.0,
"min": 15984.0,
"max": 33583.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.578670191320967,
"min": -1.0000000521540642,
"max": 1.578670191320967,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 148.3949979841709,
"min": -32.000001668930054,
"max": 148.3949979841709,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.578670191320967,
"min": -1.0000000521540642,
"max": 1.578670191320967,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 148.3949979841709,
"min": -32.000001668930054,
"max": 148.3949979841709,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.030779373164206963,
"min": 0.030779373164206963,
"max": 6.856070193462074,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.8932610774354544,
"min": 2.661539213040669,
"max": 109.69712309539318,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1744015930",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1744018221"
},
"total": 2290.722073011,
"count": 1,
"self": 0.43676146599955246,
"children": {
"run_training.setup": {
"total": 0.02109555900005944,
"count": 1,
"self": 0.02109555900005944
},
"TrainerController.start_learning": {
"total": 2290.264215986,
"count": 1,
"self": 1.573788936006622,
"children": {
"TrainerController._reset_env": {
"total": 2.2342461240000375,
"count": 1,
"self": 2.2342461240000375
},
"TrainerController.advance": {
"total": 2286.3607311059936,
"count": 63647,
"self": 1.6683506080494226,
"children": {
"env_step": {
"total": 1596.4265780539877,
"count": 63647,
"self": 1424.7984435249587,
"children": {
"SubprocessEnvManager._take_step": {
"total": 170.66241646803428,
"count": 63647,
"self": 5.090897313039477,
"children": {
"TorchPolicy.evaluate": {
"total": 165.5715191549948,
"count": 62573,
"self": 165.5715191549948
}
}
},
"workers": {
"total": 0.9657180609947318,
"count": 63647,
"self": 0.0,
"children": {
"worker_root": {
"total": 2284.5383960819518,
"count": 63647,
"is_parallel": true,
"self": 982.4516564809842,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002339599000151793,
"count": 1,
"is_parallel": true,
"self": 0.000870815999860497,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001468783000291296,
"count": 8,
"is_parallel": true,
"self": 0.001468783000291296
}
}
},
"UnityEnvironment.step": {
"total": 0.07431689399982133,
"count": 1,
"is_parallel": true,
"self": 0.0005651749997923616,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005427040000540728,
"count": 1,
"is_parallel": true,
"self": 0.0005427040000540728
},
"communicator.exchange": {
"total": 0.07157134099998075,
"count": 1,
"is_parallel": true,
"self": 0.07157134099998075
},
"steps_from_proto": {
"total": 0.0016376739999941492,
"count": 1,
"is_parallel": true,
"self": 0.0004172089998064621,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012204650001876871,
"count": 8,
"is_parallel": true,
"self": 0.0012204650001876871
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1302.0867396009676,
"count": 63646,
"is_parallel": true,
"self": 33.15473731183192,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.03225522196908,
"count": 63646,
"is_parallel": true,
"self": 24.03225522196908
},
"communicator.exchange": {
"total": 1143.3972427610306,
"count": 63646,
"is_parallel": true,
"self": 1143.3972427610306
},
"steps_from_proto": {
"total": 101.50250430613596,
"count": 63646,
"is_parallel": true,
"self": 21.568221875038944,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.93428243109702,
"count": 509168,
"is_parallel": true,
"self": 79.93428243109702
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 688.2658024439565,
"count": 63647,
"self": 3.0796021269979974,
"children": {
"process_trajectory": {
"total": 130.84424155796478,
"count": 63647,
"self": 130.64059839196443,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20364316600034726,
"count": 2,
"self": 0.20364316600034726
}
}
},
"_update_policy": {
"total": 554.3419587589938,
"count": 439,
"self": 302.23017903699065,
"children": {
"TorchPPOOptimizer.update": {
"total": 252.11177972200312,
"count": 22839,
"self": 252.11177972200312
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1299998732283711e-06,
"count": 1,
"self": 1.1299998732283711e-06
},
"TrainerController._save_models": {
"total": 0.095448690000012,
"count": 1,
"self": 0.0012910859995827195,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09415760400042927,
"count": 1,
"self": 0.09415760400042927
}
}
}
}
}
}
}