{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5068792700767517,
"min": 0.5068792700767517,
"max": 1.4242726564407349,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 15198.267578125,
"min": 15198.267578125,
"max": 43206.734375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989879.0,
"min": 29952.0,
"max": 989879.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989879.0,
"min": 29952.0,
"max": 989879.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5484990477561951,
"min": -0.12224862724542618,
"max": 0.5484990477561951,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 153.03123474121094,
"min": -29.4619197845459,
"max": 153.03123474121094,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0894196629524231,
"min": -0.06227090582251549,
"max": 0.537778913974762,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 24.94808578491211,
"min": -17.37358283996582,
"max": 127.45360565185547,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06789372474720169,
"min": 0.06546683811354449,
"max": 0.07335847620382042,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9505121464608237,
"min": 0.513509333426743,
"max": 1.0369407651984752,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.019398938021151394,
"min": 0.0008736946234847977,
"max": 0.019398938021151394,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2715851322961195,
"min": 0.010484335481817573,
"max": 0.2715851322961195,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4536618011928534e-06,
"min": 7.4536618011928534e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010435126521669995,
"min": 0.00010435126521669995,
"max": 0.0036342112885962992,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248452142857144,
"min": 0.10248452142857144,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4347833,
"min": 1.3886848,
"max": 2.6114037,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002582036907142856,
"min": 0.0002582036907142856,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036148516699999983,
"min": 0.0036148516699999983,
"max": 0.12115922963,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013974979519844055,
"min": 0.013974979519844055,
"max": 0.5730299353599548,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19564971327781677,
"min": 0.19564971327781677,
"max": 4.011209487915039,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 352.3764705882353,
"min": 337.42528735632186,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29952.0,
"min": 15984.0,
"max": 32980.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5770117479212145,
"min": -1.0000000521540642,
"max": 1.6165792889807415,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 134.04599857330322,
"min": -29.183401599526405,
"max": 140.64239814132452,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5770117479212145,
"min": -1.0000000521540642,
"max": 1.6165792889807415,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 134.04599857330322,
"min": -29.183401599526405,
"max": 140.64239814132452,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0513782316709266,
"min": 0.050277656997736654,
"max": 11.868546033278108,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.367149692028761,
"min": 4.367149692028761,
"max": 189.89673653244972,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740235760",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training 1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1740237966"
},
"total": 2205.843345387,
"count": 1,
"self": 0.47572103000038624,
"children": {
"run_training.setup": {
"total": 0.019403168999815534,
"count": 1,
"self": 0.019403168999815534
},
"TrainerController.start_learning": {
"total": 2205.348221188,
"count": 1,
"self": 1.3324631301056797,
"children": {
"TrainerController._reset_env": {
"total": 2.122890899999902,
"count": 1,
"self": 2.122890899999902
},
"TrainerController.advance": {
"total": 2201.805272837895,
"count": 63734,
"self": 1.310865644889418,
"children": {
"env_step": {
"total": 1534.379492384073,
"count": 63734,
"self": 1382.57479416901,
"children": {
"SubprocessEnvManager._take_step": {
"total": 151.03295172999492,
"count": 63734,
"self": 4.613005919019315,
"children": {
"TorchPolicy.evaluate": {
"total": 146.4199458109756,
"count": 62573,
"self": 146.4199458109756
}
}
},
"workers": {
"total": 0.7717464850682063,
"count": 63734,
"self": 0.0,
"children": {
"worker_root": {
"total": 2200.3902346630216,
"count": 63734,
"is_parallel": true,
"self": 929.1048737610283,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020038119998844195,
"count": 1,
"is_parallel": true,
"self": 0.0006526329998450819,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013511790000393376,
"count": 8,
"is_parallel": true,
"self": 0.0013511790000393376
}
}
},
"UnityEnvironment.step": {
"total": 0.04622816799997054,
"count": 1,
"is_parallel": true,
"self": 0.0005513619998964714,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00046977700003481004,
"count": 1,
"is_parallel": true,
"self": 0.00046977700003481004
},
"communicator.exchange": {
"total": 0.04362384299997757,
"count": 1,
"is_parallel": true,
"self": 0.04362384299997757
},
"steps_from_proto": {
"total": 0.001583186000061687,
"count": 1,
"is_parallel": true,
"self": 0.0003243779997319507,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012588080003297364,
"count": 8,
"is_parallel": true,
"self": 0.0012588080003297364
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1271.2853609019933,
"count": 63733,
"is_parallel": true,
"self": 31.303316082004585,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.258138698987295,
"count": 63733,
"is_parallel": true,
"self": 23.258138698987295
},
"communicator.exchange": {
"total": 1120.2953386080212,
"count": 63733,
"is_parallel": true,
"self": 1120.2953386080212
},
"steps_from_proto": {
"total": 96.42856751298018,
"count": 63733,
"is_parallel": true,
"self": 19.44254916807131,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.98601834490887,
"count": 509864,
"is_parallel": true,
"self": 76.98601834490887
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 666.1149148089323,
"count": 63734,
"self": 2.5814839528866287,
"children": {
"process_trajectory": {
"total": 127.13588374704796,
"count": 63734,
"self": 126.93237409404719,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20350965300076496,
"count": 2,
"self": 0.20350965300076496
}
}
},
"_update_policy": {
"total": 536.3975471089977,
"count": 456,
"self": 295.539465739,
"children": {
"TorchPPOOptimizer.update": {
"total": 240.85808136999776,
"count": 22785,
"self": 240.85808136999776
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1829997674794868e-06,
"count": 1,
"self": 1.1829997674794868e-06
},
"TrainerController._save_models": {
"total": 0.08759313700011262,
"count": 1,
"self": 0.0014959399995859712,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08609719700052665,
"count": 1,
"self": 0.08609719700052665
}
}
}
}
}
}
}