{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4764539897441864,
"min": 0.4764539897441864,
"max": 1.4272775650024414,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14385.0986328125,
"min": 14385.0986328125,
"max": 43297.890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989940.0,
"min": 29980.0,
"max": 989940.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989940.0,
"min": 29980.0,
"max": 989940.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5667742490768433,
"min": -0.08340947329998016,
"max": 0.6606265306472778,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 158.13002014160156,
"min": -20.101682662963867,
"max": 190.26043701171875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.11515995115041733,
"min": -0.003017711453139782,
"max": 0.5006617903709412,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 32.1296272277832,
"min": -0.8027112483978271,
"max": 119.15750122070312,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06712305085689182,
"min": 0.06488227691640269,
"max": 0.07764958733757185,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9397227119964856,
"min": 0.5435471113630029,
"max": 1.1090968950844473,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017082306797549664,
"min": 0.0006127794450074022,
"max": 0.017082306797549664,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23915229516569528,
"min": 0.007966132785096228,
"max": 0.24142436902426806,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.70523314590714e-06,
"min": 7.70523314590714e-06,
"max": 0.0002952372015876,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010787326404269997,
"min": 0.00010787326404269997,
"max": 0.0036340465886512,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10256837857142857,
"min": 0.10256837857142857,
"max": 0.1984124,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4359573,
"min": 1.3888867999999999,
"max": 2.6113488000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002665810192857142,
"min": 0.0002665810192857142,
"max": 0.009841398759999998,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037321342699999987,
"min": 0.0037321342699999987,
"max": 0.12115374512,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011156298220157623,
"min": 0.011127087287604809,
"max": 0.4877972900867462,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15618817508220673,
"min": 0.15577922761440277,
"max": 3.414581060409546,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 331.80808080808083,
"min": 292.0377358490566,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32849.0,
"min": 16651.0,
"max": 34085.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5841938578808794,
"min": -0.9997600515683492,
"max": 1.6676139868795872,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 155.25099807232618,
"min": -29.992801547050476,
"max": 175.10119865834713,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5841938578808794,
"min": -0.9997600515683492,
"max": 1.6676139868795872,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 155.25099807232618,
"min": -29.992801547050476,
"max": 175.10119865834713,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03865347308200565,
"min": 0.03555533070292133,
"max": 10.289797559380531,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.788040362036554,
"min": 3.285915789019782,
"max": 174.92655850946903,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1725980980",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1725984278"
},
"total": 3298.5214353949996,
"count": 1,
"self": 0.6432477890002701,
"children": {
"run_training.setup": {
"total": 0.07870711799932906,
"count": 1,
"self": 0.07870711799932906
},
"TrainerController.start_learning": {
"total": 3297.799480488,
"count": 1,
"self": 2.38693228689408,
"children": {
"TrainerController._reset_env": {
"total": 2.4741040600001725,
"count": 1,
"self": 2.4741040600001725
},
"TrainerController.advance": {
"total": 3292.8451459541047,
"count": 63955,
"self": 2.5633692985311427,
"children": {
"env_step": {
"total": 2200.050960764842,
"count": 63955,
"self": 2034.676146306826,
"children": {
"SubprocessEnvManager._take_step": {
"total": 163.89855446397632,
"count": 63955,
"self": 7.387101810763852,
"children": {
"TorchPolicy.evaluate": {
"total": 156.51145265321247,
"count": 62554,
"self": 156.51145265321247
}
}
},
"workers": {
"total": 1.4762599940395376,
"count": 63955,
"self": 0.0,
"children": {
"worker_root": {
"total": 3290.479997236016,
"count": 63955,
"is_parallel": true,
"self": 1448.0561869931362,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003337672000270686,
"count": 1,
"is_parallel": true,
"self": 0.0011844049986393657,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021532670016313205,
"count": 8,
"is_parallel": true,
"self": 0.0021532670016313205
}
}
},
"UnityEnvironment.step": {
"total": 0.07182861900037096,
"count": 1,
"is_parallel": true,
"self": 0.001029514000947529,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000576311000259011,
"count": 1,
"is_parallel": true,
"self": 0.000576311000259011
},
"communicator.exchange": {
"total": 0.06753455899979599,
"count": 1,
"is_parallel": true,
"self": 0.06753455899979599
},
"steps_from_proto": {
"total": 0.002688234999368433,
"count": 1,
"is_parallel": true,
"self": 0.0005794039971078746,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021088310022605583,
"count": 8,
"is_parallel": true,
"self": 0.0021088310022605583
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1842.4238102428799,
"count": 63954,
"is_parallel": true,
"self": 51.89029461487644,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 31.299956191935053,
"count": 63954,
"is_parallel": true,
"self": 31.299956191935053
},
"communicator.exchange": {
"total": 1626.961018433999,
"count": 63954,
"is_parallel": true,
"self": 1626.961018433999
},
"steps_from_proto": {
"total": 132.27254100206937,
"count": 63954,
"is_parallel": true,
"self": 28.554598322202764,
"children": {
"_process_rank_one_or_two_observation": {
"total": 103.71794267986661,
"count": 511632,
"is_parallel": true,
"self": 103.71794267986661
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1090.2308158907317,
"count": 63955,
"self": 4.742546684825356,
"children": {
"process_trajectory": {
"total": 171.39340981390433,
"count": 63955,
"self": 171.14427214490297,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2491376690013567,
"count": 2,
"self": 0.2491376690013567
}
}
},
"_update_policy": {
"total": 914.094859392002,
"count": 457,
"self": 374.71823136003695,
"children": {
"TorchPPOOptimizer.update": {
"total": 539.3766280319651,
"count": 22767,
"self": 539.3766280319651
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.520008461549878e-07,
"count": 1,
"self": 9.520008461549878e-07
},
"TrainerController._save_models": {
"total": 0.09329723500013642,
"count": 1,
"self": 0.001990328999454505,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09130690600068192,
"count": 1,
"self": 0.09130690600068192
}
}
}
}
}
}
}