{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.40752285718917847,
"min": 0.40752285718917847,
"max": 1.49339759349823,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12206.125,
"min": 12206.125,
"max": 45303.7109375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989948.0,
"min": 29912.0,
"max": 989948.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989948.0,
"min": 29912.0,
"max": 989948.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3860066831111908,
"min": -0.10693488270044327,
"max": 0.4617581367492676,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 100.36174011230469,
"min": -25.87824249267578,
"max": 125.1364517211914,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.023606572300195694,
"min": -0.008749131113290787,
"max": 0.3732135593891144,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.13770866394043,
"min": -2.2922723293304443,
"max": 88.45161437988281,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07030392752078894,
"min": 0.06607135672155502,
"max": 0.0743340314427094,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9842549852910452,
"min": 0.5203382200989658,
"max": 1.1073169790152289,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013754318370566967,
"min": 0.00029005192281029605,
"max": 0.015066435062792398,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19256045718793754,
"min": 0.003480623073723553,
"max": 0.21093009087909356,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.678640297628573e-06,
"min": 7.678640297628573e-06,
"max": 0.0002952346301598857,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010750096416680001,
"min": 0.00010750096416680001,
"max": 0.0035080280306573996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10255951428571428,
"min": 0.10255951428571428,
"max": 0.19841154285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4358331999999998,
"min": 1.3888808,
"max": 2.5693426,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002656954771428572,
"min": 0.0002656954771428572,
"max": 0.00984131313142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003719736680000001,
"min": 0.003719736680000001,
"max": 0.11695732573999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009198606014251709,
"min": 0.009112351574003696,
"max": 0.3367597758769989,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12878048419952393,
"min": 0.1275729238986969,
"max": 2.35731840133667,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 444.42028985507244,
"min": 385.3943661971831,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30665.0,
"min": 16711.0,
"max": 33170.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.2989352684029762,
"min": -0.9999500517733395,
"max": 1.4908816716830495,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 88.32759825140238,
"min": -31.998401656746864,
"max": 107.45699825882912,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.2989352684029762,
"min": -0.9999500517733395,
"max": 1.4908816716830495,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 88.32759825140238,
"min": -31.998401656746864,
"max": 107.45699825882912,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04261775434732793,
"min": 0.03818512734378109,
"max": 6.163425108965705,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.8980072956182994,
"min": 2.8256994234398007,
"max": 104.77822685241699,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692412996",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training 1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692415406"
},
"total": 2409.825752377,
"count": 1,
"self": 1.3926586269999461,
"children": {
"run_training.setup": {
"total": 0.045228499000131706,
"count": 1,
"self": 0.045228499000131706
},
"TrainerController.start_learning": {
"total": 2408.3878652509998,
"count": 1,
"self": 1.5227404922102323,
"children": {
"TrainerController._reset_env": {
"total": 4.372809562999919,
"count": 1,
"self": 4.372809562999919
},
"TrainerController.advance": {
"total": 2402.31347861079,
"count": 63613,
"self": 1.5426062917786112,
"children": {
"env_step": {
"total": 1654.100119175012,
"count": 63613,
"self": 1533.5483556158597,
"children": {
"SubprocessEnvManager._take_step": {
"total": 119.6537796502198,
"count": 63613,
"self": 5.315162956189852,
"children": {
"TorchPolicy.evaluate": {
"total": 114.33861669402995,
"count": 62564,
"self": 114.33861669402995
}
}
},
"workers": {
"total": 0.8979839089324742,
"count": 63613,
"self": 0.0,
"children": {
"worker_root": {
"total": 2402.473869754956,
"count": 63613,
"is_parallel": true,
"self": 995.5665797390566,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020417639998413506,
"count": 1,
"is_parallel": true,
"self": 0.0006419350002033752,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013998289996379754,
"count": 8,
"is_parallel": true,
"self": 0.0013998289996379754
}
}
},
"UnityEnvironment.step": {
"total": 0.05427327000006699,
"count": 1,
"is_parallel": true,
"self": 0.0006510250004794216,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005587609998656262,
"count": 1,
"is_parallel": true,
"self": 0.0005587609998656262
},
"communicator.exchange": {
"total": 0.050948230999892985,
"count": 1,
"is_parallel": true,
"self": 0.050948230999892985
},
"steps_from_proto": {
"total": 0.0021152529998289538,
"count": 1,
"is_parallel": true,
"self": 0.00040692199900149717,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017083310008274566,
"count": 8,
"is_parallel": true,
"self": 0.0017083310008274566
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1406.9072900158994,
"count": 63612,
"is_parallel": true,
"self": 37.35189973191518,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.770873719022802,
"count": 63612,
"is_parallel": true,
"self": 24.770873719022802
},
"communicator.exchange": {
"total": 1227.4933341469978,
"count": 63612,
"is_parallel": true,
"self": 1227.4933341469978
},
"steps_from_proto": {
"total": 117.29118241796368,
"count": 63612,
"is_parallel": true,
"self": 22.880441734859687,
"children": {
"_process_rank_one_or_two_observation": {
"total": 94.410740683104,
"count": 508896,
"is_parallel": true,
"self": 94.410740683104
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 746.6707531439993,
"count": 63613,
"self": 2.7814571399553643,
"children": {
"process_trajectory": {
"total": 124.43886888404768,
"count": 63613,
"self": 124.21180837704742,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22706050700026026,
"count": 2,
"self": 0.22706050700026026
}
}
},
"_update_policy": {
"total": 619.4504271199962,
"count": 450,
"self": 407.0061091980242,
"children": {
"TorchPPOOptimizer.update": {
"total": 212.44431792197202,
"count": 22776,
"self": 212.44431792197202
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4739998732693493e-06,
"count": 1,
"self": 1.4739998732693493e-06
},
"TrainerController._save_models": {
"total": 0.17883511099989846,
"count": 1,
"self": 0.0022213660004126723,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1766137449994858,
"count": 1,
"self": 0.1766137449994858
}
}
}
}
}
}
}