{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.34482669830322266,
"min": 0.34482669830322266,
"max": 1.4624285697937012,
"count": 37
},
"Pyramids.Policy.Entropy.sum": {
"value": 10344.80078125,
"min": 10344.80078125,
"max": 44364.234375,
"count": 37
},
"Pyramids.Step.mean": {
"value": 1109901.0,
"min": 29952.0,
"max": 1109901.0,
"count": 37
},
"Pyramids.Step.sum": {
"value": 1109901.0,
"min": 29952.0,
"max": 1109901.0,
"count": 37
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6680407524108887,
"min": -0.15676526725292206,
"max": 0.7045049667358398,
"count": 37
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 189.72357177734375,
"min": -37.15336990356445,
"max": 202.89743041992188,
"count": 37
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.010759801603853703,
"min": 0.0036925566382706165,
"max": 0.39287158846855164,
"count": 37
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.055783748626709,
"min": 0.9526796340942383,
"max": 93.11056518554688,
"count": 37
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07033778849983632,
"min": 0.06535702866044923,
"max": 0.07368001354735777,
"count": 37
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9847290389977086,
"min": 0.512197334549173,
"max": 1.1052002032103665,
"count": 37
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015167649777140468,
"min": 0.0013843536278094065,
"max": 0.016498862148027118,
"count": 37
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21234709687996656,
"min": 0.01938095078933169,
"max": 0.23098407007237967,
"count": 37
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00019047644365072144,
"min": 0.00019047644365072144,
"max": 0.00029838354339596195,
"count": 37
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0026666702111101,
"min": 0.0020886848037717336,
"max": 0.004010746163084633,
"count": 37
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.16349213571428572,
"min": 0.16349213571428572,
"max": 0.19946118095238097,
"count": 37
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.2888899,
"min": 1.3962282666666668,
"max": 2.812616433333333,
"count": 37
},
"Pyramids.Policy.Beta.mean": {
"value": 0.006352864357857144,
"min": 0.006352864357857144,
"max": 0.009946171977142856,
"count": 37
},
"Pyramids.Policy.Beta.sum": {
"value": 0.08894010101000001,
"min": 0.06962320384,
"max": 0.13369784513,
"count": 37
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008566516451537609,
"min": 0.008566516451537609,
"max": 0.4927542507648468,
"count": 37
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11993123590946198,
"min": 0.11993123590946198,
"max": 3.44927978515625,
"count": 37
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 305.16161616161617,
"min": 287.6698113207547,
"max": 999.0,
"count": 37
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30211.0,
"min": 15984.0,
"max": 33309.0,
"count": 37
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5878619851171971,
"min": -1.0000000521540642,
"max": 1.6934528171594412,
"count": 37
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 158.7861985117197,
"min": -25.830201491713524,
"max": 179.50599861890078,
"count": 37
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5878619851171971,
"min": -1.0000000521540642,
"max": 1.6934528171594412,
"count": 37
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 158.7861985117197,
"min": -25.830201491713524,
"max": 179.50599861890078,
"count": 37
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02777139512472786,
"min": 0.027392642043269008,
"max": 10.290611356496811,
"count": 37
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.777139512472786,
"min": 2.668499246588908,
"max": 164.64978170394897,
"count": 37
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 37
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 37
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1706875710",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training 1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1706878104"
},
"total": 2393.907917657,
"count": 1,
"self": 0.41226748900044186,
"children": {
"run_training.setup": {
"total": 0.04601177299991832,
"count": 1,
"self": 0.04601177299991832
},
"TrainerController.start_learning": {
"total": 2393.4496383949995,
"count": 1,
"self": 1.38982433801948,
"children": {
"TrainerController._reset_env": {
"total": 2.8399101289996906,
"count": 1,
"self": 2.8399101289996906
},
"TrainerController.advance": {
"total": 2389.088251951979,
"count": 71213,
"self": 1.5297376669891491,
"children": {
"env_step": {
"total": 1703.653578906008,
"count": 71213,
"self": 1560.409846672957,
"children": {
"SubprocessEnvManager._take_step": {
"total": 142.36788957802128,
"count": 71213,
"self": 5.238596605027851,
"children": {
"TorchPolicy.evaluate": {
"total": 137.12929297299343,
"count": 69479,
"self": 137.12929297299343
}
}
},
"workers": {
"total": 0.8758426550298282,
"count": 71212,
"self": 0.0,
"children": {
"worker_root": {
"total": 2387.9807726009694,
"count": 71212,
"is_parallel": true,
"self": 953.7730105051587,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0033214820000466716,
"count": 1,
"is_parallel": true,
"self": 0.0008232330005739641,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0024982489994727075,
"count": 8,
"is_parallel": true,
"self": 0.0024982489994727075
}
}
},
"UnityEnvironment.step": {
"total": 0.05002011099986703,
"count": 1,
"is_parallel": true,
"self": 0.0005981559993415431,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005536230000871001,
"count": 1,
"is_parallel": true,
"self": 0.0005536230000871001
},
"communicator.exchange": {
"total": 0.047134647000348195,
"count": 1,
"is_parallel": true,
"self": 0.047134647000348195
},
"steps_from_proto": {
"total": 0.0017336850000901904,
"count": 1,
"is_parallel": true,
"self": 0.00035011099907933385,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013835740010108566,
"count": 8,
"is_parallel": true,
"self": 0.0013835740010108566
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1434.2077620958107,
"count": 71211,
"is_parallel": true,
"self": 38.5775411105833,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 28.361748394099777,
"count": 71211,
"is_parallel": true,
"self": 28.361748394099777
},
"communicator.exchange": {
"total": 1255.6464629360516,
"count": 71211,
"is_parallel": true,
"self": 1255.6464629360516
},
"steps_from_proto": {
"total": 111.622009655076,
"count": 71211,
"is_parallel": true,
"self": 22.38032163379603,
"children": {
"_process_rank_one_or_two_observation": {
"total": 89.24168802127997,
"count": 569688,
"is_parallel": true,
"self": 89.24168802127997
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 683.9049353789819,
"count": 71212,
"self": 2.7782892539667046,
"children": {
"process_trajectory": {
"total": 141.3053483560143,
"count": 71212,
"self": 141.05603431601367,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24931404000062685,
"count": 2,
"self": 0.24931404000062685
}
}
},
"_update_policy": {
"total": 539.8212977690009,
"count": 508,
"self": 316.59589603303857,
"children": {
"TorchPPOOptimizer.update": {
"total": 223.22540173596235,
"count": 25290,
"self": 223.22540173596235
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.6990006770356558e-06,
"count": 1,
"self": 1.6990006770356558e-06
},
"TrainerController._save_models": {
"total": 0.13165027700051724,
"count": 1,
"self": 0.0020495340004345053,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12960074300008273,
"count": 1,
"self": 0.12960074300008273
}
}
}
}
}
}
}