{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4343549907207489,
"min": 0.4343549907207489,
"max": 1.4107428789138794,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13051.4990234375,
"min": 13051.4990234375,
"max": 42796.296875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989985.0,
"min": 29952.0,
"max": 989985.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989985.0,
"min": 29952.0,
"max": 989985.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.32948029041290283,
"min": -0.11217870563268661,
"max": 0.3786553144454956,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 85.66487884521484,
"min": -27.03506851196289,
"max": 98.45037841796875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.21610121428966522,
"min": -0.11864599585533142,
"max": 0.3337077498435974,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 56.186317443847656,
"min": -30.847959518432617,
"max": 80.08985900878906,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06827555355405257,
"min": 0.06145190588722471,
"max": 0.07799802280628343,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.955857749756736,
"min": 0.47393946855207886,
"max": 1.0613960866855146,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.021311823298068102,
"min": 0.0006619276822778186,
"max": 0.021311823298068102,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2983655261729534,
"min": 0.005697813634406915,
"max": 0.2983655261729534,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4828689343142816e-06,
"min": 7.4828689343142816e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010476016508039995,
"min": 0.00010476016508039995,
"max": 0.0031366946544352007,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249425714285713,
"min": 0.10249425714285713,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4349196,
"min": 1.3691136000000002,
"max": 2.4012865000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025917628857142843,
"min": 0.00025917628857142843,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036284680399999983,
"min": 0.0036284680399999983,
"max": 0.10458192352000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013865351676940918,
"min": 0.013865351676940918,
"max": 0.5297890305519104,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19411492347717285,
"min": 0.19411492347717285,
"max": 3.7085232734680176,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 537.5283018867924,
"min": 462.77941176470586,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28489.0,
"min": 15984.0,
"max": 32782.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.171340704102207,
"min": -1.0000000521540642,
"max": 1.3847641498962444,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 63.252398021519184,
"min": -32.000001668930054,
"max": 92.77919804304838,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.171340704102207,
"min": -1.0000000521540642,
"max": 1.3847641498962444,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 63.252398021519184,
"min": -32.000001668930054,
"max": 92.77919804304838,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.07609475129963054,
"min": 0.06732478687442395,
"max": 10.41972737479955,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.109116570180049,
"min": 4.109116570180049,
"max": 166.7156379967928,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1724103232",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=PyramidsTraining --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1724105322"
},
"total": 2090.358414217,
"count": 1,
"self": 0.5262802519996512,
"children": {
"run_training.setup": {
"total": 0.057096033999641804,
"count": 1,
"self": 0.057096033999641804
},
"TrainerController.start_learning": {
"total": 2089.7750379310005,
"count": 1,
"self": 1.2494090809036607,
"children": {
"TrainerController._reset_env": {
"total": 2.4860343239997746,
"count": 1,
"self": 2.4860343239997746
},
"TrainerController.advance": {
"total": 2085.9547860490966,
"count": 63424,
"self": 1.347612151057092,
"children": {
"env_step": {
"total": 1452.6177551180513,
"count": 63424,
"self": 1323.6565996711665,
"children": {
"SubprocessEnvManager._take_step": {
"total": 128.18791677805257,
"count": 63424,
"self": 4.4799376749383555,
"children": {
"TorchPolicy.evaluate": {
"total": 123.70797910311421,
"count": 62542,
"self": 123.70797910311421
}
}
},
"workers": {
"total": 0.773238668832164,
"count": 63424,
"self": 0.0,
"children": {
"worker_root": {
"total": 2085.5283897719924,
"count": 63424,
"is_parallel": true,
"self": 876.1794795529395,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021355059998313664,
"count": 1,
"is_parallel": true,
"self": 0.000712068000666477,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014234379991648893,
"count": 8,
"is_parallel": true,
"self": 0.0014234379991648893
}
}
},
"UnityEnvironment.step": {
"total": 0.08577479600035076,
"count": 1,
"is_parallel": true,
"self": 0.0006372080001710856,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004207169999972393,
"count": 1,
"is_parallel": true,
"self": 0.0004207169999972393
},
"communicator.exchange": {
"total": 0.08188057300003493,
"count": 1,
"is_parallel": true,
"self": 0.08188057300003493
},
"steps_from_proto": {
"total": 0.002836298000147508,
"count": 1,
"is_parallel": true,
"self": 0.00038860500035298173,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0024476929997945263,
"count": 8,
"is_parallel": true,
"self": 0.0024476929997945263
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1209.3489102190529,
"count": 63423,
"is_parallel": true,
"self": 32.308727493284096,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.95675904693917,
"count": 63423,
"is_parallel": true,
"self": 21.95675904693917
},
"communicator.exchange": {
"total": 1061.2659345849452,
"count": 63423,
"is_parallel": true,
"self": 1061.2659345849452
},
"steps_from_proto": {
"total": 93.81748909388443,
"count": 63423,
"is_parallel": true,
"self": 18.47941773192315,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.33807136196128,
"count": 507384,
"is_parallel": true,
"self": 75.33807136196128
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 631.9894187799882,
"count": 63424,
"self": 2.4501784450731066,
"children": {
"process_trajectory": {
"total": 124.59528672192755,
"count": 63424,
"self": 124.10009603792696,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4951906840005904,
"count": 2,
"self": 0.4951906840005904
}
}
},
"_update_policy": {
"total": 504.94395361298757,
"count": 440,
"self": 298.5806123119628,
"children": {
"TorchPPOOptimizer.update": {
"total": 206.36334130102478,
"count": 22779,
"self": 206.36334130102478
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.297999915550463e-06,
"count": 1,
"self": 1.297999915550463e-06
},
"TrainerController._save_models": {
"total": 0.08480717900056334,
"count": 1,
"self": 0.0014667250006823451,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08334045399988099,
"count": 1,
"self": 0.08334045399988099
}
}
}
}
}
}
}