{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.42745521664619446,
"min": 0.42720916867256165,
"max": 1.4355342388153076,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12734.74609375,
"min": 12734.74609375,
"max": 43548.3671875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989996.0,
"min": 29963.0,
"max": 989996.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989996.0,
"min": 29963.0,
"max": 989996.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.49868324398994446,
"min": -0.10083520412445068,
"max": 0.4993745684623718,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 135.14315795898438,
"min": -24.301284790039062,
"max": 135.8298797607422,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.16643992066383362,
"min": -0.16643992066383362,
"max": 0.3781134784221649,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -45.10521697998047,
"min": -45.10521697998047,
"max": 90.74723815917969,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0672510928551977,
"min": 0.06441611856227372,
"max": 0.07397898587135107,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0087663928279655,
"min": 0.5780115663366501,
"max": 1.0549503255363863,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014534920859862016,
"min": 0.000831639535990637,
"max": 0.01668207340064088,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21802381289793024,
"min": 0.011642953503868918,
"max": 0.23354902760897234,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.469797510099998e-06,
"min": 7.469797510099998e-06,
"max": 0.0002948531642156125,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011204696265149997,
"min": 0.00011204696265149997,
"max": 0.003633345188884999,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024899,
"min": 0.1024899,
"max": 0.1982843875,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5373485,
"min": 1.4774446000000003,
"max": 2.611115,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002587410099999999,
"min": 0.0002587410099999999,
"max": 0.00982861031125,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038811151499999986,
"min": 0.0038811151499999986,
"max": 0.12113038849999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01016462966799736,
"min": 0.01016462966799736,
"max": 0.5040149688720703,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1524694412946701,
"min": 0.14933843910694122,
"max": 4.0321197509765625,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 371.7564102564103,
"min": 344.89156626506025,
"max": 998.1875,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28997.0,
"min": 16378.0,
"max": 32971.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5491316163841682,
"min": -0.9366438027936965,
"max": 1.582799975681736,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 122.38139769434929,
"min": -29.97260168939829,
"max": 131.37239798158407,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5491316163841682,
"min": -0.9366438027936965,
"max": 1.582799975681736,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 122.38139769434929,
"min": -29.97260168939829,
"max": 131.37239798158407,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03969827307225352,
"min": 0.03969827307225352,
"max": 9.790487950996441,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1361635727080284,
"min": 3.1361635727080284,
"max": 166.4382951669395,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676138771",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676140898"
},
"total": 2127.2470620329996,
"count": 1,
"self": 0.4761281819996839,
"children": {
"run_training.setup": {
"total": 0.11258154500001183,
"count": 1,
"self": 0.11258154500001183
},
"TrainerController.start_learning": {
"total": 2126.658352306,
"count": 1,
"self": 1.2355610520285154,
"children": {
"TrainerController._reset_env": {
"total": 7.393382454999937,
"count": 1,
"self": 7.393382454999937
},
"TrainerController.advance": {
"total": 2117.938186805972,
"count": 63702,
"self": 1.3002901059498981,
"children": {
"env_step": {
"total": 1389.1521536149585,
"count": 63702,
"self": 1279.7296319119591,
"children": {
"SubprocessEnvManager._take_step": {
"total": 108.65265323303868,
"count": 63702,
"self": 4.437966436030706,
"children": {
"TorchPolicy.evaluate": {
"total": 104.21468679700797,
"count": 62563,
"self": 35.21849018401247,
"children": {
"TorchPolicy.sample_actions": {
"total": 68.9961966129955,
"count": 62563,
"self": 68.9961966129955
}
}
}
}
},
"workers": {
"total": 0.7698684699605565,
"count": 63702,
"self": 0.0,
"children": {
"worker_root": {
"total": 2122.246226348013,
"count": 63702,
"is_parallel": true,
"self": 952.3636917520178,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0024279140000089683,
"count": 1,
"is_parallel": true,
"self": 0.000804686000037691,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016232279999712773,
"count": 8,
"is_parallel": true,
"self": 0.0016232279999712773
}
}
},
"UnityEnvironment.step": {
"total": 0.045676430999947115,
"count": 1,
"is_parallel": true,
"self": 0.0005046929999252825,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004684689999976399,
"count": 1,
"is_parallel": true,
"self": 0.0004684689999976399
},
"communicator.exchange": {
"total": 0.04314309400001548,
"count": 1,
"is_parallel": true,
"self": 0.04314309400001548
},
"steps_from_proto": {
"total": 0.0015601750000087122,
"count": 1,
"is_parallel": true,
"self": 0.0003747430000657914,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011854319999429208,
"count": 8,
"is_parallel": true,
"self": 0.0011854319999429208
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1169.8825345959951,
"count": 63701,
"is_parallel": true,
"self": 30.498246739882234,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.151474821022703,
"count": 63701,
"is_parallel": true,
"self": 22.151474821022703
},
"communicator.exchange": {
"total": 1017.6498551450472,
"count": 63701,
"is_parallel": true,
"self": 1017.6498551450472
},
"steps_from_proto": {
"total": 99.58295789004296,
"count": 63701,
"is_parallel": true,
"self": 21.28035009095072,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.30260779909224,
"count": 509608,
"is_parallel": true,
"self": 78.30260779909224
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 727.4857430850635,
"count": 63702,
"self": 2.3421099640694365,
"children": {
"process_trajectory": {
"total": 157.21797140599574,
"count": 63702,
"self": 157.03181868299544,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18615272300030483,
"count": 2,
"self": 0.18615272300030483
}
}
},
"_update_policy": {
"total": 567.9256617149983,
"count": 455,
"self": 218.9818677790322,
"children": {
"TorchPPOOptimizer.update": {
"total": 348.94379393596614,
"count": 22761,
"self": 348.94379393596614
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1069996617152356e-06,
"count": 1,
"self": 1.1069996617152356e-06
},
"TrainerController._save_models": {
"total": 0.09122088599997369,
"count": 1,
"self": 0.0015081299998200848,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0897127560001536,
"count": 1,
"self": 0.0897127560001536
}
}
}
}
}
}
}