{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.25953876972198486,
"min": 0.2554902136325836,
"max": 1.4421803951263428,
"count": 50
},
"Pyramids.Policy.Entropy.sum": {
"value": 7790.3154296875,
"min": 7681.05810546875,
"max": 43749.984375,
"count": 50
},
"Pyramids.Step.mean": {
"value": 1499973.0,
"min": 29952.0,
"max": 1499973.0,
"count": 50
},
"Pyramids.Step.sum": {
"value": 1499973.0,
"min": 29952.0,
"max": 1499973.0,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8143863081932068,
"min": -0.09743756055831909,
"max": 0.851116955280304,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 241.87274169921875,
"min": -23.482452392578125,
"max": 258.73956298828125,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03962195664644241,
"min": 0.021022958680987358,
"max": 0.3204623758792877,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 11.767721176147461,
"min": 6.096657752990723,
"max": 76.86979675292969,
"count": 50
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06989702922319772,
"min": 0.06551994576186713,
"max": 0.07377703284873957,
"count": 50
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9785584091247681,
"min": 0.49282712330650297,
"max": 1.0799011346506293,
"count": 50
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014080626318380902,
"min": 0.0012980103475642945,
"max": 0.015892312451753588,
"count": 50
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19712876845733263,
"min": 0.018172144865900124,
"max": 0.23813491165478184,
"count": 50
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00015150425664145476,
"min": 0.00015150425664145476,
"max": 0.00029838354339596195,
"count": 50
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0021210595929803664,
"min": 0.0020886848037717336,
"max": 0.004011224062925333,
"count": 50
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1505014023809524,
"min": 0.1505014023809524,
"max": 0.19946118095238097,
"count": 50
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.1070196333333335,
"min": 1.3962282666666668,
"max": 2.7824553,
"count": 50
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0050550900978571425,
"min": 0.0050550900978571425,
"max": 0.009946171977142856,
"count": 50
},
"Pyramids.Policy.Beta.sum": {
"value": 0.07077126136999999,
"min": 0.06962320384,
"max": 0.1337137592,
"count": 50
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.06995836645364761,
"min": 0.05724186822772026,
"max": 0.5387540459632874,
"count": 50
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.979417085647583,
"min": 0.8586280345916748,
"max": 3.7712783813476562,
"count": 50
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 222.828125,
"min": 206.85507246376812,
"max": 999.0,
"count": 50
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28522.0,
"min": 15984.0,
"max": 32329.0,
"count": 50
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7741317714369573,
"min": -1.0000000521540642,
"max": 1.793144920165988,
"count": 50
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 228.8629985153675,
"min": -30.168801687657833,
"max": 247.45399898290634,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7741317714369573,
"min": -1.0000000521540642,
"max": 1.793144920165988,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 228.8629985153675,
"min": -30.168801687657833,
"max": 247.45399898290634,
"count": 50
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.15992938654425973,
"min": 0.15674080851528308,
"max": 10.55444335564971,
"count": 50
},
"Pyramids.Policy.RndReward.sum": {
"value": 20.630890864209505,
"min": 18.251696077990346,
"max": 168.87109369039536,
"count": 50
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674193654",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674196920"
},
"total": 3266.271138237,
"count": 1,
"self": 0.35971407399983946,
"children": {
"run_training.setup": {
"total": 0.09719678200008275,
"count": 1,
"self": 0.09719678200008275
},
"TrainerController.start_learning": {
"total": 3265.814227381,
"count": 1,
"self": 1.8229607039829716,
"children": {
"TrainerController._reset_env": {
"total": 5.929033473000118,
"count": 1,
"self": 5.929033473000118
},
"TrainerController.advance": {
"total": 3257.9293664160164,
"count": 98018,
"self": 1.9323758699947575,
"children": {
"env_step": {
"total": 2259.167900679028,
"count": 98018,
"self": 2105.5260272810283,
"children": {
"SubprocessEnvManager._take_step": {
"total": 152.5071143779494,
"count": 98018,
"self": 6.392982578003284,
"children": {
"TorchPolicy.evaluate": {
"total": 146.1141317999461,
"count": 94796,
"self": 49.09433555398596,
"children": {
"TorchPolicy.sample_actions": {
"total": 97.01979624596015,
"count": 94796,
"self": 97.01979624596015
}
}
}
}
},
"workers": {
"total": 1.1347590200505238,
"count": 98017,
"self": 0.0,
"children": {
"worker_root": {
"total": 3259.662274534021,
"count": 98017,
"is_parallel": true,
"self": 1299.533615427968,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017242639999039966,
"count": 1,
"is_parallel": true,
"self": 0.000608542000009038,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011157219998949586,
"count": 8,
"is_parallel": true,
"self": 0.0011157219998949586
}
}
},
"UnityEnvironment.step": {
"total": 0.04609433099994931,
"count": 1,
"is_parallel": true,
"self": 0.0004747250002310466,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004216329998598667,
"count": 1,
"is_parallel": true,
"self": 0.0004216329998598667
},
"communicator.exchange": {
"total": 0.04358322699999917,
"count": 1,
"is_parallel": true,
"self": 0.04358322699999917
},
"steps_from_proto": {
"total": 0.001614745999859224,
"count": 1,
"is_parallel": true,
"self": 0.0004187790004834824,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011959669993757416,
"count": 8,
"is_parallel": true,
"self": 0.0011959669993757416
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1960.128659106053,
"count": 98016,
"is_parallel": true,
"self": 40.912380216932206,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 32.58134666600586,
"count": 98016,
"is_parallel": true,
"self": 32.58134666600586
},
"communicator.exchange": {
"total": 1741.3693985590874,
"count": 98016,
"is_parallel": true,
"self": 1741.3693985590874
},
"steps_from_proto": {
"total": 145.26553366402754,
"count": 98016,
"is_parallel": true,
"self": 31.580711796222204,
"children": {
"_process_rank_one_or_two_observation": {
"total": 113.68482186780534,
"count": 784128,
"is_parallel": true,
"self": 113.68482186780534
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 996.8290898669934,
"count": 98017,
"self": 3.5357743109927924,
"children": {
"process_trajectory": {
"total": 216.91994799100848,
"count": 98017,
"self": 216.6499311080088,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2700168829996983,
"count": 3,
"self": 0.2700168829996983
}
}
},
"_update_policy": {
"total": 776.3733675649921,
"count": 700,
"self": 302.0121001809605,
"children": {
"TorchPPOOptimizer.update": {
"total": 474.36126738403163,
"count": 34551,
"self": 474.36126738403163
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2090004020137712e-06,
"count": 1,
"self": 1.2090004020137712e-06
},
"TrainerController._save_models": {
"total": 0.1328655790002813,
"count": 1,
"self": 0.0019326189994899323,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13093296000079135,
"count": 1,
"self": 0.13093296000079135
}
}
}
}
}
}
}