{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4060059189796448,
"min": 0.38895347714424133,
"max": 1.4938462972640991,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12206.162109375,
"min": 11525.4697265625,
"max": 45317.3203125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989994.0,
"min": 29952.0,
"max": 989994.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989994.0,
"min": 29952.0,
"max": 989994.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5402843356132507,
"min": -0.12726302444934845,
"max": 0.5408269762992859,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 147.49761962890625,
"min": -30.543127059936523,
"max": 148.7274169921875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.08557944744825363,
"min": -0.05794571340084076,
"max": 0.2352677583694458,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 23.363189697265625,
"min": -14.892047882080078,
"max": 61.404884338378906,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07143790309408063,
"min": 0.0643910342113784,
"max": 0.072798014273859,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0001306433171289,
"min": 0.4855815346898704,
"max": 1.0745404994066157,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015626810832137922,
"min": 0.00011726431928949405,
"max": 0.01765570354666929,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21877535164993092,
"min": 0.0015244361507634227,
"max": 0.2471798496533701,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.536076059435715e-06,
"min": 7.536076059435715e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001055050648321,
"min": 0.0001055050648321,
"max": 0.0035088866303711994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251199285714285,
"min": 0.10251199285714285,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4351679,
"min": 1.3691136000000002,
"max": 2.5696288000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026094808642857144,
"min": 0.00026094808642857144,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036532732100000003,
"min": 0.0036532732100000003,
"max": 0.11698591711999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012456392869353294,
"min": 0.012456392869353294,
"max": 0.3294225335121155,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17438949644565582,
"min": 0.17438949644565582,
"max": 2.305957794189453,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 354.719512195122,
"min": 346.53932584269666,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29087.0,
"min": 15984.0,
"max": 32304.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6209086247800308,
"min": -1.0000000521540642,
"max": 1.6209086247800308,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 131.2935986071825,
"min": -32.000001668930054,
"max": 144.665798291564,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6209086247800308,
"min": -1.0000000521540642,
"max": 1.6209086247800308,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 131.2935986071825,
"min": -32.000001668930054,
"max": 144.665798291564,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04553977876612744,
"min": 0.04553977876612744,
"max": 6.897034697234631,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.6887220800563227,
"min": 3.6887220800563227,
"max": 110.35255515575409,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686584126",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1686586479"
},
"total": 2353.682177957,
"count": 1,
"self": 0.9142714239997076,
"children": {
"run_training.setup": {
"total": 0.07639235000010558,
"count": 1,
"self": 0.07639235000010558
},
"TrainerController.start_learning": {
"total": 2352.691514183,
"count": 1,
"self": 1.6857892400139463,
"children": {
"TrainerController._reset_env": {
"total": 4.6419321979999495,
"count": 1,
"self": 4.6419321979999495
},
"TrainerController.advance": {
"total": 2346.204608934986,
"count": 63661,
"self": 1.6932031859960261,
"children": {
"env_step": {
"total": 1659.193904825001,
"count": 63661,
"self": 1531.1257454320762,
"children": {
"SubprocessEnvManager._take_step": {
"total": 127.09734540501586,
"count": 63661,
"self": 5.470583340067151,
"children": {
"TorchPolicy.evaluate": {
"total": 121.62676206494871,
"count": 62553,
"self": 121.62676206494871
}
}
},
"workers": {
"total": 0.9708139879089686,
"count": 63661,
"self": 0.0,
"children": {
"worker_root": {
"total": 2346.620035356012,
"count": 63661,
"is_parallel": true,
"self": 944.1939448089051,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001993071000015334,
"count": 1,
"is_parallel": true,
"self": 0.0006092969999826892,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013837740000326448,
"count": 8,
"is_parallel": true,
"self": 0.0013837740000326448
}
}
},
"UnityEnvironment.step": {
"total": 0.05750171800013959,
"count": 1,
"is_parallel": true,
"self": 0.0006757470000593457,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005528980000235606,
"count": 1,
"is_parallel": true,
"self": 0.0005528980000235606
},
"communicator.exchange": {
"total": 0.05419097899994085,
"count": 1,
"is_parallel": true,
"self": 0.05419097899994085
},
"steps_from_proto": {
"total": 0.0020820940001158306,
"count": 1,
"is_parallel": true,
"self": 0.0004135730002872151,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016685209998286155,
"count": 8,
"is_parallel": true,
"self": 0.0016685209998286155
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1402.426090547107,
"count": 63660,
"is_parallel": true,
"self": 36.27569683209231,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.600180334012748,
"count": 63660,
"is_parallel": true,
"self": 24.600180334012748
},
"communicator.exchange": {
"total": 1229.7264607520042,
"count": 63660,
"is_parallel": true,
"self": 1229.7264607520042
},
"steps_from_proto": {
"total": 111.82375262899768,
"count": 63660,
"is_parallel": true,
"self": 22.300824728821453,
"children": {
"_process_rank_one_or_two_observation": {
"total": 89.52292790017623,
"count": 509280,
"is_parallel": true,
"self": 89.52292790017623
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 685.3175009239892,
"count": 63661,
"self": 3.134231234989784,
"children": {
"process_trajectory": {
"total": 117.4458383730032,
"count": 63661,
"self": 117.14049285100305,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3053455220001524,
"count": 2,
"self": 0.3053455220001524
}
}
},
"_update_policy": {
"total": 564.7374313159962,
"count": 443,
"self": 361.6502136129975,
"children": {
"TorchPPOOptimizer.update": {
"total": 203.08721770299871,
"count": 22833,
"self": 203.08721770299871
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3179997040424496e-06,
"count": 1,
"self": 1.3179997040424496e-06
},
"TrainerController._save_models": {
"total": 0.15918249200012724,
"count": 1,
"self": 0.0019052160000683216,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15727727600005892,
"count": 1,
"self": 0.15727727600005892
}
}
}
}
}
}
}