{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4323011040687561,
"min": 0.4323011040687561,
"max": 1.521894097328186,
"count": 46
},
"Pyramids.Policy.Entropy.sum": {
"value": 13038.201171875,
"min": 13038.201171875,
"max": 46168.1796875,
"count": 46
},
"Pyramids.Step.mean": {
"value": 1379889.0,
"min": 29952.0,
"max": 1379889.0,
"count": 46
},
"Pyramids.Step.sum": {
"value": 1379889.0,
"min": 29952.0,
"max": 1379889.0,
"count": 46
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6202417612075806,
"min": -0.12486793100833893,
"max": 0.6453419923782349,
"count": 46
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 174.90817260742188,
"min": -29.593700408935547,
"max": 179.4050750732422,
"count": 46
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.012686235830187798,
"min": -0.020177382975816727,
"max": 0.10471756756305695,
"count": 46
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -3.5775184631347656,
"min": -4.782039642333984,
"max": 25.341651916503906,
"count": 46
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07091114711191202,
"min": 0.06393599365570475,
"max": 0.07449721325344096,
"count": 46
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9927560595667683,
"min": 0.4811091471929103,
"max": 1.1174581988016143,
"count": 46
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013107569443198255,
"min": 0.0003153645406325535,
"max": 0.014699944187480213,
"count": 46
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.18350597220477557,
"min": 0.004322338205057764,
"max": 0.20579921862472297,
"count": 46
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00016352934549023333,
"min": 0.00016352934549023333,
"max": 0.00029838354339596195,
"count": 46
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0022894108368632666,
"min": 0.0020886848037717336,
"max": 0.003969584076805333,
"count": 46
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.15450976666666666,
"min": 0.15450976666666666,
"max": 0.19946118095238097,
"count": 46
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.1631367333333333,
"min": 1.3962282666666668,
"max": 2.7231946666666667,
"count": 46
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00545552569,
"min": 0.00545552569,
"max": 0.009946171977142856,
"count": 46
},
"Pyramids.Policy.Beta.sum": {
"value": 0.07637735966,
"min": 0.06962320384,
"max": 0.13232714720000002,
"count": 46
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005336236208677292,
"min": 0.005336236208677292,
"max": 0.26477232575416565,
"count": 46
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.07470730692148209,
"min": 0.07470730692148209,
"max": 1.8534061908721924,
"count": 46
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 297.9375,
"min": 297.1546391752577,
"max": 999.0,
"count": 46
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28602.0,
"min": 15984.0,
"max": 33129.0,
"count": 46
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6535793652700395,
"min": -1.0000000521540642,
"max": 1.702845345606509,
"count": 46
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 160.39719843119383,
"min": -30.9994016289711,
"max": 165.17599852383137,
"count": 46
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6535793652700395,
"min": -1.0000000521540642,
"max": 1.702845345606509,
"count": 46
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 160.39719843119383,
"min": -30.9994016289711,
"max": 165.17599852383137,
"count": 46
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.016989419559921743,
"min": 0.016989419559921743,
"max": 5.501258888281882,
"count": 46
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.647973697312409,
"min": 1.647973697312409,
"max": 88.02014221251011,
"count": 46
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 46
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 46
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1698107012",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1698110176"
},
"total": 3164.609970321,
"count": 1,
"self": 0.4864675460003127,
"children": {
"run_training.setup": {
"total": 0.042338582999946084,
"count": 1,
"self": 0.042338582999946084
},
"TrainerController.start_learning": {
"total": 3164.081164192,
"count": 1,
"self": 2.0965730678226464,
"children": {
"TrainerController._reset_env": {
"total": 3.4329820379998637,
"count": 1,
"self": 3.4329820379998637
},
"TrainerController.advance": {
"total": 3158.548553360177,
"count": 88580,
"self": 2.178246579084771,
"children": {
"env_step": {
"total": 2261.430189991037,
"count": 88580,
"self": 2063.367065181015,
"children": {
"SubprocessEnvManager._take_step": {
"total": 196.76392753203095,
"count": 88580,
"self": 6.740243861953559,
"children": {
"TorchPolicy.evaluate": {
"total": 190.0236836700774,
"count": 86988,
"self": 190.0236836700774
}
}
},
"workers": {
"total": 1.2991972779911976,
"count": 88579,
"self": 0.0,
"children": {
"worker_root": {
"total": 3157.3664632769987,
"count": 88579,
"is_parallel": true,
"self": 1269.1054589470596,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018875400000979425,
"count": 1,
"is_parallel": true,
"self": 0.0006164230001104443,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012711169999874983,
"count": 8,
"is_parallel": true,
"self": 0.0012711169999874983
}
}
},
"UnityEnvironment.step": {
"total": 0.05493908999983432,
"count": 1,
"is_parallel": true,
"self": 0.0005921249999119027,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005522349999864673,
"count": 1,
"is_parallel": true,
"self": 0.0005522349999864673
},
"communicator.exchange": {
"total": 0.05209911899987674,
"count": 1,
"is_parallel": true,
"self": 0.05209911899987674
},
"steps_from_proto": {
"total": 0.0016956110000592162,
"count": 1,
"is_parallel": true,
"self": 0.00036968400013392966,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013259269999252865,
"count": 8,
"is_parallel": true,
"self": 0.0013259269999252865
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1888.2610043299392,
"count": 88578,
"is_parallel": true,
"self": 49.84024531483192,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 36.26995800008808,
"count": 88578,
"is_parallel": true,
"self": 36.26995800008808
},
"communicator.exchange": {
"total": 1655.7077670529593,
"count": 88578,
"is_parallel": true,
"self": 1655.7077670529593
},
"steps_from_proto": {
"total": 146.44303396205987,
"count": 88578,
"is_parallel": true,
"self": 30.148753674154705,
"children": {
"_process_rank_one_or_two_observation": {
"total": 116.29428028790517,
"count": 708624,
"is_parallel": true,
"self": 116.29428028790517
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 894.9401167900551,
"count": 88579,
"self": 3.96228238894696,
"children": {
"process_trajectory": {
"total": 179.12121907810092,
"count": 88579,
"self": 178.95132169610156,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16989738199936255,
"count": 2,
"self": 0.16989738199936255
}
}
},
"_update_policy": {
"total": 711.8566153230072,
"count": 635,
"self": 419.2086930179803,
"children": {
"TorchPPOOptimizer.update": {
"total": 292.6479223050269,
"count": 31677,
"self": 292.6479223050269
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1800002539530396e-06,
"count": 1,
"self": 1.1800002539530396e-06
},
"TrainerController._save_models": {
"total": 0.0030545460003850167,
"count": 1,
"self": 2.3812000108591747e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 0.003030734000276425,
"count": 1,
"self": 0.003030734000276425
}
}
}
}
}
}
}