{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.11727878451347351,
"min": 0.11537830531597137,
"max": 1.3033181428909302,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 3512.734130859375,
"min": 3455.81103515625,
"max": 39537.4609375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989939.0,
"min": 29875.0,
"max": 989939.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989939.0,
"min": 29875.0,
"max": 989939.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.08393924683332443,
"min": -0.10330682247877121,
"max": 0.019485486671328545,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -20.14542007446289,
"min": -24.793638229370117,
"max": 4.69600248336792,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 1.6878089904785156,
"min": 1.048180341720581,
"max": 1.763006567955017,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 405.07415771484375,
"min": 248.4187469482422,
"max": 423.12158203125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06541793363649298,
"min": 0.06453725416906222,
"max": 0.07317445483948312,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9158510709109017,
"min": 0.5779221702859627,
"max": 1.0783769823188492,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011727771771024924,
"min": 0.008855445069724315,
"max": 0.052297414268774,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.16418880479434894,
"min": 0.0949367318950128,
"max": 0.418379314150192,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4157975281e-06,
"min": 7.4157975281e-06,
"max": 0.0002948486642171125,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010382116539339999,
"min": 0.00010382116539339999,
"max": 0.0032247289250903997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247189999999999,
"min": 0.10247189999999999,
"max": 0.1982828875,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4346066,
"min": 1.4346066,
"max": 2.3594592,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025694281000000004,
"min": 0.00025694281000000004,
"max": 0.009828460461249999,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035971993400000005,
"min": 0.0035971993400000005,
"max": 0.10750346904000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 1.7030973434448242,
"min": 1.1904410123825073,
"max": 1.7541483640670776,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 23.84336280822754,
"min": 9.523528099060059,
"max": 25.720483779907227,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 985.4516129032259,
"min": 915.5294117647059,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30549.0,
"min": 16418.0,
"max": 32519.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.9213419854640961,
"min": -0.999962551984936,
"max": -0.5571250508073717,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -28.561601549386978,
"min": -31.998801663517952,
"max": -14.434000805020332,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.9213419854640961,
"min": -0.999962551984936,
"max": -0.5571250508073717,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -28.561601549386978,
"min": -31.998801663517952,
"max": -14.434000805020332,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 16.759374566999593,
"min": 11.888948411680758,
"max": 19.582620084285736,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 519.5406115769874,
"min": 332.9045414328575,
"max": 536.1754525303841,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1748631886",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1748634002"
},
"total": 2115.6378455759996,
"count": 1,
"self": 0.47689184700084297,
"children": {
"run_training.setup": {
"total": 0.02150907599934726,
"count": 1,
"self": 0.02150907599934726
},
"TrainerController.start_learning": {
"total": 2115.1394446529994,
"count": 1,
"self": 1.352522110897553,
"children": {
"TrainerController._reset_env": {
"total": 2.5895411110004716,
"count": 1,
"self": 2.5895411110004716
},
"TrainerController.advance": {
"total": 2111.1148638371005,
"count": 63259,
"self": 1.540222501230346,
"children": {
"env_step": {
"total": 1442.6756700799697,
"count": 63259,
"self": 1289.0098628557507,
"children": {
"SubprocessEnvManager._take_step": {
"total": 152.87008870005047,
"count": 63259,
"self": 4.574176225962219,
"children": {
"TorchPolicy.evaluate": {
"total": 148.29591247408825,
"count": 62580,
"self": 148.29591247408825
}
}
},
"workers": {
"total": 0.7957185241684783,
"count": 63259,
"self": 0.0,
"children": {
"worker_root": {
"total": 2110.3995245019187,
"count": 63259,
"is_parallel": true,
"self": 932.1839104439587,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001917501999741944,
"count": 1,
"is_parallel": true,
"self": 0.0006387780003933585,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012787239993485855,
"count": 8,
"is_parallel": true,
"self": 0.0012787239993485855
}
}
},
"UnityEnvironment.step": {
"total": 0.09024574399973062,
"count": 1,
"is_parallel": true,
"self": 0.000537002999408287,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000397312000131933,
"count": 1,
"is_parallel": true,
"self": 0.000397312000131933
},
"communicator.exchange": {
"total": 0.0877936150000096,
"count": 1,
"is_parallel": true,
"self": 0.0877936150000096
},
"steps_from_proto": {
"total": 0.001517814000180806,
"count": 1,
"is_parallel": true,
"self": 0.0002998710006067995,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012179429995740065,
"count": 8,
"is_parallel": true,
"self": 0.0012179429995740065
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1178.21561405796,
"count": 63258,
"is_parallel": true,
"self": 31.132442607053235,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.41987773017354,
"count": 63258,
"is_parallel": true,
"self": 22.41987773017354
},
"communicator.exchange": {
"total": 1030.7513862279557,
"count": 63258,
"is_parallel": true,
"self": 1030.7513862279557
},
"steps_from_proto": {
"total": 93.91190749277757,
"count": 63258,
"is_parallel": true,
"self": 18.806460532749952,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.10544696002762,
"count": 506064,
"is_parallel": true,
"self": 75.10544696002762
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 666.8989712559005,
"count": 63259,
"self": 2.457798588948208,
"children": {
"process_trajectory": {
"total": 127.16368415295256,
"count": 63259,
"self": 126.96304914595203,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20063500700052828,
"count": 2,
"self": 0.20063500700052828
}
}
},
"_update_policy": {
"total": 537.2774885139997,
"count": 442,
"self": 299.0454969221173,
"children": {
"TorchPPOOptimizer.update": {
"total": 238.23199159188243,
"count": 22794,
"self": 238.23199159188243
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.780005711945705e-07,
"count": 1,
"self": 9.780005711945705e-07
},
"TrainerController._save_models": {
"total": 0.08251661600024818,
"count": 1,
"self": 0.0018325820001336979,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08068403400011448,
"count": 1,
"self": 0.08068403400011448
}
}
}
}
}
}
}