{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3231087625026703,
"min": 0.3231087625026703,
"max": 1.3758785724639893,
"count": 35
},
"Pyramids.Policy.Entropy.sum": {
"value": 9801.8271484375,
"min": 9801.8271484375,
"max": 41738.65234375,
"count": 35
},
"Pyramids.Step.mean": {
"value": 1049907.0,
"min": 29952.0,
"max": 1049907.0,
"count": 35
},
"Pyramids.Step.sum": {
"value": 1049907.0,
"min": 29952.0,
"max": 1049907.0,
"count": 35
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7244997024536133,
"min": -0.10482478886842728,
"max": 0.7244997024536133,
"count": 35
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 210.82940673828125,
"min": -25.367599487304688,
"max": 210.82940673828125,
"count": 35
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.017890356481075287,
"min": -0.023667270317673683,
"max": 0.4719621241092682,
"count": 35
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.206093788146973,
"min": -6.295494079589844,
"max": 113.27091217041016,
"count": 35
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06844308313527875,
"min": 0.06523813717535186,
"max": 0.07273681425847046,
"count": 35
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9582031638939025,
"min": 0.4991438737993487,
"max": 1.0603408240365078,
"count": 35
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014152040907201529,
"min": 0.00027127492039922396,
"max": 0.01700292408382533,
"count": 35
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1981285727008214,
"min": 0.0032552990447906878,
"max": 0.23804093717355465,
"count": 35
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00019650694164055472,
"min": 0.00019650694164055472,
"max": 0.00029838354339596195,
"count": 35
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.002751097182967766,
"min": 0.0020886848037717336,
"max": 0.0040277440574187,
"count": 35
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.16550230238095237,
"min": 0.16550230238095237,
"max": 0.19946118095238097,
"count": 35
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.3170322333333333,
"min": 1.3962282666666668,
"max": 2.8425813,
"count": 35
},
"Pyramids.Policy.Beta.mean": {
"value": 0.006553680007857144,
"min": 0.006553680007857144,
"max": 0.009946171977142856,
"count": 35
},
"Pyramids.Policy.Beta.sum": {
"value": 0.09175152011000001,
"min": 0.06962320384,
"max": 0.13427387187000003,
"count": 35
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.015913907438516617,
"min": 0.015913907438516617,
"max": 0.6706464886665344,
"count": 35
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.22279471158981323,
"min": 0.22279471158981323,
"max": 4.694525241851807,
"count": 35
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 273.6454545454545,
"min": 258.09090909090907,
"max": 999.0,
"count": 35
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30101.0,
"min": 15984.0,
"max": 33238.0,
"count": 35
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7081672538410533,
"min": -1.0000000521540642,
"max": 1.725373536594643,
"count": 35
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 187.89839792251587,
"min": -32.000001668930054,
"max": 208.7701979279518,
"count": 35
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7081672538410533,
"min": -1.0000000521540642,
"max": 1.725373536594643,
"count": 35
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 187.89839792251587,
"min": -32.000001668930054,
"max": 208.7701979279518,
"count": 35
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04525384172977118,
"min": 0.044075866621892036,
"max": 14.226578075438738,
"count": 35
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.977922590274829,
"min": 4.5608558682433795,
"max": 227.6252492070198,
"count": 35
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 35
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 35
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1751359687",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1751362101"
},
"total": 2414.0348642490003,
"count": 1,
"self": 0.3698623190007311,
"children": {
"run_training.setup": {
"total": 0.025303245000031893,
"count": 1,
"self": 0.025303245000031893
},
"TrainerController.start_learning": {
"total": 2413.6396986849995,
"count": 1,
"self": 1.3397276149976278,
"children": {
"TrainerController._reset_env": {
"total": 2.7150695519999317,
"count": 1,
"self": 2.7150695519999317
},
"TrainerController.advance": {
"total": 2409.4205432730014,
"count": 68783,
"self": 1.4128835332730887,
"children": {
"env_step": {
"total": 1704.5206358479586,
"count": 68783,
"self": 1548.0207628318972,
"children": {
"SubprocessEnvManager._take_step": {
"total": 155.66843740499598,
"count": 68783,
"self": 4.9325163547723605,
"children": {
"TorchPolicy.evaluate": {
"total": 150.73592105022362,
"count": 67191,
"self": 150.73592105022362
}
}
},
"workers": {
"total": 0.8314356110654444,
"count": 68782,
"self": 0.0,
"children": {
"worker_root": {
"total": 2408.3399773619594,
"count": 68782,
"is_parallel": true,
"self": 980.7310232399786,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002631820000260632,
"count": 1,
"is_parallel": true,
"self": 0.0008023380009944958,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001829481999266136,
"count": 8,
"is_parallel": true,
"self": 0.001829481999266136
}
}
},
"UnityEnvironment.step": {
"total": 0.052426113999899826,
"count": 1,
"is_parallel": true,
"self": 0.000534299000264582,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004928549997202936,
"count": 1,
"is_parallel": true,
"self": 0.0004928549997202936
},
"communicator.exchange": {
"total": 0.04974541199999294,
"count": 1,
"is_parallel": true,
"self": 0.04974541199999294
},
"steps_from_proto": {
"total": 0.0016535479999220115,
"count": 1,
"is_parallel": true,
"self": 0.0003592739990381233,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012942740008838882,
"count": 8,
"is_parallel": true,
"self": 0.0012942740008838882
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1427.6089541219808,
"count": 68781,
"is_parallel": true,
"self": 34.67900780600894,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.240937260080955,
"count": 68781,
"is_parallel": true,
"self": 25.240937260080955
},
"communicator.exchange": {
"total": 1262.7047295149273,
"count": 68781,
"is_parallel": true,
"self": 1262.7047295149273
},
"steps_from_proto": {
"total": 104.9842795409636,
"count": 68781,
"is_parallel": true,
"self": 20.96873414738684,
"children": {
"_process_rank_one_or_two_observation": {
"total": 84.01554539357676,
"count": 550248,
"is_parallel": true,
"self": 84.01554539357676
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 703.4870238917697,
"count": 68782,
"self": 2.613504312592795,
"children": {
"process_trajectory": {
"total": 136.423542858176,
"count": 68782,
"self": 136.22318002717566,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2003628310003478,
"count": 2,
"self": 0.2003628310003478
}
}
},
"_update_policy": {
"total": 564.4499767210009,
"count": 482,
"self": 315.12069266205435,
"children": {
"TorchPPOOptimizer.update": {
"total": 249.32928405894654,
"count": 24495,
"self": 249.32928405894654
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.430999873264227e-06,
"count": 1,
"self": 1.430999873264227e-06
},
"TrainerController._save_models": {
"total": 0.1643568140007119,
"count": 1,
"self": 0.0019024040011572652,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16245440999955463,
"count": 1,
"self": 0.16245440999955463
}
}
}
}
}
}
}