{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.23797956109046936,
"min": 0.23797956109046936,
"max": 1.3762589693069458,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 7120.3486328125,
"min": 7120.3486328125,
"max": 41750.19140625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989884.0,
"min": 29952.0,
"max": 989884.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989884.0,
"min": 29952.0,
"max": 989884.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7378281354904175,
"min": -0.0958128571510315,
"max": 0.7378281354904175,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 216.92147827148438,
"min": -22.899272918701172,
"max": 216.92147827148438,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.015895385295152664,
"min": -0.013426374644041061,
"max": 0.3426330089569092,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.673243045806885,
"min": -3.638547420501709,
"max": 82.57455444335938,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06940749172090796,
"min": 0.06501277138001979,
"max": 0.07472376028348182,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9717048840927115,
"min": 0.5030254713348622,
"max": 1.0754707626160689,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014973445392873453,
"min": 9.112080120959689e-05,
"max": 0.016006233586397554,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20962823550022833,
"min": 0.0011845704157247595,
"max": 0.23748045564085865,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.533118917564294e-06,
"min": 7.533118917564294e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010546366484590011,
"min": 0.00010546366484590011,
"max": 0.0036330460889846994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251100714285717,
"min": 0.10251100714285717,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4351541000000003,
"min": 1.3886848,
"max": 2.6110153000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026084961357142886,
"min": 0.00026084961357142886,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036518945900000043,
"min": 0.0036518945900000043,
"max": 0.12112042847000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012284232303500175,
"min": 0.012209154665470123,
"max": 0.4757020175457001,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17197924852371216,
"min": 0.17092816531658173,
"max": 3.329914093017578,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 254.1764705882353,
"min": 254.1764705882353,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30247.0,
"min": 15984.0,
"max": 33412.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7290117544035952,
"min": -1.0000000521540642,
"max": 1.7290117544035952,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 205.75239877402782,
"min": -30.576001703739166,
"max": 205.75239877402782,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7290117544035952,
"min": -1.0000000521540642,
"max": 1.7290117544035952,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 205.75239877402782,
"min": -30.576001703739166,
"max": 205.75239877402782,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03253405797992297,
"min": 0.03253405797992297,
"max": 9.337946828454733,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.8715528996108333,
"min": 3.6960685305966763,
"max": 149.40714925527573,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1747927135",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1747930312"
},
"total": 3176.691407279,
"count": 1,
"self": 0.5875935489998483,
"children": {
"run_training.setup": {
"total": 0.03733900700012782,
"count": 1,
"self": 0.03733900700012782
},
"TrainerController.start_learning": {
"total": 3176.066474723,
"count": 1,
"self": 2.3928807590568795,
"children": {
"TrainerController._reset_env": {
"total": 3.9162066349999805,
"count": 1,
"self": 3.9162066349999805
},
"TrainerController.advance": {
"total": 3169.6721310889425,
"count": 64202,
"self": 2.563738766865754,
"children": {
"env_step": {
"total": 2132.544880050033,
"count": 64202,
"self": 1973.5777629570339,
"children": {
"SubprocessEnvManager._take_step": {
"total": 157.62763653499815,
"count": 64202,
"self": 6.962649240043902,
"children": {
"TorchPolicy.evaluate": {
"total": 150.66498729495424,
"count": 62557,
"self": 150.66498729495424
}
}
},
"workers": {
"total": 1.3394805580010143,
"count": 64202,
"self": 0.0,
"children": {
"worker_root": {
"total": 3168.8218247099835,
"count": 64202,
"is_parallel": true,
"self": 1365.5821874760165,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003908693000084895,
"count": 1,
"is_parallel": true,
"self": 0.001402389000304538,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002506303999780357,
"count": 8,
"is_parallel": true,
"self": 0.002506303999780357
}
}
},
"UnityEnvironment.step": {
"total": 0.1635672100001102,
"count": 1,
"is_parallel": true,
"self": 0.0006251830000110203,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005295720000049187,
"count": 1,
"is_parallel": true,
"self": 0.0005295720000049187
},
"communicator.exchange": {
"total": 0.16065895200017621,
"count": 1,
"is_parallel": true,
"self": 0.16065895200017621
},
"steps_from_proto": {
"total": 0.0017535029999180551,
"count": 1,
"is_parallel": true,
"self": 0.00035826799944516097,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013952350004728942,
"count": 8,
"is_parallel": true,
"self": 0.0013952350004728942
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1803.239637233967,
"count": 64201,
"is_parallel": true,
"self": 44.713814938863834,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 31.007144592068016,
"count": 64201,
"is_parallel": true,
"self": 31.007144592068016
},
"communicator.exchange": {
"total": 1600.1586063111117,
"count": 64201,
"is_parallel": true,
"self": 1600.1586063111117
},
"steps_from_proto": {
"total": 127.36007139192338,
"count": 64201,
"is_parallel": true,
"self": 27.975875163406272,
"children": {
"_process_rank_one_or_two_observation": {
"total": 99.38419622851711,
"count": 513608,
"is_parallel": true,
"self": 99.38419622851711
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1034.5635122720437,
"count": 64202,
"self": 4.5667505860353685,
"children": {
"process_trajectory": {
"total": 162.054659459003,
"count": 64202,
"self": 161.7196996410023,
"children": {
"RLTrainer._checkpoint": {
"total": 0.33495981800069785,
"count": 2,
"self": 0.33495981800069785
}
}
},
"_update_policy": {
"total": 867.9421022270053,
"count": 455,
"self": 352.2108937890373,
"children": {
"TorchPPOOptimizer.update": {
"total": 515.731208437968,
"count": 22800,
"self": 515.731208437968
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1100000847363845e-06,
"count": 1,
"self": 1.1100000847363845e-06
},
"TrainerController._save_models": {
"total": 0.08525513000040519,
"count": 1,
"self": 0.0017056670003512409,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08354946300005395,
"count": 1,
"self": 0.08354946300005395
}
}
}
}
}
}
}