{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2815791368484497,
"min": 0.26410967111587524,
"max": 1.4433350563049316,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8420.3427734375,
"min": 7952.8701171875,
"max": 43785.01171875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989974.0,
"min": 29952.0,
"max": 989974.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989974.0,
"min": 29952.0,
"max": 989974.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6340747475624084,
"min": -0.12323407828807831,
"max": 0.6357510685920715,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 179.44314575195312,
"min": -29.57617950439453,
"max": 180.19476318359375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.006997785065323114,
"min": -0.006997785065323114,
"max": 0.4847884178161621,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -1.9803731441497803,
"min": -1.9803731441497803,
"max": 114.89485168457031,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06771040390788888,
"min": 0.06565829043262586,
"max": 0.07423109274693869,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0156560586183332,
"min": 0.5014007827912688,
"max": 1.073355843549293,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013978243890991495,
"min": 0.00048400736985462317,
"max": 0.015833817790715125,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20967365836487242,
"min": 0.005808088438255478,
"max": 0.2375072668607269,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.508037497353334e-06,
"min": 7.508037497353334e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001126205624603,
"min": 0.0001126205624603,
"max": 0.0035087528304158,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025026466666667,
"min": 0.1025026466666667,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5375397000000004,
"min": 1.3886848,
"max": 2.5695842000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000260014402,
"min": 0.000260014402,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0039002160299999996,
"min": 0.0039002160299999996,
"max": 0.11698146158,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011765619739890099,
"min": 0.011765619739890099,
"max": 0.47923702001571655,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17648430168628693,
"min": 0.17331907153129578,
"max": 3.354659080505371,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 324.7956989247312,
"min": 309.03125,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30206.0,
"min": 15984.0,
"max": 32943.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.610679548434032,
"min": -1.0000000521540642,
"max": 1.610679548434032,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 149.79319800436497,
"min": -29.897601634263992,
"max": 163.33459869027138,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.610679548434032,
"min": -1.0000000521540642,
"max": 1.610679548434032,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 149.79319800436497,
"min": -29.897601634263992,
"max": 163.33459869027138,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04013715205123893,
"min": 0.039013729638933604,
"max": 9.595420572906733,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.732755140765221,
"min": 3.637642389527173,
"max": 153.52672916650772,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1751681982",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training--force --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1751684198"
},
"total": 2216.349738875,
"count": 1,
"self": 0.5000486310000269,
"children": {
"run_training.setup": {
"total": 0.020424682999873767,
"count": 1,
"self": 0.020424682999873767
},
"TrainerController.start_learning": {
"total": 2215.8292655610003,
"count": 1,
"self": 1.3335424570263967,
"children": {
"TrainerController._reset_env": {
"total": 2.259902538999995,
"count": 1,
"self": 2.259902538999995
},
"TrainerController.advance": {
"total": 2212.1512341039743,
"count": 63913,
"self": 1.5252757710832157,
"children": {
"env_step": {
"total": 1563.0030217729068,
"count": 63913,
"self": 1410.9820126908307,
"children": {
"SubprocessEnvManager._take_step": {
"total": 151.1900411480524,
"count": 63913,
"self": 4.643048821046705,
"children": {
"TorchPolicy.evaluate": {
"total": 146.54699232700568,
"count": 62562,
"self": 146.54699232700568
}
}
},
"workers": {
"total": 0.8309679340236471,
"count": 63913,
"self": 0.0,
"children": {
"worker_root": {
"total": 2210.34583594405,
"count": 63913,
"is_parallel": true,
"self": 914.5653786050652,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018950979999772244,
"count": 1,
"is_parallel": true,
"self": 0.0006416309997803182,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012534670001969062,
"count": 8,
"is_parallel": true,
"self": 0.0012534670001969062
}
}
},
"UnityEnvironment.step": {
"total": 0.0472627530000409,
"count": 1,
"is_parallel": true,
"self": 0.0005265940001208946,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004924019999634766,
"count": 1,
"is_parallel": true,
"self": 0.0004924019999634766
},
"communicator.exchange": {
"total": 0.044191999000076976,
"count": 1,
"is_parallel": true,
"self": 0.044191999000076976
},
"steps_from_proto": {
"total": 0.0020517579998795554,
"count": 1,
"is_parallel": true,
"self": 0.0004183440000815608,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016334139997979946,
"count": 8,
"is_parallel": true,
"self": 0.0016334139997979946
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1295.780457338985,
"count": 63912,
"is_parallel": true,
"self": 31.95620804500186,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.218502664010657,
"count": 63912,
"is_parallel": true,
"self": 23.218502664010657
},
"communicator.exchange": {
"total": 1143.4281385669956,
"count": 63912,
"is_parallel": true,
"self": 1143.4281385669956
},
"steps_from_proto": {
"total": 97.17760806297679,
"count": 63912,
"is_parallel": true,
"self": 19.755029070046703,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.42257899293008,
"count": 511296,
"is_parallel": true,
"self": 77.42257899293008
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 647.6229365599845,
"count": 63913,
"self": 2.6369402880091,
"children": {
"process_trajectory": {
"total": 126.06523323996976,
"count": 63913,
"self": 125.8632770749698,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20195616499995594,
"count": 2,
"self": 0.20195616499995594
}
}
},
"_update_policy": {
"total": 518.9207630320057,
"count": 452,
"self": 289.5463617950211,
"children": {
"TorchPPOOptimizer.update": {
"total": 229.37440123698457,
"count": 22794,
"self": 229.37440123698457
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1009997251676396e-06,
"count": 1,
"self": 1.1009997251676396e-06
},
"TrainerController._save_models": {
"total": 0.08458535999989181,
"count": 1,
"self": 0.0011138629997731186,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08347149700011869,
"count": 1,
"self": 0.08347149700011869
}
}
}
}
}
}
}