{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.44520634412765503,
"min": 0.44520634412765503,
"max": 1.3810703754425049,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13477.2861328125,
"min": 13477.2861328125,
"max": 41896.15234375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989980.0,
"min": 29952.0,
"max": 989980.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989980.0,
"min": 29952.0,
"max": 989980.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.43212783336639404,
"min": -0.18313734233379364,
"max": 0.4684053063392639,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 116.24238586425781,
"min": -43.40354919433594,
"max": 126.00102996826172,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.45579254627227783,
"min": -0.014988468959927559,
"max": 0.5163129568099976,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 122.60819244384766,
"min": -3.8070712089538574,
"max": 122.60819244384766,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06715057970787812,
"min": 0.06397525224795876,
"max": 0.07444999086032576,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0072586956181717,
"min": 0.5205023378420965,
"max": 1.0422998720445606,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.026387546092478772,
"min": 0.00014067862458904144,
"max": 0.027181496368496125,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.3958131913871816,
"min": 0.0018288221196575388,
"max": 0.3958131913871816,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.553237482286666e-06,
"min": 7.553237482286666e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011329856223429998,
"min": 0.00011329856223429998,
"max": 0.0035073944308686,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251771333333336,
"min": 0.10251771333333336,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5377657000000005,
"min": 1.3886848,
"max": 2.5691314000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000261519562,
"min": 0.000261519562,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00392279343,
"min": 0.00392279343,
"max": 0.11693622685999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01139814779162407,
"min": 0.011248939670622349,
"max": 0.536886990070343,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17097221314907074,
"min": 0.15748515725135803,
"max": 3.758208751678467,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 407.8904109589041,
"min": 407.8904109589041,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29776.0,
"min": 15984.0,
"max": 33297.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5098931326033318,
"min": -1.0000000521540642,
"max": 1.5100130878511022,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 110.22219868004322,
"min": -31.996001675724983,
"max": 110.22219868004322,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5098931326033318,
"min": -1.0000000521540642,
"max": 1.5100130878511022,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 110.22219868004322,
"min": -31.996001675724983,
"max": 110.22219868004322,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04831758370600666,
"min": 0.04831758370600666,
"max": 10.49727188050747,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.5271836105384864,
"min": 3.319309462356614,
"max": 167.9563500881195,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1756736032",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1756739191"
},
"total": 3159.483036627,
"count": 1,
"self": 0.784690239000156,
"children": {
"run_training.setup": {
"total": 0.03539846199987551,
"count": 1,
"self": 0.03539846199987551
},
"TrainerController.start_learning": {
"total": 3158.662947926,
"count": 1,
"self": 2.2049312730096062,
"children": {
"TrainerController._reset_env": {
"total": 2.5674827580000965,
"count": 1,
"self": 2.5674827580000965
},
"TrainerController.advance": {
"total": 3153.812066394991,
"count": 63622,
"self": 2.3488012889456513,
"children": {
"env_step": {
"total": 2082.12002715005,
"count": 63622,
"self": 1921.229188267994,
"children": {
"SubprocessEnvManager._take_step": {
"total": 159.55175991598435,
"count": 63622,
"self": 7.067049272874328,
"children": {
"TorchPolicy.evaluate": {
"total": 152.48471064311002,
"count": 62559,
"self": 152.48471064311002
}
}
},
"workers": {
"total": 1.3390789660718383,
"count": 63622,
"self": 0.0,
"children": {
"worker_root": {
"total": 3149.884490215956,
"count": 63622,
"is_parallel": true,
"self": 1405.922514306957,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0032762359999196633,
"count": 1,
"is_parallel": true,
"self": 0.0012652909997541428,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020109450001655205,
"count": 8,
"is_parallel": true,
"self": 0.0020109450001655205
}
}
},
"UnityEnvironment.step": {
"total": 0.06552208800007975,
"count": 1,
"is_parallel": true,
"self": 0.0007371579999926325,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005448760000490438,
"count": 1,
"is_parallel": true,
"self": 0.0005448760000490438
},
"communicator.exchange": {
"total": 0.062124616999881255,
"count": 1,
"is_parallel": true,
"self": 0.062124616999881255
},
"steps_from_proto": {
"total": 0.002115437000156817,
"count": 1,
"is_parallel": true,
"self": 0.00047439400032089907,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016410429998359177,
"count": 8,
"is_parallel": true,
"self": 0.0016410429998359177
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1743.9619759089987,
"count": 63621,
"is_parallel": true,
"self": 44.466937608948456,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 30.986048664010468,
"count": 63621,
"is_parallel": true,
"self": 30.986048664010468
},
"communicator.exchange": {
"total": 1541.1463884331004,
"count": 63621,
"is_parallel": true,
"self": 1541.1463884331004
},
"steps_from_proto": {
"total": 127.36260120293946,
"count": 63621,
"is_parallel": true,
"self": 27.454550153152695,
"children": {
"_process_rank_one_or_two_observation": {
"total": 99.90805104978676,
"count": 508968,
"is_parallel": true,
"self": 99.90805104978676
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1069.343237955995,
"count": 63622,
"self": 4.333086090933648,
"children": {
"process_trajectory": {
"total": 159.40494199206137,
"count": 63622,
"self": 159.2158299930618,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18911199899957865,
"count": 2,
"self": 0.18911199899957865
}
}
},
"_update_policy": {
"total": 905.6052098729999,
"count": 452,
"self": 356.510550168922,
"children": {
"TorchPPOOptimizer.update": {
"total": 549.0946597040779,
"count": 22749,
"self": 549.0946597040779
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.779996616998687e-07,
"count": 1,
"self": 9.779996616998687e-07
},
"TrainerController._save_models": {
"total": 0.07846652199987147,
"count": 1,
"self": 0.0019340090002515353,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07653251299961994,
"count": 1,
"self": 0.07653251299961994
}
}
}
}
}
}
}