{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3326510787010193,
"min": 0.3230558931827545,
"max": 1.4894448518753052,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9798.5703125,
"min": 9798.5703125,
"max": 45183.80078125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989915.0,
"min": 29946.0,
"max": 989915.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989915.0,
"min": 29946.0,
"max": 989915.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.4596026837825775,
"min": -0.1156543418765068,
"max": 0.5197778940200806,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 123.17351531982422,
"min": -27.757041931152344,
"max": 141.3795928955078,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0023681391030550003,
"min": -0.015618355944752693,
"max": 0.3161812126636505,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.6346612572669983,
"min": -4.24819278717041,
"max": 74.93494415283203,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0678751615226805,
"min": 0.06474885821047764,
"max": 0.07475457629755398,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9502522613175269,
"min": 0.5390259911140234,
"max": 1.0396951321323,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014891964754191455,
"min": 0.00039158475779309547,
"max": 0.015148671742367911,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20848750655868037,
"min": 0.00430743233572405,
"max": 0.21265881514652094,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.732125994085712e-06,
"min": 7.732125994085712e-06,
"max": 0.000295014226661925,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010824976391719998,
"min": 0.00010824976391719998,
"max": 0.0033818159727280997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10257734285714286,
"min": 0.10257734285714286,
"max": 0.198338075,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4360828,
"min": 1.4360828,
"max": 2.5273868999999998,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026747655142857137,
"min": 0.00026747655142857137,
"max": 0.0098339736925,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037446717199999995,
"min": 0.0037446717199999995,
"max": 0.11275446281000003,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010532896034419537,
"min": 0.010532896034419537,
"max": 0.3225255310535431,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14746055006980896,
"min": 0.14746055006980896,
"max": 2.5802042484283447,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 399.46666666666664,
"min": 379.7294117647059,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29960.0,
"min": 16873.0,
"max": 34673.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4359107913600433,
"min": -0.9999375520274043,
"max": 1.5302584359279046,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 106.2573985606432,
"min": -31.998001664876938,
"max": 124.09419818967581,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4359107913600433,
"min": -0.9999375520274043,
"max": 1.5302584359279046,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 106.2573985606432,
"min": -31.998001664876938,
"max": 124.09419818967581,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.043802913162712565,
"min": 0.043461986211782676,
"max": 6.801037587663707,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2414155740407296,
"min": 3.1300154372875113,
"max": 115.61763899028301,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1744418879",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1744421099"
},
"total": 2220.4605057890003,
"count": 1,
"self": 0.47592336000070645,
"children": {
"run_training.setup": {
"total": 0.02097656299974915,
"count": 1,
"self": 0.02097656299974915
},
"TrainerController.start_learning": {
"total": 2219.963605866,
"count": 1,
"self": 1.287990899905708,
"children": {
"TrainerController._reset_env": {
"total": 3.024095395000586,
"count": 1,
"self": 3.024095395000586
},
"TrainerController.advance": {
"total": 2215.5654601560946,
"count": 63642,
"self": 1.358040273923507,
"children": {
"env_step": {
"total": 1533.085865783004,
"count": 63642,
"self": 1383.526175805001,
"children": {
"SubprocessEnvManager._take_step": {
"total": 148.8237194159101,
"count": 63642,
"self": 4.505598698050562,
"children": {
"TorchPolicy.evaluate": {
"total": 144.31812071785953,
"count": 62565,
"self": 144.31812071785953
}
}
},
"workers": {
"total": 0.7359705620929162,
"count": 63642,
"self": 0.0,
"children": {
"worker_root": {
"total": 2214.9881754300213,
"count": 63642,
"is_parallel": true,
"self": 941.6920669539231,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0024133739998433157,
"count": 1,
"is_parallel": true,
"self": 0.0007029499993222998,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017104240005210158,
"count": 8,
"is_parallel": true,
"self": 0.0017104240005210158
}
}
},
"UnityEnvironment.step": {
"total": 0.04965212700062693,
"count": 1,
"is_parallel": true,
"self": 0.0006086140010665986,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005258740002318518,
"count": 1,
"is_parallel": true,
"self": 0.0005258740002318518
},
"communicator.exchange": {
"total": 0.04689568499998131,
"count": 1,
"is_parallel": true,
"self": 0.04689568499998131
},
"steps_from_proto": {
"total": 0.0016219539993471699,
"count": 1,
"is_parallel": true,
"self": 0.0003639349988588947,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012580190004882752,
"count": 8,
"is_parallel": true,
"self": 0.0012580190004882752
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1273.2961084760982,
"count": 63641,
"is_parallel": true,
"self": 31.6072275801489,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.28329508392926,
"count": 63641,
"is_parallel": true,
"self": 23.28329508392926
},
"communicator.exchange": {
"total": 1124.1632278851048,
"count": 63641,
"is_parallel": true,
"self": 1124.1632278851048
},
"steps_from_proto": {
"total": 94.24235792691525,
"count": 63641,
"is_parallel": true,
"self": 18.58437814986246,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.65797977705279,
"count": 509128,
"is_parallel": true,
"self": 75.65797977705279
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 681.121554099167,
"count": 63642,
"self": 2.471611600174583,
"children": {
"process_trajectory": {
"total": 126.07052969097822,
"count": 63642,
"self": 125.80845996497828,
"children": {
"RLTrainer._checkpoint": {
"total": 0.26206972599993605,
"count": 2,
"self": 0.26206972599993605
}
}
},
"_update_policy": {
"total": 552.5794128080142,
"count": 451,
"self": 302.45880873806436,
"children": {
"TorchPPOOptimizer.update": {
"total": 250.12060406994988,
"count": 22830,
"self": 250.12060406994988
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.81999619398266e-07,
"count": 1,
"self": 9.81999619398266e-07
},
"TrainerController._save_models": {
"total": 0.08605843299937987,
"count": 1,
"self": 0.001498866999099846,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08455956600028003,
"count": 1,
"self": 0.08455956600028003
}
}
}
}
}
}
}