{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4350137710571289,
"min": 0.4265413284301758,
"max": 1.448898196220398,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13113.0546875,
"min": 12891.78515625,
"max": 43953.77734375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989993.0,
"min": 29914.0,
"max": 989993.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989993.0,
"min": 29914.0,
"max": 989993.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.41426029801368713,
"min": -0.1939813792705536,
"max": 0.41426029801368713,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 110.60749816894531,
"min": -45.97358703613281,
"max": 110.60749816894531,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.26519113779067993,
"min": -0.26519113779067993,
"max": 0.2595091164112091,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -70.8060302734375,
"min": -70.8060302734375,
"max": 62.80120849609375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07249231915252483,
"min": 0.06485588262158086,
"max": 0.07448953349098919,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0148924681353475,
"min": 0.5134969463775299,
"max": 1.0755207141046415,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.02350611888723812,
"min": 9.573241595768217e-05,
"max": 0.02482281828115167,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.3290856644213337,
"min": 0.0013402538234075503,
"max": 0.3475194559361234,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.315533275807137e-06,
"min": 7.315533275807137e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010241746586129991,
"min": 0.00010241746586129991,
"max": 0.003633872288709299,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024384785714286,
"min": 0.1024384785714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4341387000000003,
"min": 1.3886848,
"max": 2.6112906999999996,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025360400928571406,
"min": 0.00025360400928571406,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003550456129999997,
"min": 0.003550456129999997,
"max": 0.12114794093000003,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011065131984651089,
"min": 0.010304251685738564,
"max": 0.3430934250354767,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1549118459224701,
"min": 0.1442595273256302,
"max": 2.401654005050659,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 389.67105263157896,
"min": 389.67105263157896,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29615.0,
"min": 16841.0,
"max": 33080.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.481546648144722,
"min": -0.9996875515207648,
"max": 1.481546648144722,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 111.11599861085415,
"min": -31.990001648664474,
"max": 111.11599861085415,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.481546648144722,
"min": -0.9996875515207648,
"max": 1.481546648144722,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 111.11599861085415,
"min": -31.990001648664474,
"max": 111.11599861085415,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.044010712603922,
"min": 0.044010712603922,
"max": 6.1528357880956985,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.30080344529415,
"min": 3.1163453327608295,
"max": 104.59820839762688,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1758146285",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0",
"mlagents_envs_version": "1.1.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1758147933"
},
"total": 1647.7058888450001,
"count": 1,
"self": 0.37211878500147577,
"children": {
"run_training.setup": {
"total": 0.024353899999368878,
"count": 1,
"self": 0.024353899999368878
},
"TrainerController.start_learning": {
"total": 1647.3094161599993,
"count": 1,
"self": 1.2296494608972353,
"children": {
"TrainerController._reset_env": {
"total": 2.1676367999998547,
"count": 1,
"self": 2.1676367999998547
},
"TrainerController.advance": {
"total": 1643.833313979102,
"count": 63518,
"self": 1.169521133998387,
"children": {
"env_step": {
"total": 1012.973452448995,
"count": 63518,
"self": 875.4385643982505,
"children": {
"SubprocessEnvManager._take_step": {
"total": 136.8001677479133,
"count": 63518,
"self": 4.187284010891744,
"children": {
"TorchPolicy.evaluate": {
"total": 132.61288373702155,
"count": 62557,
"self": 132.61288373702155
}
}
},
"workers": {
"total": 0.7347203028311924,
"count": 63518,
"self": 0.0,
"children": {
"worker_root": {
"total": 1645.3229921359898,
"count": 63518,
"is_parallel": true,
"self": 856.4614480040118,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002043696999862732,
"count": 1,
"is_parallel": true,
"self": 0.0006568849994437187,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013868120004190132,
"count": 8,
"is_parallel": true,
"self": 0.0013868120004190132
}
}
},
"UnityEnvironment.step": {
"total": 0.03858072099956189,
"count": 1,
"is_parallel": true,
"self": 0.000439674000517698,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003295999995316379,
"count": 1,
"is_parallel": true,
"self": 0.0003295999995316379
},
"communicator.exchange": {
"total": 0.03652156899988768,
"count": 1,
"is_parallel": true,
"self": 0.03652156899988768
},
"steps_from_proto": {
"total": 0.0012898779996248777,
"count": 1,
"is_parallel": true,
"self": 0.00033262000033573713,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009572579992891406,
"count": 8,
"is_parallel": true,
"self": 0.0009572579992891406
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 788.861544131978,
"count": 63517,
"is_parallel": true,
"self": 19.61504065508143,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.665959033975014,
"count": 63517,
"is_parallel": true,
"self": 13.665959033975014
},
"communicator.exchange": {
"total": 696.1940708950742,
"count": 63517,
"is_parallel": true,
"self": 696.1940708950742
},
"steps_from_proto": {
"total": 59.386473547847345,
"count": 63517,
"is_parallel": true,
"self": 12.625899970039427,
"children": {
"_process_rank_one_or_two_observation": {
"total": 46.76057357780792,
"count": 508136,
"is_parallel": true,
"self": 46.76057357780792
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 629.6903403961087,
"count": 63518,
"self": 2.447412737167724,
"children": {
"process_trajectory": {
"total": 116.10428057494664,
"count": 63518,
"self": 115.91502333394601,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18925724100063235,
"count": 2,
"self": 0.18925724100063235
}
}
},
"_update_policy": {
"total": 511.13864708399433,
"count": 457,
"self": 280.1134133019168,
"children": {
"TorchPPOOptimizer.update": {
"total": 231.02523378207752,
"count": 22809,
"self": 231.02523378207752
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.030000021273736e-06,
"count": 1,
"self": 1.030000021273736e-06
},
"TrainerController._save_models": {
"total": 0.07881489000010333,
"count": 1,
"self": 0.001300478999837651,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07751441100026568,
"count": 1,
"self": 0.07751441100026568
}
}
}
}
}
}
}