{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.35679852962493896,
"min": 0.35679852962493896,
"max": 1.4458413124084473,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10732.5,
"min": 10732.5,
"max": 43861.04296875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989972.0,
"min": 29952.0,
"max": 989972.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989972.0,
"min": 29952.0,
"max": 989972.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5421725511550903,
"min": -0.11502400040626526,
"max": 0.665208637714386,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 150.18179321289062,
"min": -27.720783233642578,
"max": 189.58445739746094,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.004160713404417038,
"min": -0.0011252019321545959,
"max": 0.35322853922843933,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.152517557144165,
"min": -0.3128061294555664,
"max": 84.62960815429688,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06711535931223121,
"min": 0.06364875270572669,
"max": 0.07235009140070117,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9396150303712371,
"min": 0.49188900694228316,
"max": 1.0792314634309152,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01621282336471507,
"min": 0.0002343982867509289,
"max": 0.016271474770855134,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22697952710601094,
"min": 0.0030471777277620757,
"max": 0.22780064679197187,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.4898975033999995e-06,
"min": 7.4898975033999995e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001048585650476,
"min": 0.0001048585650476,
"max": 0.0033728584757138995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249660000000002,
"min": 0.10249660000000002,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4349524000000002,
"min": 1.3886848,
"max": 2.4242861,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025941033999999995,
"min": 0.00025941033999999995,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036317447599999997,
"min": 0.0036317447599999997,
"max": 0.11244618139000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009280407801270485,
"min": 0.009280407801270485,
"max": 0.48302364349365234,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1299257129430771,
"min": 0.1299257129430771,
"max": 3.3811655044555664,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 330.752688172043,
"min": 299.7291666666667,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30760.0,
"min": 15984.0,
"max": 32702.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5832064287156187,
"min": -1.0000000521540642,
"max": 1.6984040296137934,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 147.23819787055254,
"min": -30.73400168120861,
"max": 168.14199893176556,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5832064287156187,
"min": -1.0000000521540642,
"max": 1.6984040296137934,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 147.23819787055254,
"min": -30.73400168120861,
"max": 168.14199893176556,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03212861595356146,
"min": 0.030104450516015884,
"max": 10.289079973474145,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.9879612836812157,
"min": 2.7841255092353094,
"max": 164.62527957558632,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1743375193",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1743376332"
},
"total": 1138.6493281560251,
"count": 1,
"self": 1.0724825290963054,
"children": {
"run_training.setup": {
"total": 0.018618091009557247,
"count": 1,
"self": 0.018618091009557247
},
"TrainerController.start_learning": {
"total": 1137.5582275359193,
"count": 1,
"self": 0.7853155623888597,
"children": {
"TrainerController._reset_env": {
"total": 3.875911117065698,
"count": 1,
"self": 3.875911117065698
},
"TrainerController.advance": {
"total": 1132.8077018995536,
"count": 63961,
"self": 0.777552910731174,
"children": {
"env_step": {
"total": 683.3575973187108,
"count": 63961,
"self": 588.5494736894034,
"children": {
"SubprocessEnvManager._take_step": {
"total": 94.33111253927927,
"count": 63961,
"self": 2.9149739193962887,
"children": {
"TorchPolicy.evaluate": {
"total": 91.41613861988299,
"count": 62577,
"self": 91.41613861988299
}
}
},
"workers": {
"total": 0.4770110900280997,
"count": 63961,
"self": 0.0,
"children": {
"worker_root": {
"total": 1136.1145696754102,
"count": 63961,
"is_parallel": true,
"self": 606.6846250903327,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0014298500027507544,
"count": 1,
"is_parallel": true,
"self": 0.0004531999584287405,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009766500443220139,
"count": 8,
"is_parallel": true,
"self": 0.0009766500443220139
}
}
},
"UnityEnvironment.step": {
"total": 0.025487688020803034,
"count": 1,
"is_parallel": true,
"self": 0.00027467694599181414,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022860709577798843,
"count": 1,
"is_parallel": true,
"self": 0.00022860709577798843
},
"communicator.exchange": {
"total": 0.02416202798485756,
"count": 1,
"is_parallel": true,
"self": 0.02416202798485756
},
"steps_from_proto": {
"total": 0.0008223759941756725,
"count": 1,
"is_parallel": true,
"self": 0.00019058911129832268,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006317868828773499,
"count": 8,
"is_parallel": true,
"self": 0.0006317868828773499
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 529.4299445850775,
"count": 63960,
"is_parallel": true,
"self": 14.461268611834385,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 9.63041244528722,
"count": 63960,
"is_parallel": true,
"self": 9.63041244528722
},
"communicator.exchange": {
"total": 460.2224515932612,
"count": 63960,
"is_parallel": true,
"self": 460.2224515932612
},
"steps_from_proto": {
"total": 45.11581193469465,
"count": 63960,
"is_parallel": true,
"self": 9.510044377064332,
"children": {
"_process_rank_one_or_two_observation": {
"total": 35.605767557630315,
"count": 511680,
"is_parallel": true,
"self": 35.605767557630315
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 448.6725516701117,
"count": 63961,
"self": 1.4725131987361237,
"children": {
"process_trajectory": {
"total": 86.82653168123215,
"count": 63961,
"self": 86.52914107823744,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2973906029947102,
"count": 2,
"self": 0.2973906029947102
}
}
},
"_update_policy": {
"total": 360.37350679014344,
"count": 445,
"self": 198.5389704372501,
"children": {
"TorchPPOOptimizer.update": {
"total": 161.83453635289334,
"count": 22827,
"self": 161.83453635289334
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.849885150790215e-07,
"count": 1,
"self": 7.849885150790215e-07
},
"TrainerController._save_models": {
"total": 0.0892981719225645,
"count": 1,
"self": 0.004541158908978105,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0847570130135864,
"count": 1,
"self": 0.0847570130135864
}
}
}
}
}
}
}