{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.31931546330451965,
"min": 0.31324079632759094,
"max": 1.4521349668502808,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9630.5546875,
"min": 9357.12890625,
"max": 44051.96484375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989963.0,
"min": 29952.0,
"max": 989963.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989963.0,
"min": 29952.0,
"max": 989963.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6975789666175842,
"min": -0.272569864988327,
"max": 0.6975789666175842,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 198.80999755859375,
"min": -64.59906005859375,
"max": 198.80999755859375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01004840712994337,
"min": 0.008229592815041542,
"max": 0.47631075978279114,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.8637959957122803,
"min": 2.1973013877868652,
"max": 112.88565063476562,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06559790080964809,
"min": 0.06559790080964809,
"max": 0.07448295098487735,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9183706113350733,
"min": 0.5057564832180828,
"max": 1.067751356672756,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014806569684185974,
"min": 0.00043815759177956047,
"max": 0.016199686225531078,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20729197557860363,
"min": 0.004381575917795605,
"max": 0.24254277272334818,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.451690373278572e-06,
"min": 7.451690373278572e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010432366522590001,
"min": 0.00010432366522590001,
"max": 0.0032589740136753998,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024838642857143,
"min": 0.1024838642857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4347741,
"min": 1.3886848,
"max": 2.4858493,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025813804214285717,
"min": 0.00025813804214285717,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036139325900000005,
"min": 0.0036139325900000005,
"max": 0.10865382754,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01382953580468893,
"min": 0.01382953580468893,
"max": 0.5617789626121521,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19361349940299988,
"min": 0.19361349940299988,
"max": 3.93245267868042,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 277.45631067961165,
"min": 273.24528301886795,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28578.0,
"min": 15984.0,
"max": 32559.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7021176313214443,
"min": -1.0000000521540642,
"max": 1.726754701221889,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 173.6159983947873,
"min": -30.589401558041573,
"max": 183.03599832952023,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7021176313214443,
"min": -1.0000000521540642,
"max": 1.726754701221889,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 173.6159983947873,
"min": -30.589401558041573,
"max": 183.03599832952023,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03961917243848838,
"min": 0.03961917243848838,
"max": 11.13578854687512,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.041155588725815,
"min": 4.041155588725815,
"max": 178.1726167500019,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1758789977",
"python_version": "3.10.12 (main, Aug 15 2025, 14:32:43) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1758792253"
},
"total": 2276.301296884,
"count": 1,
"self": 0.5308441979996132,
"children": {
"run_training.setup": {
"total": 0.03096923200018864,
"count": 1,
"self": 0.03096923200018864
},
"TrainerController.start_learning": {
"total": 2275.739483454,
"count": 1,
"self": 1.5434628680150126,
"children": {
"TrainerController._reset_env": {
"total": 2.2549415229996157,
"count": 1,
"self": 2.2549415229996157
},
"TrainerController.advance": {
"total": 2271.865511199985,
"count": 64126,
"self": 1.5262685509369476,
"children": {
"env_step": {
"total": 1611.6721493779864,
"count": 64126,
"self": 1456.4916511208057,
"children": {
"SubprocessEnvManager._take_step": {
"total": 154.2594781141911,
"count": 64126,
"self": 4.798168300246289,
"children": {
"TorchPolicy.evaluate": {
"total": 149.4613098139448,
"count": 62557,
"self": 149.4613098139448
}
}
},
"workers": {
"total": 0.9210201429896188,
"count": 64126,
"self": 0.0,
"children": {
"worker_root": {
"total": 2269.812782494979,
"count": 64126,
"is_parallel": true,
"self": 933.7273671610092,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001899197000057029,
"count": 1,
"is_parallel": true,
"self": 0.0006291049999163079,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012700920001407212,
"count": 8,
"is_parallel": true,
"self": 0.0012700920001407212
}
}
},
"UnityEnvironment.step": {
"total": 0.05225823399996443,
"count": 1,
"is_parallel": true,
"self": 0.0005746410001847835,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045884699966336484,
"count": 1,
"is_parallel": true,
"self": 0.00045884699966336484
},
"communicator.exchange": {
"total": 0.04934073000003991,
"count": 1,
"is_parallel": true,
"self": 0.04934073000003991
},
"steps_from_proto": {
"total": 0.0018840160000763717,
"count": 1,
"is_parallel": true,
"self": 0.0004200970006422722,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014639189994340995,
"count": 8,
"is_parallel": true,
"self": 0.0014639189994340995
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1336.0854153339696,
"count": 64125,
"is_parallel": true,
"self": 33.873171795823055,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.176790329046526,
"count": 64125,
"is_parallel": true,
"self": 24.176790329046526
},
"communicator.exchange": {
"total": 1166.7797517380445,
"count": 64125,
"is_parallel": true,
"self": 1166.7797517380445
},
"steps_from_proto": {
"total": 111.25570147105554,
"count": 64125,
"is_parallel": true,
"self": 23.473260051271154,
"children": {
"_process_rank_one_or_two_observation": {
"total": 87.78244141978439,
"count": 513000,
"is_parallel": true,
"self": 87.78244141978439
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 658.6670932710617,
"count": 64126,
"self": 2.8879338011533946,
"children": {
"process_trajectory": {
"total": 128.18365811291233,
"count": 64126,
"self": 127.99375911591187,
"children": {
"RLTrainer._checkpoint": {
"total": 0.18989899700045498,
"count": 2,
"self": 0.18989899700045498
}
}
},
"_update_policy": {
"total": 527.595501356996,
"count": 447,
"self": 294.08744115000945,
"children": {
"TorchPPOOptimizer.update": {
"total": 233.50806020698656,
"count": 22824,
"self": 233.50806020698656
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.650002539274283e-07,
"count": 1,
"self": 9.650002539274283e-07
},
"TrainerController._save_models": {
"total": 0.07556689800003369,
"count": 1,
"self": 0.0011185720004505129,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07444832599958318,
"count": 1,
"self": 0.07444832599958318
}
}
}
}
}
}
}