{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4506365656852722,
"min": 0.4301571547985077,
"max": 1.4723435640335083,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13346.052734375,
"min": 12946.009765625,
"max": 44665.015625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989977.0,
"min": 29883.0,
"max": 989977.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989977.0,
"min": 29883.0,
"max": 989977.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.44706985354423523,
"min": -0.0852995216846466,
"max": 0.5319568514823914,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 120.26178741455078,
"min": -20.471885681152344,
"max": 144.6922607421875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.017799345776438713,
"min": -0.01606505550444126,
"max": 0.24433672428131104,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.788023948669434,
"min": -4.241174697875977,
"max": 59.129486083984375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0670831149431049,
"min": 0.06452710820650238,
"max": 0.07401372389455955,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0062467241465736,
"min": 0.5180960672619169,
"max": 1.0326667025025624,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012940356377253518,
"min": 0.00041702614363527796,
"max": 0.015683270606608686,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19410534565880277,
"min": 0.005421339867258614,
"max": 0.2195657884925216,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.548697483799997e-06,
"min": 7.548697483799997e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011323046225699996,
"min": 0.00011323046225699996,
"max": 0.0035081804306065995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251619999999997,
"min": 0.10251619999999997,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5377429999999996,
"min": 1.3886848,
"max": 2.5693934,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002613683799999999,
"min": 0.0002613683799999999,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003920525699999998,
"min": 0.003920525699999998,
"max": 0.11696240066000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010470191016793251,
"min": 0.010470191016793251,
"max": 0.42444613575935364,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15705285966396332,
"min": 0.15179023146629333,
"max": 2.971122980117798,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 430.69117647058823,
"min": 372.61538461538464,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29287.0,
"min": 16810.0,
"max": 32956.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3918119219701681,
"min": -0.9999600519736608,
"max": 1.549633308289907,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 93.25139877200127,
"min": -29.998801559209824,
"max": 120.87139804661274,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3918119219701681,
"min": -0.9999600519736608,
"max": 1.549633308289907,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 93.25139877200127,
"min": -29.998801559209824,
"max": 120.87139804661274,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04687964050564046,
"min": 0.04188277736699465,
"max": 7.873247370123863,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.140935913877911,
"min": 3.140935913877911,
"max": 133.84520529210567,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1767967411",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1767969576"
},
"total": 2164.911773074,
"count": 1,
"self": 0.4842386020000049,
"children": {
"run_training.setup": {
"total": 0.024148526999852038,
"count": 1,
"self": 0.024148526999852038
},
"TrainerController.start_learning": {
"total": 2164.403385945,
"count": 1,
"self": 1.4689256789938554,
"children": {
"TrainerController._reset_env": {
"total": 2.2612166050000724,
"count": 1,
"self": 2.2612166050000724
},
"TrainerController.advance": {
"total": 2160.595386521006,
"count": 63616,
"self": 1.46234413302318,
"children": {
"env_step": {
"total": 1500.8253894999832,
"count": 63616,
"self": 1346.1176546379181,
"children": {
"SubprocessEnvManager._take_step": {
"total": 153.85851863696598,
"count": 63616,
"self": 4.671780129912577,
"children": {
"TorchPolicy.evaluate": {
"total": 149.1867385070534,
"count": 62563,
"self": 149.1867385070534
}
}
},
"workers": {
"total": 0.8492162250990987,
"count": 63616,
"self": 0.0,
"children": {
"worker_root": {
"total": 2157.5198030149663,
"count": 63616,
"is_parallel": true,
"self": 931.066611566989,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017965079998703004,
"count": 1,
"is_parallel": true,
"self": 0.0005891509999855771,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012073569998847233,
"count": 8,
"is_parallel": true,
"self": 0.0012073569998847233
}
}
},
"UnityEnvironment.step": {
"total": 0.05861401700008173,
"count": 1,
"is_parallel": true,
"self": 0.0005402629999480268,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043822300017382076,
"count": 1,
"is_parallel": true,
"self": 0.00043822300017382076
},
"communicator.exchange": {
"total": 0.055924718999904144,
"count": 1,
"is_parallel": true,
"self": 0.055924718999904144
},
"steps_from_proto": {
"total": 0.0017108120000557392,
"count": 1,
"is_parallel": true,
"self": 0.0003637100005562388,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013471019994995004,
"count": 8,
"is_parallel": true,
"self": 0.0013471019994995004
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1226.4531914479774,
"count": 63615,
"is_parallel": true,
"self": 33.60341632992254,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.897310649010024,
"count": 63615,
"is_parallel": true,
"self": 22.897310649010024
},
"communicator.exchange": {
"total": 1064.2786051031092,
"count": 63615,
"is_parallel": true,
"self": 1064.2786051031092
},
"steps_from_proto": {
"total": 105.67385936593564,
"count": 63615,
"is_parallel": true,
"self": 22.09089908392548,
"children": {
"_process_rank_one_or_two_observation": {
"total": 83.58296028201016,
"count": 508920,
"is_parallel": true,
"self": 83.58296028201016
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 658.3076528879992,
"count": 63616,
"self": 2.802456128857102,
"children": {
"process_trajectory": {
"total": 122.6630907951378,
"count": 63616,
"self": 122.46348511813744,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19960567700036336,
"count": 2,
"self": 0.19960567700036336
}
}
},
"_update_policy": {
"total": 532.8421059640043,
"count": 451,
"self": 295.8816828259776,
"children": {
"TorchPPOOptimizer.update": {
"total": 236.96042313802673,
"count": 22752,
"self": 236.96042313802673
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.68000370246591e-07,
"count": 1,
"self": 8.68000370246591e-07
},
"TrainerController._save_models": {
"total": 0.07785627199973533,
"count": 1,
"self": 0.0014498629993795475,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07640640900035578,
"count": 1,
"self": 0.07640640900035578
}
}
}
}
}
}
}