{
  "name": "root",
  "gauges": {
    "Pyramids.Policy.Entropy.mean": {
      "value": 1.1095993518829346,
      "min": 1.1095993518829346,
      "max": 1.5281413793563843,
      "count": 3
    },
    "Pyramids.Policy.Entropy.sum": {
      "value": 33234.71875,
      "min": 33234.71875,
      "max": 46357.6953125,
      "count": 3
    },
    "Pyramids.Step.mean": {
      "value": 89964.0,
      "min": 29952.0,
      "max": 89964.0,
      "count": 3
    },
    "Pyramids.Step.sum": {
      "value": 89964.0,
      "min": 29952.0,
      "max": 89964.0,
      "count": 3
    },
    "Pyramids.Policy.ExtrinsicValueEstimate.mean": {
      "value": -0.1073603630065918,
      "min": -0.2275964617729187,
      "max": -0.1073603630065918,
      "count": 3
    },
    "Pyramids.Policy.ExtrinsicValueEstimate.sum": {
      "value": -25.87384796142578,
      "min": -53.94036102294922,
      "max": -25.87384796142578,
      "count": 3
    },
    "Pyramids.Policy.RndValueEstimate.mean": {
      "value": 0.13780467212200165,
      "min": 0.1295059472322464,
      "max": 0.18704542517662048,
      "count": 3
    },
    "Pyramids.Policy.RndValueEstimate.sum": {
      "value": 33.2109260559082,
      "min": 30.692909240722656,
      "max": 44.89090347290039,
      "count": 3
    },
    "Pyramids.Losses.PolicyLoss.mean": {
      "value": 0.06716353461042082,
      "min": 0.06716353461042082,
      "max": 0.07175296464250025,
      "count": 3
    },
    "Pyramids.Losses.PolicyLoss.sum": {
      "value": 0.6716353461042083,
      "min": 0.4735668081580795,
      "max": 0.6716353461042083,
      "count": 3
    },
    "Pyramids.Losses.ValueLoss.mean": {
      "value": 0.001088611935647835,
      "min": 0.001025184603455058,
      "max": 0.004784235185225745,
      "count": 3
    },
    "Pyramids.Losses.ValueLoss.sum": {
      "value": 0.01088611935647835,
      "min": 0.007176292224185405,
      "max": 0.03348964629658022,
      "count": 3
    },
    "Pyramids.Policy.LearningRate.mean": {
      "value": 7.57332747556e-05,
      "min": 7.57332747556e-05,
      "max": 0.0002515063018788571,
      "count": 3
    },
    "Pyramids.Policy.LearningRate.sum": {
      "value": 0.000757332747556,
      "min": 0.000757332747556,
      "max": 0.0017605441131519997,
      "count": 3
    },
    "Pyramids.Policy.Epsilon.mean": {
      "value": 0.1252444,
      "min": 0.1252444,
      "max": 0.1838354285714286,
      "count": 3
    },
    "Pyramids.Policy.Epsilon.sum": {
      "value": 1.2524440000000001,
      "min": 1.0911359999999999,
      "max": 1.2868480000000002,
      "count": 3
    },
    "Pyramids.Policy.Beta.mean": {
      "value": 0.0025319155600000004,
      "min": 0.0025319155600000004,
      "max": 0.008385159314285713,
      "count": 3
    },
    "Pyramids.Policy.Beta.sum": {
      "value": 0.025319155600000004,
      "min": 0.025319155600000004,
      "max": 0.058696115199999996,
      "count": 3
    },
    "Pyramids.Losses.RNDLoss.mean": {
      "value": 0.09883655607700348,
      "min": 0.09883655607700348,
      "max": 0.3050103187561035,
      "count": 3
    },
    "Pyramids.Losses.RNDLoss.sum": {
      "value": 0.9883655905723572,
      "min": 0.9883655905723572,
      "max": 2.1350722312927246,
      "count": 3
    },
    "Pyramids.Environment.EpisodeLength.mean": {
      "value": 979.7272727272727,
      "min": 979.7272727272727,
      "max": 999.0,
      "count": 3
    },
    "Pyramids.Environment.EpisodeLength.sum": {
      "value": 32331.0,
      "min": 15984.0,
      "max": 32331.0,
      "count": 3
    },
    "Pyramids.Environment.CumulativeReward.mean": {
      "value": -0.920078838639187,
      "min": -1.0000000521540642,
      "max": -0.920078838639187,
      "count": 3
    },
    "Pyramids.Environment.CumulativeReward.sum": {
      "value": -30.362601675093174,
      "min": -32.000001668930054,
      "max": -16.000000834465027,
      "count": 3
    },
    "Pyramids.Policy.ExtrinsicReward.mean": {
      "value": -0.920078838639187,
      "min": -1.0000000521540642,
      "max": -0.920078838639187,
      "count": 3
    },
    "Pyramids.Policy.ExtrinsicReward.sum": {
      "value": -30.362601675093174,
      "min": -32.000001668930054,
      "max": -16.000000834465027,
      "count": 3
    },
    "Pyramids.Policy.RndReward.mean": {
      "value": 1.1134050547173528,
      "min": 1.1134050547173528,
      "max": 5.731844781897962,
      "count": 3
    },
    "Pyramids.Policy.RndReward.sum": {
      "value": 36.742366805672646,
      "min": 36.742366805672646,
      "max": 91.7095165103674,
      "count": 3
    },
    "Pyramids.IsTraining.mean": {
      "value": 1.0,
      "min": 1.0,
      "max": 1.0,
      "count": 3
    },
    "Pyramids.IsTraining.sum": {
      "value": 1.0,
      "min": 1.0,
      "max": 1.0,
      "count": 3
    }
  },
  "metadata": {
    "timer_format_version": "0.1.0",
    "start_time_seconds": "1756505186",
    "python_version": "3.10.12 (main, Aug 15 2025, 14:32:43) [GCC 11.4.0]",
    "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=../training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training3 --no-graphics",
    "mlagents_version": "1.2.0.dev0",
    "mlagents_envs_version": "1.2.0.dev0",
    "communication_protocol_version": "1.5.0",
    "pytorch_version": "2.8.0+cu128",
    "numpy_version": "1.23.5",
    "end_time_seconds": "1756505379"
  },
  "total": 192.61450050700023,
  "count": 1,
  "self": 0.6423556210002062,
  "children": {
    "run_training.setup": {
      "total": 0.020055898999999044,
      "count": 1,
      "self": 0.020055898999999044
    },
    "TrainerController.start_learning": {
      "total": 191.95208898700002,
      "count": 1,
      "self": 0.12651569198669677,
      "children": {
        "TrainerController._reset_env": {
          "total": 2.976642689000073,
          "count": 1,
          "self": 2.976642689000073
        },
        "TrainerController.advance": {
          "total": 188.68799452601297,
          "count": 6263,
          "self": 0.1372446490099719,
          "children": {
            "env_step": {
              "total": 123.1972438070211,
              "count": 6263,
              "self": 108.04269595208143,
              "children": {
                "SubprocessEnvManager._take_step": {
                  "total": 15.075828517978607,
                  "count": 6263,
                  "self": 0.4565189839731829,
                  "children": {
                    "TorchPolicy.evaluate": {
                      "total": 14.619309534005424,
                      "count": 6260,
                      "self": 14.619309534005424
                    }
                  }
                },
                "workers": {
                  "total": 0.0787193369610577,
                  "count": 6263,
                  "self": 0.0,
                  "children": {
                    "worker_root": {
                      "total": 191.42191162200243,
                      "count": 6263,
                      "is_parallel": true,
                      "self": 93.97238270498065,
                      "children": {
                        "run_training.setup": {
                          "total": 0.0,
                          "count": 0,
                          "is_parallel": true,
                          "self": 0.0,
                          "children": {
                            "steps_from_proto": {
                              "total": 0.0026602499997352425,
                              "count": 1,
                              "is_parallel": true,
                              "self": 0.0007371289993898245,
                              "children": {
                                "_process_rank_one_or_two_observation": {
                                  "total": 0.001923121000345418,
                                  "count": 8,
                                  "is_parallel": true,
                                  "self": 0.001923121000345418
                                }
                              }
                            },
                            "UnityEnvironment.step": {
                              "total": 0.04725945199970738,
                              "count": 1,
                              "is_parallel": true,
                              "self": 0.0006158769992907764,
                              "children": {
                                "UnityEnvironment._generate_step_input": {
                                  "total": 0.00047801900018384913,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.00047801900018384913
                                },
                                "communicator.exchange": {
                                  "total": 0.044459821000145894,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.044459821000145894
                                },
                                "steps_from_proto": {
                                  "total": 0.001705735000086861,
                                  "count": 1,
                                  "is_parallel": true,
                                  "self": 0.00040631000001667417,
                                  "children": {
                                    "_process_rank_one_or_two_observation": {
                                      "total": 0.0012994250000701868,
                                      "count": 8,
                                      "is_parallel": true,
                                      "self": 0.0012994250000701868
                                    }
                                  }
                                }
                              }
                            }
                          }
                        },
                        "UnityEnvironment.step": {
                          "total": 97.44952891702178,
                          "count": 6262,
                          "is_parallel": true,
                          "self": 3.1937775220312687,
                          "children": {
                            "UnityEnvironment._generate_step_input": {
                              "total": 2.2604773489720174,
                              "count": 6262,
                              "is_parallel": true,
                              "self": 2.2604773489720174
                            },
                            "communicator.exchange": {
                              "total": 82.57713548500942,
                              "count": 6262,
                              "is_parallel": true,
                              "self": 82.57713548500942
                            },
                            "steps_from_proto": {
                              "total": 9.418138561009073,
                              "count": 6262,
                              "is_parallel": true,
                              "self": 1.856509808039391,
                              "children": {
                                "_process_rank_one_or_two_observation": {
                                  "total": 7.561628752969682,
                                  "count": 50096,
                                  "is_parallel": true,
                                  "self": 7.561628752969682
                                }
                              }
                            }
                          }
                        }
                      }
                    }
                  }
                }
              }
            },
            "trainer_advance": {
              "total": 65.3535060699819,
              "count": 6263,
              "self": 0.1674284539767541,
              "children": {
                "process_trajectory": {
                  "total": 11.78368719300397,
                  "count": 6263,
                  "self": 11.78368719300397
                },
                "_update_policy": {
                  "total": 53.40239042300118,
                  "count": 27,
                  "self": 29.672020923001583,
                  "children": {
                    "TorchPPOOptimizer.update": {
                      "total": 23.730369499999597,
                      "count": 2286,
                      "self": 23.730369499999597
                    }
                  }
                }
              }
            }
          }
        },
        "trainer_threads": {
          "total": 1.1969996194238774e-06,
          "count": 1,
          "self": 1.1969996194238774e-06
        },
        "TrainerController._save_models": {
          "total": 0.16093488300066383,
          "count": 1,
          "self": 0.00164775500070391,
          "children": {
            "RLTrainer._checkpoint": {
              "total": 0.15928712799995992,
              "count": 1,
              "self": 0.15928712799995992
            }
          }
        }
      }
    }
  }
}