{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.16267988085746765,
"min": 0.15106172859668732,
"max": 1.4884880781173706,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4901.21923828125,
"min": 4497.09033203125,
"max": 45154.7734375,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999974.0,
"min": 29952.0,
"max": 2999974.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999974.0,
"min": 29952.0,
"max": 2999974.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8083919286727905,
"min": -0.13580511510372162,
"max": 0.8597111105918884,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 244.9427490234375,
"min": -32.729034423828125,
"max": 260.4924621582031,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.008160619996488094,
"min": -0.01923423819243908,
"max": 0.22818012535572052,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.472667932510376,
"min": -5.462523460388184,
"max": 54.76322937011719,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06746472009982332,
"min": 0.060893372366470945,
"max": 0.07556750592077803,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9445060813975266,
"min": 0.4905561543708392,
"max": 1.0901178754900362,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01717592660751499,
"min": 0.00042859533246369117,
"max": 0.017718075087531378,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24046297250520987,
"min": 0.005571739322027985,
"max": 0.2480530512254393,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5212352072404723e-06,
"min": 1.5212352072404723e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.1297292901366613e-05,
"min": 2.1297292901366613e-05,
"max": 0.0040275087574971336,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10050704523809526,
"min": 0.10050704523809526,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4070986333333335,
"min": 1.3897045333333333,
"max": 2.842502866666667,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.065381928571417e-05,
"min": 6.065381928571417e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008491534699999984,
"min": 0.0008491534699999984,
"max": 0.13426603638,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005461750086396933,
"min": 0.005290467757731676,
"max": 0.37441498041152954,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.07646450400352478,
"min": 0.07406654953956604,
"max": 2.6209049224853516,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 224.8695652173913,
"min": 211.55714285714285,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31032.0,
"min": 15984.0,
"max": 32861.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7606318706403608,
"min": -1.0000000521540642,
"max": 1.7884428436202662,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 242.9671981483698,
"min": -32.000001668930054,
"max": 253.56959754228592,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7606318706403608,
"min": -1.0000000521540642,
"max": 1.7884428436202662,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 242.9671981483698,
"min": -32.000001668930054,
"max": 253.56959754228592,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.012773444664386603,
"min": 0.012145246718126437,
"max": 7.725096891634166,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.7627353636853513,
"min": 1.6143771625793306,
"max": 123.60155026614666,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1755000330",
"python_version": "3.10.12 (main, Aug 12 2025, 15:10:59) [GCC 13.3.0]",
"command_line_arguments": "/home/dmin/.local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training 2 --no-graphics",
"mlagents_version": "1.1.0",
"mlagents_envs_version": "1.1.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1755007016"
},
"total": 6653.2202589210065,
"count": 1,
"self": 0.32377456498215906,
"children": {
"run_training.setup": {
"total": 0.016235606017289683,
"count": 1,
"self": 0.016235606017289683
},
"TrainerController.start_learning": {
"total": 6652.880248750007,
"count": 1,
"self": 3.883985931170173,
"children": {
"TrainerController._reset_env": {
"total": 2.142344323976431,
"count": 1,
"self": 2.142344323976431
},
"TrainerController.advance": {
"total": 6646.76788269286,
"count": 195221,
"self": 3.6751580499694683,
"children": {
"env_step": {
"total": 3998.9228850743966,
"count": 195221,
"self": 3208.152396306832,
"children": {
"SubprocessEnvManager._take_step": {
"total": 788.3695633320312,
"count": 195221,
"self": 14.179835881543113,
"children": {
"TorchPolicy.evaluate": {
"total": 774.1897274504881,
"count": 187559,
"self": 774.1897274504881
}
}
},
"workers": {
"total": 2.4009254355332814,
"count": 195221,
"self": 0.0,
"children": {
"worker_root": {
"total": 6644.099266035977,
"count": 195221,
"is_parallel": true,
"self": 3691.5352020152786,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001741266984026879,
"count": 1,
"is_parallel": true,
"self": 0.0005787660193163902,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001162500964710489,
"count": 8,
"is_parallel": true,
"self": 0.001162500964710489
}
}
},
"UnityEnvironment.step": {
"total": 0.03254750100313686,
"count": 1,
"is_parallel": true,
"self": 0.000473192980280146,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004426020022947341,
"count": 1,
"is_parallel": true,
"self": 0.0004426020022947341
},
"communicator.exchange": {
"total": 0.030853847012622282,
"count": 1,
"is_parallel": true,
"self": 0.030853847012622282
},
"steps_from_proto": {
"total": 0.0007778590079396963,
"count": 1,
"is_parallel": true,
"self": 0.00016830096137709916,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006095580465625972,
"count": 8,
"is_parallel": true,
"self": 0.0006095580465625972
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2952.5640640206984,
"count": 195220,
"is_parallel": true,
"self": 40.62324331497075,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 28.81000149427564,
"count": 195220,
"is_parallel": true,
"self": 28.81000149427564
},
"communicator.exchange": {
"total": 2773.7865849268273,
"count": 195220,
"is_parallel": true,
"self": 2773.7865849268273
},
"steps_from_proto": {
"total": 109.34423428462469,
"count": 195220,
"is_parallel": true,
"self": 26.744991081679473,
"children": {
"_process_rank_one_or_two_observation": {
"total": 82.59924320294522,
"count": 1561760,
"is_parallel": true,
"self": 82.59924320294522
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2644.1698395684944,
"count": 195221,
"self": 7.084105349495076,
"children": {
"process_trajectory": {
"total": 391.0266002462886,
"count": 195221,
"self": 390.30944100031047,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7171592459781095,
"count": 6,
"self": 0.7171592459781095
}
}
},
"_update_policy": {
"total": 2246.0591339727107,
"count": 1394,
"self": 793.3079589319823,
"children": {
"TorchPPOOptimizer.update": {
"total": 1452.7511750407284,
"count": 68421,
"self": 1452.7511750407284
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.390037015080452e-07,
"count": 1,
"self": 6.390037015080452e-07
},
"TrainerController._save_models": {
"total": 0.0860351629962679,
"count": 1,
"self": 0.0017064679705072194,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08432869502576068,
"count": 1,
"self": 0.08432869502576068
}
}
}
}
}
}
}