{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5436019897460938,
"min": 0.5436019897460938,
"max": 1.4508800506591797,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16438.5234375,
"min": 16438.5234375,
"max": 44013.8984375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989980.0,
"min": 29952.0,
"max": 989980.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989980.0,
"min": 29952.0,
"max": 989980.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.15395896136760712,
"min": -0.15580835938453674,
"max": 0.18114478886127472,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 38.643699645996094,
"min": -36.92658233642578,
"max": 46.01077651977539,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.25546225905418396,
"min": -0.25546225905418396,
"max": 0.47085821628570557,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -64.12102508544922,
"min": -64.12102508544922,
"max": 111.59339904785156,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06752951525243416,
"min": 0.06335378730165316,
"max": 0.07439142625327973,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0129427287865123,
"min": 0.4933549757187466,
"max": 1.0149018409432684,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.026279413391412938,
"min": 0.00034206113817328716,
"max": 0.026279413391412938,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.3941912008711941,
"min": 0.00478885593442602,
"max": 0.3941912008711941,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.52523749162e-06,
"min": 7.52523749162e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001128785623743,
"min": 0.0001128785623743,
"max": 0.0035084354305216,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250838000000001,
"min": 0.10250838000000001,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5376257000000002,
"min": 1.3691136000000002,
"max": 2.5694784,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026058716200000006,
"min": 0.00026058716200000006,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003908807430000001,
"min": 0.003908807430000001,
"max": 0.11697089216,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.017434658482670784,
"min": 0.01664665900170803,
"max": 0.5009474754333496,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2615198791027069,
"min": 0.23305322229862213,
"max": 3.5066323280334473,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 643.0869565217391,
"min": 643.0869565217391,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29582.0,
"min": 15984.0,
"max": 33870.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.8785303946098556,
"min": -1.0000000521540642,
"max": 0.8785303946098556,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 40.412398152053356,
"min": -32.000001668930054,
"max": 40.412398152053356,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.8785303946098556,
"min": -1.0000000521540642,
"max": 0.8785303946098556,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 40.412398152053356,
"min": -32.000001668930054,
"max": 40.412398152053356,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.11537425384132961,
"min": 0.11537425384132961,
"max": 9.575675778090954,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.307215676701162,
"min": 4.66499188169837,
"max": 153.21081244945526,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1730274923",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./ml-agents/config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1730278196"
},
"total": 3273.419181661,
"count": 1,
"self": 0.5994365349997679,
"children": {
"run_training.setup": {
"total": 0.07832411399999728,
"count": 1,
"self": 0.07832411399999728
},
"TrainerController.start_learning": {
"total": 3272.7414210120005,
"count": 1,
"self": 2.3125412070289713,
"children": {
"TrainerController._reset_env": {
"total": 6.543559511000012,
"count": 1,
"self": 6.543559511000012
},
"TrainerController.advance": {
"total": 3263.788168044971,
"count": 63354,
"self": 2.4570324900087144,
"children": {
"env_step": {
"total": 2104.566226810979,
"count": 63354,
"self": 1937.369516979091,
"children": {
"SubprocessEnvManager._take_step": {
"total": 165.79950481897697,
"count": 63354,
"self": 7.227947281964816,
"children": {
"TorchPolicy.evaluate": {
"total": 158.57155753701215,
"count": 62563,
"self": 158.57155753701215
}
}
},
"workers": {
"total": 1.3972050129110585,
"count": 63354,
"self": 0.0,
"children": {
"worker_root": {
"total": 3265.213031535058,
"count": 63354,
"is_parallel": true,
"self": 1508.1465728789929,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022864090000211945,
"count": 1,
"is_parallel": true,
"self": 0.0006937319999451574,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001592677000076037,
"count": 8,
"is_parallel": true,
"self": 0.001592677000076037
}
}
},
"UnityEnvironment.step": {
"total": 0.06338494600004196,
"count": 1,
"is_parallel": true,
"self": 0.0007933729999649586,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005383940000456278,
"count": 1,
"is_parallel": true,
"self": 0.0005383940000456278
},
"communicator.exchange": {
"total": 0.05972101399993335,
"count": 1,
"is_parallel": true,
"self": 0.05972101399993335
},
"steps_from_proto": {
"total": 0.00233216500009803,
"count": 1,
"is_parallel": true,
"self": 0.0004866559999072706,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018455090001907593,
"count": 8,
"is_parallel": true,
"self": 0.0018455090001907593
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1757.066458656065,
"count": 63353,
"is_parallel": true,
"self": 50.307633460131,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 31.31575765499906,
"count": 63353,
"is_parallel": true,
"self": 31.31575765499906
},
"communicator.exchange": {
"total": 1544.9367216019718,
"count": 63353,
"is_parallel": true,
"self": 1544.9367216019718
},
"steps_from_proto": {
"total": 130.50634593896302,
"count": 63353,
"is_parallel": true,
"self": 28.365129459946843,
"children": {
"_process_rank_one_or_two_observation": {
"total": 102.14121647901618,
"count": 506824,
"is_parallel": true,
"self": 102.14121647901618
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1156.7649087439834,
"count": 63354,
"self": 4.588576657908334,
"children": {
"process_trajectory": {
"total": 172.8926074640724,
"count": 63354,
"self": 172.50078976507234,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3918176990000575,
"count": 2,
"self": 0.3918176990000575
}
}
},
"_update_policy": {
"total": 979.2837246220025,
"count": 447,
"self": 396.3607008809953,
"children": {
"TorchPPOOptimizer.update": {
"total": 582.9230237410072,
"count": 22812,
"self": 582.9230237410072
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.621000137674855e-06,
"count": 1,
"self": 1.621000137674855e-06
},
"TrainerController._save_models": {
"total": 0.09715062800023588,
"count": 1,
"self": 0.002208966000125656,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09494166200011023,
"count": 1,
"self": 0.09494166200011023
}
}
}
}
}
}
}