{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6447579860687256,
"min": 0.5557221174240112,
"max": 1.4957133531570435,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 19414.953125,
"min": 16502.724609375,
"max": 45373.9609375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989952.0,
"min": 29928.0,
"max": 989952.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989952.0,
"min": 29928.0,
"max": 989952.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3093614876270294,
"min": -0.10174134373664856,
"max": 0.3093614876270294,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 79.81526184082031,
"min": -24.62140464782715,
"max": 79.81526184082031,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.19520945847034454,
"min": 0.006585023365914822,
"max": 0.346086323261261,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 50.36404037475586,
"min": 1.6001607179641724,
"max": 82.0224609375,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06867015071501674,
"min": 0.06570940099578429,
"max": 0.07221308236650494,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9613821100102344,
"min": 0.47774123095595494,
"max": 1.0668598829800557,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.023815260263795265,
"min": 0.00011471801035447246,
"max": 0.023815260263795265,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.3334136436931337,
"min": 0.0016060521449626144,
"max": 0.3334136436931337,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.652154592171433e-06,
"min": 7.652154592171433e-06,
"max": 0.00029523394444582856,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010713016429040006,
"min": 0.00010713016429040006,
"max": 0.0035075612308129995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10255068571428572,
"min": 0.10255068571428572,
"max": 0.19841131428571426,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4357096,
"min": 1.3888791999999999,
"max": 2.5691869999999994,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000264813502857143,
"min": 0.000264813502857143,
"max": 0.009841290297142856,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037073890400000017,
"min": 0.0037073890400000017,
"max": 0.1169417813,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01312264148145914,
"min": 0.01312264148145914,
"max": 0.3906037509441376,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18371698260307312,
"min": 0.18371698260307312,
"max": 2.7342262268066406,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 589.4528301886793,
"min": 589.4528301886793,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31241.0,
"min": 16599.0,
"max": 32996.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.1840641155557812,
"min": -0.9998375517316163,
"max": 1.1840641155557812,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 62.755398124456406,
"min": -31.99480165541172,
"max": 62.755398124456406,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.1840641155557812,
"min": -0.9998375517316163,
"max": 1.1840641155557812,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 62.755398124456406,
"min": -31.99480165541172,
"max": 62.755398124456406,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08042543345264967,
"min": 0.08042543345264967,
"max": 7.797219623537624,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.262547972990433,
"min": 4.1794480800163,
"max": 132.55273360013962,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1748340813",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1748342945"
},
"total": 2131.8924382620003,
"count": 1,
"self": 0.4891884700000446,
"children": {
"run_training.setup": {
"total": 0.034636276999663096,
"count": 1,
"self": 0.034636276999663096
},
"TrainerController.start_learning": {
"total": 2131.3686135150006,
"count": 1,
"self": 1.3037525880490648,
"children": {
"TrainerController._reset_env": {
"total": 3.642440034000174,
"count": 1,
"self": 3.642440034000174
},
"TrainerController.advance": {
"total": 2126.339372950951,
"count": 63239,
"self": 1.4663894858722415,
"children": {
"env_step": {
"total": 1439.8735881680586,
"count": 63239,
"self": 1285.203679981039,
"children": {
"SubprocessEnvManager._take_step": {
"total": 153.93138613493193,
"count": 63239,
"self": 4.6488700739450906,
"children": {
"TorchPolicy.evaluate": {
"total": 149.28251606098684,
"count": 62556,
"self": 149.28251606098684
}
}
},
"workers": {
"total": 0.7385220520877738,
"count": 63239,
"self": 0.0,
"children": {
"worker_root": {
"total": 2126.4820550900163,
"count": 63239,
"is_parallel": true,
"self": 952.9204577149721,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0051752569997916,
"count": 1,
"is_parallel": true,
"self": 0.003746882999621448,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014283740001701517,
"count": 8,
"is_parallel": true,
"self": 0.0014283740001701517
}
}
},
"UnityEnvironment.step": {
"total": 0.08429158100034329,
"count": 1,
"is_parallel": true,
"self": 0.000601346000621561,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004942249997839099,
"count": 1,
"is_parallel": true,
"self": 0.0004942249997839099
},
"communicator.exchange": {
"total": 0.07937033100006374,
"count": 1,
"is_parallel": true,
"self": 0.07937033100006374
},
"steps_from_proto": {
"total": 0.0038256789998740715,
"count": 1,
"is_parallel": true,
"self": 0.002430088000892283,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013955909989817883,
"count": 8,
"is_parallel": true,
"self": 0.0013955909989817883
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1173.5615973750441,
"count": 63238,
"is_parallel": true,
"self": 32.59252751199347,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.808282684115966,
"count": 63238,
"is_parallel": true,
"self": 23.808282684115966
},
"communicator.exchange": {
"total": 1019.6108496959255,
"count": 63238,
"is_parallel": true,
"self": 1019.6108496959255
},
"steps_from_proto": {
"total": 97.54993748300922,
"count": 63238,
"is_parallel": true,
"self": 19.743431035697085,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.80650644731213,
"count": 505904,
"is_parallel": true,
"self": 77.80650644731213
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 684.9993952970203,
"count": 63239,
"self": 2.506981684007769,
"children": {
"process_trajectory": {
"total": 130.08466701100997,
"count": 63239,
"self": 129.82718864301023,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2574783679997381,
"count": 2,
"self": 0.2574783679997381
}
}
},
"_update_policy": {
"total": 552.4077466020026,
"count": 448,
"self": 306.6118411980019,
"children": {
"TorchPPOOptimizer.update": {
"total": 245.79590540400068,
"count": 22839,
"self": 245.79590540400068
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.64999640523456e-07,
"count": 1,
"self": 7.64999640523456e-07
},
"TrainerController._save_models": {
"total": 0.08304717700048059,
"count": 1,
"self": 0.0012929840004289872,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0817541930000516,
"count": 1,
"self": 0.0817541930000516
}
}
}
}
}
}
}