{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.31657862663269043,
"min": 0.31657862663269043,
"max": 1.4554413557052612,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9477.09765625,
"min": 9477.09765625,
"max": 44152.26953125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989980.0,
"min": 29952.0,
"max": 989980.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989980.0,
"min": 29952.0,
"max": 989980.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6274967193603516,
"min": -0.11481665819883347,
"max": 0.6775597333908081,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 176.3265838623047,
"min": -27.670814514160156,
"max": 193.6967010498047,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.007007249165326357,
"min": 0.0015687725972384214,
"max": 0.2707160711288452,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.9690370559692383,
"min": 0.3968994617462158,
"max": 64.97185516357422,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06876534378738904,
"min": 0.0638591812139133,
"max": 0.07539782882806667,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0314801568108356,
"min": 0.4858075592386865,
"max": 1.0861022743920332,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015090243930276331,
"min": 0.0014578654519899834,
"max": 0.01636559136651446,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22635365895414497,
"min": 0.014578654519899835,
"max": 0.23666382780841863,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.471197509633331e-06,
"min": 7.471197509633331e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011206796264449996,
"min": 0.00011206796264449996,
"max": 0.003633119888960099,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249036666666664,
"min": 0.10249036666666664,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5373554999999997,
"min": 1.3886848,
"max": 2.6177872000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025878762999999994,
"min": 0.00025878762999999994,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003881814449999999,
"min": 0.003881814449999999,
"max": 0.12112288601000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013059018179774284,
"min": 0.013059018179774284,
"max": 0.3602496087551117,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19588527083396912,
"min": 0.18780364096164703,
"max": 2.521747350692749,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 294.9072164948454,
"min": 273.9259259259259,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28606.0,
"min": 15984.0,
"max": 33199.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6638412233634092,
"min": -1.0000000521540642,
"max": 1.7112599892914295,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 161.3925986662507,
"min": -30.58620174229145,
"max": 184.41499803215265,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6638412233634092,
"min": -1.0000000521540642,
"max": 1.7112599892914295,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 161.3925986662507,
"min": -30.58620174229145,
"max": 184.41499803215265,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03977758732813977,
"min": 0.038694411753331895,
"max": 5.93458700645715,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.8584259708295576,
"min": 3.8584259708295576,
"max": 94.9533921033144,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1707503277",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids_Training_2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1707505522"
},
"total": 2245.319947405,
"count": 1,
"self": 0.5272818630000984,
"children": {
"run_training.setup": {
"total": 0.045931434000067384,
"count": 1,
"self": 0.045931434000067384
},
"TrainerController.start_learning": {
"total": 2244.746734108,
"count": 1,
"self": 1.3551528949469684,
"children": {
"TrainerController._reset_env": {
"total": 2.591978273999757,
"count": 1,
"self": 2.591978273999757
},
"TrainerController.advance": {
"total": 2240.715435246053,
"count": 64307,
"self": 1.3556013230345343,
"children": {
"env_step": {
"total": 1622.8087104969645,
"count": 64307,
"self": 1495.0211759589215,
"children": {
"SubprocessEnvManager._take_step": {
"total": 126.96520265190884,
"count": 64307,
"self": 4.483194459915467,
"children": {
"TorchPolicy.evaluate": {
"total": 122.48200819199337,
"count": 62551,
"self": 122.48200819199337
}
}
},
"workers": {
"total": 0.8223318861341795,
"count": 64307,
"self": 0.0,
"children": {
"worker_root": {
"total": 2239.8021957169412,
"count": 64307,
"is_parallel": true,
"self": 856.1966301607363,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002120863000072859,
"count": 1,
"is_parallel": true,
"self": 0.0007098269993548456,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014110360007180134,
"count": 8,
"is_parallel": true,
"self": 0.0014110360007180134
}
}
},
"UnityEnvironment.step": {
"total": 0.07481945000017731,
"count": 1,
"is_parallel": true,
"self": 0.0005770530005975161,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048371000002589426,
"count": 1,
"is_parallel": true,
"self": 0.00048371000002589426
},
"communicator.exchange": {
"total": 0.07214259699958347,
"count": 1,
"is_parallel": true,
"self": 0.07214259699958347
},
"steps_from_proto": {
"total": 0.001616089999970427,
"count": 1,
"is_parallel": true,
"self": 0.0003722429996741994,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012438470002962276,
"count": 8,
"is_parallel": true,
"self": 0.0012438470002962276
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1383.605565556205,
"count": 64306,
"is_parallel": true,
"self": 34.697032933224364,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.583619527958035,
"count": 64306,
"is_parallel": true,
"self": 23.583619527958035
},
"communicator.exchange": {
"total": 1228.2680790799955,
"count": 64306,
"is_parallel": true,
"self": 1228.2680790799955
},
"steps_from_proto": {
"total": 97.0568340150271,
"count": 64306,
"is_parallel": true,
"self": 18.96307213379896,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.09376188122815,
"count": 514448,
"is_parallel": true,
"self": 78.09376188122815
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 616.5511234260539,
"count": 64307,
"self": 2.6369264381287394,
"children": {
"process_trajectory": {
"total": 123.49490136293343,
"count": 64307,
"self": 123.29177375393328,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20312760900014837,
"count": 2,
"self": 0.20312760900014837
}
}
},
"_update_policy": {
"total": 490.4192956249917,
"count": 451,
"self": 285.5619166979768,
"children": {
"TorchPPOOptimizer.update": {
"total": 204.8573789270149,
"count": 22755,
"self": 204.8573789270149
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.55000359681435e-07,
"count": 1,
"self": 9.55000359681435e-07
},
"TrainerController._save_models": {
"total": 0.08416673800002172,
"count": 1,
"self": 0.0013754459996562218,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0827912920003655,
"count": 1,
"self": 0.0827912920003655
}
}
}
}
}
}
}