{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.33512890338897705,
"min": 0.33317136764526367,
"max": 1.4514060020446777,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10000.24609375,
"min": 9973.818359375,
"max": 44029.8515625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989951.0,
"min": 29952.0,
"max": 989951.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989951.0,
"min": 29952.0,
"max": 989951.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5774832367897034,
"min": -0.09277313947677612,
"max": 0.6056317687034607,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 161.6953125,
"min": -22.358325958251953,
"max": 169.36465454101562,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.008356794714927673,
"min": -0.013250600546598434,
"max": 0.41626444458961487,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.339902400970459,
"min": -3.683666944503784,
"max": 98.65467071533203,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06849815791688993,
"min": 0.06589756119303482,
"max": 0.07533286589231077,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9589742108364591,
"min": 0.5273300612461753,
"max": 1.0719657315034876,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015333865369376282,
"min": 0.000832745745754811,
"max": 0.01720611296346475,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21467411517126794,
"min": 0.010558291506722071,
"max": 0.2511852079535625,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.2809047159214265e-06,
"min": 7.2809047159214265e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010193266602289997,
"min": 0.00010193266602289997,
"max": 0.003507975230675,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10242693571428571,
"min": 0.10242693571428571,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4339771,
"min": 1.3886848,
"max": 2.5693249999999996,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002524508778571428,
"min": 0.0002524508778571428,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035343122899999997,
"min": 0.0035343122899999997,
"max": 0.11695556750000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013116887770593166,
"min": 0.013116887770593166,
"max": 0.4783676564693451,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18363642692565918,
"min": 0.18363642692565918,
"max": 3.348573684692383,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 320.34090909090907,
"min": 320.0348837209302,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28190.0,
"min": 15984.0,
"max": 33145.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6038363452323459,
"min": -1.0000000521540642,
"max": 1.6799650963309198,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 141.13759838044643,
"min": -30.777801677584648,
"max": 158.27459828555584,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6038363452323459,
"min": -1.0000000521540642,
"max": 1.6799650963309198,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 141.13759838044643,
"min": -30.777801677584648,
"max": 158.27459828555584,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.043955671761564605,
"min": 0.0433317355839168,
"max": 9.54686716478318,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.868099115017685,
"min": 3.868099115017685,
"max": 152.74987463653088,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1719509096",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1719511323"
},
"total": 2227.191766939,
"count": 1,
"self": 0.47631045999924027,
"children": {
"run_training.setup": {
"total": 0.05228087500017864,
"count": 1,
"self": 0.05228087500017864
},
"TrainerController.start_learning": {
"total": 2226.6631756040006,
"count": 1,
"self": 1.3280259790590208,
"children": {
"TrainerController._reset_env": {
"total": 2.3920564370000648,
"count": 1,
"self": 2.3920564370000648
},
"TrainerController.advance": {
"total": 2222.8589092729417,
"count": 64032,
"self": 1.4225378639193877,
"children": {
"env_step": {
"total": 1595.8389214310146,
"count": 64032,
"self": 1460.519948582937,
"children": {
"SubprocessEnvManager._take_step": {
"total": 134.44973275604661,
"count": 64032,
"self": 4.700311386061458,
"children": {
"TorchPolicy.evaluate": {
"total": 129.74942136998516,
"count": 62555,
"self": 129.74942136998516
}
}
},
"workers": {
"total": 0.8692400920308501,
"count": 64032,
"self": 0.0,
"children": {
"worker_root": {
"total": 2221.2946092570614,
"count": 64032,
"is_parallel": true,
"self": 884.0513336110066,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020996240000386024,
"count": 1,
"is_parallel": true,
"self": 0.0006701859997519932,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014294380002866092,
"count": 8,
"is_parallel": true,
"self": 0.0014294380002866092
}
}
},
"UnityEnvironment.step": {
"total": 0.07884696100018118,
"count": 1,
"is_parallel": true,
"self": 0.0007275100006154389,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043685499986167997,
"count": 1,
"is_parallel": true,
"self": 0.00043685499986167997
},
"communicator.exchange": {
"total": 0.07609647799972663,
"count": 1,
"is_parallel": true,
"self": 0.07609647799972663
},
"steps_from_proto": {
"total": 0.0015861179999774322,
"count": 1,
"is_parallel": true,
"self": 0.00033942200116143795,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012466959988159942,
"count": 8,
"is_parallel": true,
"self": 0.0012466959988159942
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1337.2432756460548,
"count": 64031,
"is_parallel": true,
"self": 33.66960028095491,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.28504994808236,
"count": 64031,
"is_parallel": true,
"self": 24.28504994808236
},
"communicator.exchange": {
"total": 1178.1353255149847,
"count": 64031,
"is_parallel": true,
"self": 1178.1353255149847
},
"steps_from_proto": {
"total": 101.15329990203281,
"count": 64031,
"is_parallel": true,
"self": 20.775255414241656,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.37804448779116,
"count": 512248,
"is_parallel": true,
"self": 80.37804448779116
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 625.5974499780077,
"count": 64032,
"self": 2.6105301760417206,
"children": {
"process_trajectory": {
"total": 130.70743322896942,
"count": 64032,
"self": 130.50170906196945,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2057241669999712,
"count": 2,
"self": 0.2057241669999712
}
}
},
"_update_policy": {
"total": 492.2794865729966,
"count": 449,
"self": 291.4025214470107,
"children": {
"TorchPPOOptimizer.update": {
"total": 200.87696512598586,
"count": 22773,
"self": 200.87696512598586
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.779998097452335e-07,
"count": 1,
"self": 8.779998097452335e-07
},
"TrainerController._save_models": {
"total": 0.08418303700000251,
"count": 1,
"self": 0.001387775999319274,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08279526100068324,
"count": 1,
"self": 0.08279526100068324
}
}
}
}
}
}
}