{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.38231709599494934,
"min": 0.38231709599494934,
"max": 1.4944969415664673,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11292.1181640625,
"min": 11292.1181640625,
"max": 45337.05859375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989925.0,
"min": 29952.0,
"max": 989925.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989925.0,
"min": 29952.0,
"max": 989925.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6186416149139404,
"min": -0.06953807920217514,
"max": 0.6261042356491089,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 175.6942138671875,
"min": -16.828214645385742,
"max": 178.43971252441406,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.005877800285816193,
"min": -0.0035303542390465736,
"max": 0.25761762261390686,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.669295310974121,
"min": -0.9849688410758972,
"max": 61.82822799682617,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07053150946063418,
"min": 0.0644864332210827,
"max": 0.07353129327748918,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9874411324488784,
"min": 0.49912314937850083,
"max": 1.0532045862540447,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014257595735738714,
"min": 0.0011016902862592522,
"max": 0.015586029324864484,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.199606340300342,
"min": 0.007711832003814765,
"max": 0.21820441054810277,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.0147045506257968e-05,
"min": 1.0147045506257968e-05,
"max": 0.0002951938852208976,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00014205863708761156,
"min": 0.00014205863708761156,
"max": 0.003639878184725173,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10338231629619143,
"min": 0.10338231629619143,
"max": 0.1983979612062863,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.44735242814668,
"min": 1.3693890981169472,
"max": 2.6132926660059472,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000347893397989523,
"min": 0.000347893397989523,
"max": 0.009839956324508,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.004870507571853322,
"min": 0.004870507571853322,
"max": 0.12134793733399406,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007071653846651316,
"min": 0.007071653846651316,
"max": 0.38067108392715454,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.0990031510591507,
"min": 0.0990031510591507,
"max": 2.6646976470947266,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 301.6831683168317,
"min": 301.6831683168317,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30470.0,
"min": 15984.0,
"max": 32636.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6587009745659214,
"min": -1.0000000521540642,
"max": 1.6870449257365774,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 167.52879843115807,
"min": -32.000001668930054,
"max": 167.52879843115807,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6587009745659214,
"min": -1.0000000521540642,
"max": 1.6870449257365774,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 167.52879843115807,
"min": -32.000001668930054,
"max": 167.52879843115807,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.02203766515609315,
"min": 0.02203766515609315,
"max": 7.524008542299271,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.225804180765408,
"min": 2.0949891084019328,
"max": 120.38413667678833,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1709224060",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1709226302"
},
"total": 2241.47639622,
"count": 1,
"self": 0.7105202580000878,
"children": {
"run_training.setup": {
"total": 0.04944692800017947,
"count": 1,
"self": 0.04944692800017947
},
"TrainerController.start_learning": {
"total": 2240.7164290339997,
"count": 1,
"self": 1.3455406860248331,
"children": {
"TrainerController._reset_env": {
"total": 3.371157657999902,
"count": 1,
"self": 3.371157657999902
},
"TrainerController.advance": {
"total": 2235.8740682069756,
"count": 64861,
"self": 1.5258887910831618,
"children": {
"env_step": {
"total": 1601.1954687489501,
"count": 64861,
"self": 1471.6833995939442,
"children": {
"SubprocessEnvManager._take_step": {
"total": 128.6800177130708,
"count": 64861,
"self": 4.6115159430582935,
"children": {
"TorchPolicy.evaluate": {
"total": 124.06850177001252,
"count": 63149,
"self": 124.06850177001252
}
}
},
"workers": {
"total": 0.8320514419351639,
"count": 64861,
"self": 0.0,
"children": {
"worker_root": {
"total": 2235.484465490114,
"count": 64861,
"is_parallel": true,
"self": 880.3156610800738,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005353417000151239,
"count": 1,
"is_parallel": true,
"self": 0.004001277000043046,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001352140000108193,
"count": 8,
"is_parallel": true,
"self": 0.001352140000108193
}
}
},
"UnityEnvironment.step": {
"total": 0.0491004239997892,
"count": 1,
"is_parallel": true,
"self": 0.0006150679992060759,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004847800000788993,
"count": 1,
"is_parallel": true,
"self": 0.0004847800000788993
},
"communicator.exchange": {
"total": 0.04640720100042017,
"count": 1,
"is_parallel": true,
"self": 0.04640720100042017
},
"steps_from_proto": {
"total": 0.0015933750000840519,
"count": 1,
"is_parallel": true,
"self": 0.0003471599998192687,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012462150002647832,
"count": 8,
"is_parallel": true,
"self": 0.0012462150002647832
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1355.1688044100401,
"count": 64860,
"is_parallel": true,
"self": 36.060724112135176,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.53979809995417,
"count": 64860,
"is_parallel": true,
"self": 24.53979809995417
},
"communicator.exchange": {
"total": 1194.306809461972,
"count": 64860,
"is_parallel": true,
"self": 1194.306809461972
},
"steps_from_proto": {
"total": 100.26147273597871,
"count": 64860,
"is_parallel": true,
"self": 19.81074685082058,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.45072588515814,
"count": 518880,
"is_parallel": true,
"self": 80.45072588515814
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 633.1527106669423,
"count": 64861,
"self": 2.7284159519767854,
"children": {
"process_trajectory": {
"total": 127.63635243096678,
"count": 64861,
"self": 127.400298198967,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2360542319997876,
"count": 2,
"self": 0.2360542319997876
}
}
},
"_update_policy": {
"total": 502.78794228399875,
"count": 458,
"self": 293.0771406549975,
"children": {
"TorchPPOOptimizer.update": {
"total": 209.71080162900125,
"count": 23001,
"self": 209.71080162900125
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3709995982935652e-06,
"count": 1,
"self": 1.3709995982935652e-06
},
"TrainerController._save_models": {
"total": 0.1256611119997615,
"count": 1,
"self": 0.0019485909997456474,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12371252100001584,
"count": 1,
"self": 0.12371252100001584
}
}
}
}
}
}
}