{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5619126558303833,
"min": 0.5619126558303833,
"max": 1.4403992891311646,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16785.455078125,
"min": 16785.455078125,
"max": 43695.953125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989946.0,
"min": 29952.0,
"max": 989946.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989946.0,
"min": 29952.0,
"max": 989946.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.41466617584228516,
"min": -0.10912616550922394,
"max": 0.5381873846054077,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 109.05720520019531,
"min": -26.299406051635742,
"max": 146.386962890625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.037442002445459366,
"min": -0.037442002445459366,
"max": 0.379725843667984,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -9.847246170043945,
"min": -9.847246170043945,
"max": 89.99502563476562,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06993294266898634,
"min": 0.06521513590325863,
"max": 0.07345335218398011,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9790611973658088,
"min": 0.47388091624276873,
"max": 1.0773536657022003,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0161442431306655,
"min": 0.0005837712217951042,
"max": 0.0161442431306655,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22601940382931696,
"min": 0.005045172735145601,
"max": 0.22953196393363587,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.637133168607142e-06,
"min": 7.637133168607142e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010691986436049999,
"min": 0.00010691986436049999,
"max": 0.0032551571149477,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254567857142857,
"min": 0.10254567857142857,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4356395,
"min": 1.3691136000000002,
"max": 2.4017553,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002643132892857144,
"min": 0.0002643132892857144,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003700386050000001,
"min": 0.003700386050000001,
"max": 0.10852672476999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009500098414719105,
"min": 0.009500098414719105,
"max": 0.39393743872642517,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13300137221813202,
"min": 0.13300137221813202,
"max": 2.7575621604919434,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 447.35714285714283,
"min": 364.0833333333333,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31315.0,
"min": 15984.0,
"max": 32123.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3811599708029203,
"min": -1.0000000521540642,
"max": 1.5864574821665882,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 96.68119795620441,
"min": -32.000001668930054,
"max": 129.4143984168768,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3811599708029203,
"min": -1.0000000521540642,
"max": 1.5864574821665882,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 96.68119795620441,
"min": -32.000001668930054,
"max": 129.4143984168768,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.043198235350428146,
"min": 0.038499466746413545,
"max": 8.425044135190547,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.0238764745299704,
"min": 2.858691236760933,
"max": 134.80070616304874,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678406398",
"python_version": "3.8.16 (default, Mar 2 2023, 03:21:46) \n[GCC 11.2.0]",
"command_line_arguments": "/home/ikari/miniconda3/envs/unity/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training2 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.23.5",
"end_time_seconds": "1678411614"
},
"total": 5216.207330887206,
"count": 1,
"self": 0.3206421732902527,
"children": {
"run_training.setup": {
"total": 0.011301717720925808,
"count": 1,
"self": 0.011301717720925808
},
"TrainerController.start_learning": {
"total": 5215.875386996195,
"count": 1,
"self": 1.0568563602864742,
"children": {
"TrainerController._reset_env": {
"total": 2.8723809281364083,
"count": 1,
"self": 2.8723809281364083
},
"TrainerController.advance": {
"total": 5211.847565495409,
"count": 63505,
"self": 0.9841721057891846,
"children": {
"env_step": {
"total": 4689.575877144933,
"count": 63505,
"self": 4616.221918799914,
"children": {
"SubprocessEnvManager._take_step": {
"total": 72.69198597315699,
"count": 63505,
"self": 2.996336936019361,
"children": {
"TorchPolicy.evaluate": {
"total": 69.69564903713763,
"count": 62548,
"self": 23.941516778431833,
"children": {
"TorchPolicy.sample_actions": {
"total": 45.754132258705795,
"count": 62548,
"self": 45.754132258705795
}
}
}
}
},
"workers": {
"total": 0.6619723718613386,
"count": 63505,
"self": 0.0,
"children": {
"worker_root": {
"total": 5213.58686382696,
"count": 63505,
"is_parallel": true,
"self": 685.7414980847389,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002551085315644741,
"count": 1,
"is_parallel": true,
"self": 0.0007484452798962593,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018026400357484818,
"count": 8,
"is_parallel": true,
"self": 0.0018026400357484818
}
}
},
"UnityEnvironment.step": {
"total": 0.10599873028695583,
"count": 1,
"is_parallel": true,
"self": 0.00017119571566581726,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0021575894206762314,
"count": 1,
"is_parallel": true,
"self": 0.0021575894206762314
},
"communicator.exchange": {
"total": 0.10256992280483246,
"count": 1,
"is_parallel": true,
"self": 0.10256992280483246
},
"steps_from_proto": {
"total": 0.0011000223457813263,
"count": 1,
"is_parallel": true,
"self": 0.00026801321655511856,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008320091292262077,
"count": 8,
"is_parallel": true,
"self": 0.0008320091292262077
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4527.8453657422215,
"count": 63504,
"is_parallel": true,
"self": 11.013434931635857,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 64.7297540595755,
"count": 63504,
"is_parallel": true,
"self": 64.7297540595755
},
"communicator.exchange": {
"total": 4358.426709068939,
"count": 63504,
"is_parallel": true,
"self": 4358.426709068939
},
"steps_from_proto": {
"total": 93.67546768207103,
"count": 63504,
"is_parallel": true,
"self": 19.505507332272828,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.1699603497982,
"count": 508032,
"is_parallel": true,
"self": 74.1699603497982
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 521.2875162446871,
"count": 63505,
"self": 2.0599125484004617,
"children": {
"process_trajectory": {
"total": 103.50668336823583,
"count": 63505,
"self": 103.35031201783568,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15637135040014982,
"count": 2,
"self": 0.15637135040014982
}
}
},
"_update_policy": {
"total": 415.72092032805085,
"count": 440,
"self": 104.08446314185858,
"children": {
"TorchPPOOptimizer.update": {
"total": 311.6364571861923,
"count": 22818,
"self": 311.6364571861923
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.348135113716125e-07,
"count": 1,
"self": 7.348135113716125e-07
},
"TrainerController._save_models": {
"total": 0.0985834775492549,
"count": 1,
"self": 0.0010862154886126518,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09749726206064224,
"count": 1,
"self": 0.09749726206064224
}
}
}
}
}
}
}