{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4057021141052246,
"min": 0.4057021141052246,
"max": 1.181854486465454,
"count": 25
},
"Pyramids.Policy.Entropy.sum": {
"value": 12132.1162109375,
"min": 12132.1162109375,
"max": 35720.37109375,
"count": 25
},
"Pyramids.Step.mean": {
"value": 989965.0,
"min": 269971.0,
"max": 989965.0,
"count": 25
},
"Pyramids.Step.sum": {
"value": 989965.0,
"min": 269971.0,
"max": 989965.0,
"count": 25
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6342625021934509,
"min": -0.027926642447710037,
"max": 0.6648698449134827,
"count": 25
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 178.86203002929688,
"min": -4.915089130401611,
"max": 188.8230438232422,
"count": 25
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.013863488100469112,
"min": -0.013863488100469112,
"max": 0.03425129875540733,
"count": 25
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -3.909503698348999,
"min": -3.909503698348999,
"max": 8.460070610046387,
"count": 25
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07151196053295972,
"min": 0.0660224082666294,
"max": 0.07234664452713047,
"count": 25
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.001167447461436,
"min": 0.5019290691678109,
"max": 1.0355968370422488,
"count": 25
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016926430494225184,
"min": 0.0024665188063922676,
"max": 0.017754057775337496,
"count": 25
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2369700269191526,
"min": 0.017265631644745875,
"max": 0.2583722302370006,
"count": 25
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.660083160957138e-06,
"min": 7.660083160957138e-06,
"max": 0.00022186781175835714,
"count": 25
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010724116425339992,
"min": 0.00010724116425339992,
"max": 0.003004965398345,
"count": 25
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10255332857142858,
"min": 0.10255332857142858,
"max": 0.17395592857142855,
"count": 25
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4357466,
"min": 1.2176915,
"max": 2.401655,
"count": 25
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002650775242857141,
"min": 0.0002650775242857141,
"max": 0.007398197264285714,
"count": 25
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003711085339999997,
"min": 0.003711085339999997,
"max": 0.1002053345,
"count": 25
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010564847849309444,
"min": 0.010564847849309444,
"max": 0.02677089534699917,
"count": 25
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14790786802768707,
"min": 0.14790786802768707,
"max": 0.32736489176750183,
"count": 25
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 290.7843137254902,
"min": 279.1515151515151,
"max": 855.3888888888889,
"count": 25
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29660.0,
"min": 15397.0,
"max": 31979.0,
"count": 25
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6896039050291567,
"min": -0.2882432897348662,
"max": 1.7208484652066471,
"count": 25
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 172.33959831297398,
"min": -10.665001720190048,
"max": 172.67579817026854,
"count": 25
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6896039050291567,
"min": -0.2882432897348662,
"max": 1.7208484652066471,
"count": 25
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 172.33959831297398,
"min": -10.665001720190048,
"max": 172.67579817026854,
"count": 25
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.031359393709577736,
"min": 0.031359393709577736,
"max": 0.24632914832738392,
"count": 25
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1986581583769294,
"min": 3.1238307242965675,
"max": 7.582607533782721,
"count": 25
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 25
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 25
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681051678",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --resume --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681054418"
},
"total": 2740.0586489969996,
"count": 1,
"self": 1.0349931780001498,
"children": {
"run_training.setup": {
"total": 0.24388598999985334,
"count": 1,
"self": 0.24388598999985334
},
"TrainerController.start_learning": {
"total": 2738.7797698289996,
"count": 1,
"self": 1.7436950861197147,
"children": {
"TrainerController._reset_env": {
"total": 1.4693327769996358,
"count": 1,
"self": 1.4693327769996358
},
"TrainerController.advance": {
"total": 2735.3697984398805,
"count": 48432,
"self": 1.9531862707463006,
"children": {
"env_step": {
"total": 1883.2610111991235,
"count": 48432,
"self": 1777.392116697165,
"children": {
"SubprocessEnvManager._take_step": {
"total": 104.7473115339667,
"count": 48432,
"self": 5.530611806001161,
"children": {
"TorchPolicy.evaluate": {
"total": 99.21669972796553,
"count": 47032,
"self": 99.21669972796553
}
}
},
"workers": {
"total": 1.1215829679917988,
"count": 48432,
"self": 0.0,
"children": {
"worker_root": {
"total": 2733.228798891074,
"count": 48432,
"is_parallel": true,
"self": 1084.8769362689927,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0029681599999094033,
"count": 1,
"is_parallel": true,
"self": 0.0009018659993671463,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002066294000542257,
"count": 8,
"is_parallel": true,
"self": 0.002066294000542257
}
}
},
"UnityEnvironment.step": {
"total": 0.12495746100012184,
"count": 1,
"is_parallel": true,
"self": 0.0058945030004906585,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005551699996431125,
"count": 1,
"is_parallel": true,
"self": 0.0005551699996431125
},
"communicator.exchange": {
"total": 0.11638420000008409,
"count": 1,
"is_parallel": true,
"self": 0.11638420000008409
},
"steps_from_proto": {
"total": 0.0021235879999039753,
"count": 1,
"is_parallel": true,
"self": 0.0005263219991320511,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015972660007719242,
"count": 8,
"is_parallel": true,
"self": 0.0015972660007719242
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1648.3518626220812,
"count": 48431,
"is_parallel": true,
"self": 34.19777701228395,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 20.233306798842932,
"count": 48431,
"is_parallel": true,
"self": 20.233306798842932
},
"communicator.exchange": {
"total": 1496.0367313431188,
"count": 48431,
"is_parallel": true,
"self": 1496.0367313431188
},
"steps_from_proto": {
"total": 97.88404746783544,
"count": 48431,
"is_parallel": true,
"self": 22.428788938808793,
"children": {
"_process_rank_one_or_two_observation": {
"total": 75.45525852902665,
"count": 387448,
"is_parallel": true,
"self": 75.45525852902665
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 850.1556009700107,
"count": 48432,
"self": 3.6856397079905037,
"children": {
"process_trajectory": {
"total": 110.74218102702343,
"count": 48432,
"self": 110.38320340102291,
"children": {
"RLTrainer._checkpoint": {
"total": 0.35897762600052374,
"count": 2,
"self": 0.35897762600052374
}
}
},
"_update_policy": {
"total": 735.7277802349968,
"count": 352,
"self": 300.06450094503134,
"children": {
"TorchPPOOptimizer.update": {
"total": 435.66327928996543,
"count": 17085,
"self": 435.66327928996543
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3019998732488602e-06,
"count": 1,
"self": 1.3019998732488602e-06
},
"TrainerController._save_models": {
"total": 0.19694222399994032,
"count": 1,
"self": 0.004863570999987132,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1920786529999532,
"count": 1,
"self": 0.1920786529999532
}
}
}
}
}
}
}