{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3624989092350006,
"min": 0.34844693541526794,
"max": 1.4669671058654785,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10851.767578125,
"min": 10486.859375,
"max": 44501.9140625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989956.0,
"min": 29952.0,
"max": 989956.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989956.0,
"min": 29952.0,
"max": 989956.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5701069235801697,
"min": -0.10298356413841248,
"max": 0.5857205390930176,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 156.20928955078125,
"min": -24.81903839111328,
"max": 168.10179138183594,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.05412546917796135,
"min": 0.007505383342504501,
"max": 0.3047816753387451,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 14.830378532409668,
"min": 2.041464328765869,
"max": 73.45238494873047,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06575968648013215,
"min": 0.06575968648013215,
"max": 0.0730318077424142,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9206356107218501,
"min": 0.4755342543669093,
"max": 1.0853028095831783,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017885478927859196,
"min": 0.00030657739244263355,
"max": 0.017885478927859196,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.25039670499002875,
"min": 0.003985506101754236,
"max": 0.25890937116677304,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.305718993364287e-06,
"min": 7.305718993364287e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010228006590710001,
"min": 0.00010228006590710001,
"max": 0.0036333496888835,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10243520714285716,
"min": 0.10243520714285716,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4340929000000002,
"min": 1.3886848,
"max": 2.6111165,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025327719357142863,
"min": 0.00025327719357142863,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035458807100000006,
"min": 0.0035458807100000006,
"max": 0.12113053835,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012481734156608582,
"min": 0.012481734156608582,
"max": 0.4700690805912018,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17474427819252014,
"min": 0.17474427819252014,
"max": 3.2904834747314453,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 345.1954022988506,
"min": 307.3673469387755,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30032.0,
"min": 15984.0,
"max": 33186.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5858183700120312,
"min": -1.0000000521540642,
"max": 1.6546545245430686,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 137.96619819104671,
"min": -31.99320164322853,
"max": 163.8107979297638,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5858183700120312,
"min": -1.0000000521540642,
"max": 1.6546545245430686,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 137.96619819104671,
"min": -31.99320164322853,
"max": 163.8107979297638,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04479393723882279,
"min": 0.03927308471971591,
"max": 9.190796630457044,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.8970725397775823,
"min": 3.7879640555620426,
"max": 147.0527460873127,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688845950",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688848414"
},
"total": 2463.7772321370003,
"count": 1,
"self": 0.525834035999651,
"children": {
"run_training.setup": {
"total": 0.04058082899996407,
"count": 1,
"self": 0.04058082899996407
},
"TrainerController.start_learning": {
"total": 2463.2108172720004,
"count": 1,
"self": 1.571870161060815,
"children": {
"TrainerController._reset_env": {
"total": 5.0819342960000995,
"count": 1,
"self": 5.0819342960000995
},
"TrainerController.advance": {
"total": 2456.4583902199406,
"count": 63847,
"self": 1.5399589500261754,
"children": {
"env_step": {
"total": 1755.0836866799407,
"count": 63847,
"self": 1634.9016495850199,
"children": {
"SubprocessEnvManager._take_step": {
"total": 119.2601106079901,
"count": 63847,
"self": 5.021007316939176,
"children": {
"TorchPolicy.evaluate": {
"total": 114.23910329105092,
"count": 62532,
"self": 114.23910329105092
}
}
},
"workers": {
"total": 0.9219264869307153,
"count": 63847,
"self": 0.0,
"children": {
"worker_root": {
"total": 2457.2653820890187,
"count": 63847,
"is_parallel": true,
"self": 945.2337528530484,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001966485999901124,
"count": 1,
"is_parallel": true,
"self": 0.0006216009999207017,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013448849999804224,
"count": 8,
"is_parallel": true,
"self": 0.0013448849999804224
}
}
},
"UnityEnvironment.step": {
"total": 0.10898077799993189,
"count": 1,
"is_parallel": true,
"self": 0.0005948049997641647,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005527710000023944,
"count": 1,
"is_parallel": true,
"self": 0.0005527710000023944
},
"communicator.exchange": {
"total": 0.10147420600014811,
"count": 1,
"is_parallel": true,
"self": 0.10147420600014811
},
"steps_from_proto": {
"total": 0.006358996000017214,
"count": 1,
"is_parallel": true,
"self": 0.0004840229998990253,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.005874973000118189,
"count": 8,
"is_parallel": true,
"self": 0.005874973000118189
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1512.0316292359703,
"count": 63846,
"is_parallel": true,
"self": 35.89721445590635,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.182953463969397,
"count": 63846,
"is_parallel": true,
"self": 24.182953463969397
},
"communicator.exchange": {
"total": 1341.7218402150409,
"count": 63846,
"is_parallel": true,
"self": 1341.7218402150409
},
"steps_from_proto": {
"total": 110.22962110105368,
"count": 63846,
"is_parallel": true,
"self": 21.77734960427415,
"children": {
"_process_rank_one_or_two_observation": {
"total": 88.45227149677953,
"count": 510768,
"is_parallel": true,
"self": 88.45227149677953
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 699.8347445899735,
"count": 63847,
"self": 2.868316095979935,
"children": {
"process_trajectory": {
"total": 117.82736964499486,
"count": 63847,
"self": 117.60537341999475,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22199622500011174,
"count": 2,
"self": 0.22199622500011174
}
}
},
"_update_policy": {
"total": 579.1390588489987,
"count": 455,
"self": 374.16178485203795,
"children": {
"TorchPPOOptimizer.update": {
"total": 204.97727399696078,
"count": 22815,
"self": 204.97727399696078
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.709992809803225e-07,
"count": 1,
"self": 9.709992809803225e-07
},
"TrainerController._save_models": {
"total": 0.09862162399986119,
"count": 1,
"self": 0.0015134370005398523,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09710818699932133,
"count": 1,
"self": 0.09710818699932133
}
}
}
}
}
}
}