{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.45872706174850464,
"min": 0.45872706174850464,
"max": 1.515926480293274,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13725.11328125,
"min": 13725.11328125,
"max": 45987.14453125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989883.0,
"min": 29952.0,
"max": 989883.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989883.0,
"min": 29952.0,
"max": 989883.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.43371695280075073,
"min": -0.12399439513683319,
"max": 0.47747862339019775,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 113.63384246826172,
"min": -29.758655548095703,
"max": 126.23753356933594,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.10327787697315216,
"min": -0.10327787697315216,
"max": 0.1550711840391159,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -27.05880355834961,
"min": -27.05880355834961,
"max": 37.21708297729492,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06788154357872415,
"min": 0.06606345561313273,
"max": 0.07455633012705967,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9503416101021382,
"min": 0.494009508129113,
"max": 1.0819738257171898,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014898586407417456,
"min": 0.0006412259166515521,
"max": 0.015148997864970525,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2085802097038444,
"min": 0.007694710999818626,
"max": 0.22723496797455786,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.27357614693571e-06,
"min": 7.27357614693571e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010183006605709994,
"min": 0.00010183006605709994,
"max": 0.0032545193151602998,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10242449285714286,
"min": 0.10242449285714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4339429000000001,
"min": 1.3691136000000002,
"max": 2.4847038000000006,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002522068364285713,
"min": 0.0002522068364285713,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003530895709999998,
"min": 0.003530895709999998,
"max": 0.10850548602999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01247482467442751,
"min": 0.01247482467442751,
"max": 0.3745104968547821,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17464753985404968,
"min": 0.17464753985404968,
"max": 2.6215734481811523,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 450.1875,
"min": 409.63013698630135,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28812.0,
"min": 15984.0,
"max": 32136.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4560249783098698,
"min": -1.0000000521540642,
"max": 1.4997885385794298,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 93.18559861183167,
"min": -32.000001668930054,
"max": 106.91379843652248,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4560249783098698,
"min": -1.0000000521540642,
"max": 1.4997885385794298,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 93.18559861183167,
"min": -32.000001668930054,
"max": 106.91379843652248,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05845106490755825,
"min": 0.05569848564651967,
"max": 7.362769414670765,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.740868154083728,
"min": 3.740868154083728,
"max": 117.80431063473225,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678237258",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678239521"
},
"total": 2262.9954641699997,
"count": 1,
"self": 0.43302127099968857,
"children": {
"run_training.setup": {
"total": 0.16861931699997967,
"count": 1,
"self": 0.16861931699997967
},
"TrainerController.start_learning": {
"total": 2262.393823582,
"count": 1,
"self": 1.3450531800222052,
"children": {
"TrainerController._reset_env": {
"total": 7.493468083000039,
"count": 1,
"self": 7.493468083000039
},
"TrainerController.advance": {
"total": 2253.462971112978,
"count": 63551,
"self": 1.3354891469753056,
"children": {
"env_step": {
"total": 1524.8651535330087,
"count": 63551,
"self": 1413.3102514880447,
"children": {
"SubprocessEnvManager._take_step": {
"total": 110.75915290999137,
"count": 63551,
"self": 4.774217070017926,
"children": {
"TorchPolicy.evaluate": {
"total": 105.98493583997345,
"count": 62553,
"self": 35.910893108983146,
"children": {
"TorchPolicy.sample_actions": {
"total": 70.0740427309903,
"count": 62553,
"self": 70.0740427309903
}
}
}
}
},
"workers": {
"total": 0.7957491349726524,
"count": 63551,
"self": 0.0,
"children": {
"worker_root": {
"total": 2258.0509741990177,
"count": 63551,
"is_parallel": true,
"self": 956.1916785590186,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002672211000003699,
"count": 1,
"is_parallel": true,
"self": 0.0007714580001447757,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019007529998589234,
"count": 8,
"is_parallel": true,
"self": 0.0019007529998589234
}
}
},
"UnityEnvironment.step": {
"total": 0.04784597599996232,
"count": 1,
"is_parallel": true,
"self": 0.0005909909999672891,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047959500000160915,
"count": 1,
"is_parallel": true,
"self": 0.00047959500000160915
},
"communicator.exchange": {
"total": 0.04485841899997922,
"count": 1,
"is_parallel": true,
"self": 0.04485841899997922
},
"steps_from_proto": {
"total": 0.0019169710000142004,
"count": 1,
"is_parallel": true,
"self": 0.0006001309999419391,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013168400000722613,
"count": 8,
"is_parallel": true,
"self": 0.0013168400000722613
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1301.8592956399991,
"count": 63550,
"is_parallel": true,
"self": 30.827032289000044,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.895228839047377,
"count": 63550,
"is_parallel": true,
"self": 22.895228839047377
},
"communicator.exchange": {
"total": 1157.73299440197,
"count": 63550,
"is_parallel": true,
"self": 1157.73299440197
},
"steps_from_proto": {
"total": 90.40404010998168,
"count": 63550,
"is_parallel": true,
"self": 21.19495573408608,
"children": {
"_process_rank_one_or_two_observation": {
"total": 69.2090843758956,
"count": 508400,
"is_parallel": true,
"self": 69.2090843758956
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 727.2623284329937,
"count": 63551,
"self": 2.4472810149940187,
"children": {
"process_trajectory": {
"total": 161.34824223800206,
"count": 63551,
"self": 161.1088371530018,
"children": {
"RLTrainer._checkpoint": {
"total": 0.23940508500027136,
"count": 2,
"self": 0.23940508500027136
}
}
},
"_update_policy": {
"total": 563.4668051799977,
"count": 445,
"self": 217.11616579100325,
"children": {
"TorchPPOOptimizer.update": {
"total": 346.3506393889944,
"count": 22782,
"self": 346.3506393889944
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.54999904934084e-07,
"count": 1,
"self": 9.54999904934084e-07
},
"TrainerController._save_models": {
"total": 0.09233025100002124,
"count": 1,
"self": 0.0013549330001296767,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09097531799989156,
"count": 1,
"self": 0.09097531799989156
}
}
}
}
}
}
}