{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.42132025957107544,
"min": 0.42132025957107544,
"max": 1.45975923538208,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 12545.232421875,
"min": 12545.232421875,
"max": 44283.2578125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989998.0,
"min": 29952.0,
"max": 989998.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989998.0,
"min": 29952.0,
"max": 989998.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6347114443778992,
"min": -0.0907621830701828,
"max": 0.6347114443778992,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 183.43161010742188,
"min": -21.78292465209961,
"max": 183.43161010742188,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.020268721505999565,
"min": -0.014653694815933704,
"max": 0.37435969710350037,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 5.85766077041626,
"min": -4.059073448181152,
"max": 88.72325134277344,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07096268798479634,
"min": 0.06471611243349511,
"max": 0.07377856872044504,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9934776317871487,
"min": 0.49283975394758467,
"max": 1.1066785308066756,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.020026575500932738,
"min": 0.0007847524357678842,
"max": 0.020026575500932738,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.28037205701305834,
"min": 0.008632276793446726,
"max": 0.28037205701305834,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.69201172174286e-06,
"min": 7.69201172174286e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010768816410440003,
"min": 0.00010768816410440003,
"max": 0.0033827192724270004,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10256397142857143,
"min": 0.10256397142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4358956,
"min": 1.3886848,
"max": 2.5275730000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026614074571428584,
"min": 0.00026614074571428584,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003725970440000002,
"min": 0.003725970440000002,
"max": 0.11278454269999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.015292965807020664,
"min": 0.0146558852866292,
"max": 0.4493362009525299,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.21410152316093445,
"min": 0.20518238842487335,
"max": 3.145353317260742,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 283.52830188679246,
"min": 283.52830188679246,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30054.0,
"min": 15984.0,
"max": 32310.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.716471681055033,
"min": -1.0000000521540642,
"max": 1.716471681055033,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 181.9459981918335,
"min": -30.001801684498787,
"max": 181.9459981918335,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.716471681055033,
"min": -1.0000000521540642,
"max": 1.716471681055033,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 181.9459981918335,
"min": -30.001801684498787,
"max": 181.9459981918335,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04447039237733544,
"min": 0.04447039237733544,
"max": 9.206337217241526,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.713861591997556,
"min": 4.423820535157574,
"max": 147.3013954758644,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676671954",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676674308"
},
"total": 2354.4943896759996,
"count": 1,
"self": 0.4744342049998522,
"children": {
"run_training.setup": {
"total": 0.11136947399995734,
"count": 1,
"self": 0.11136947399995734
},
"TrainerController.start_learning": {
"total": 2353.908585997,
"count": 1,
"self": 1.3756787840861762,
"children": {
"TrainerController._reset_env": {
"total": 6.210419788999843,
"count": 1,
"self": 6.210419788999843
},
"TrainerController.advance": {
"total": 2346.2365584489144,
"count": 64026,
"self": 1.4523207099264255,
"children": {
"env_step": {
"total": 1589.813076040009,
"count": 64026,
"self": 1473.9744555190482,
"children": {
"SubprocessEnvManager._take_step": {
"total": 114.98240205994671,
"count": 64026,
"self": 4.810149471877594,
"children": {
"TorchPolicy.evaluate": {
"total": 110.17225258806911,
"count": 62564,
"self": 37.16025839101849,
"children": {
"TorchPolicy.sample_actions": {
"total": 73.01199419705063,
"count": 62564,
"self": 73.01199419705063
}
}
}
}
},
"workers": {
"total": 0.856218461014123,
"count": 64026,
"self": 0.0,
"children": {
"worker_root": {
"total": 2348.0689839851025,
"count": 64026,
"is_parallel": true,
"self": 996.6513373850182,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001888210000061008,
"count": 1,
"is_parallel": true,
"self": 0.0007058979999783332,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011823120000826748,
"count": 8,
"is_parallel": true,
"self": 0.0011823120000826748
}
}
},
"UnityEnvironment.step": {
"total": 0.054386338999847794,
"count": 1,
"is_parallel": true,
"self": 0.0005168999996385537,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005059710001660278,
"count": 1,
"is_parallel": true,
"self": 0.0005059710001660278
},
"communicator.exchange": {
"total": 0.051609969000082856,
"count": 1,
"is_parallel": true,
"self": 0.051609969000082856
},
"steps_from_proto": {
"total": 0.0017534989999603567,
"count": 1,
"is_parallel": true,
"self": 0.00043657300011545885,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013169259998448979,
"count": 8,
"is_parallel": true,
"self": 0.0013169259998448979
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1351.4176466000843,
"count": 64025,
"is_parallel": true,
"self": 32.38189367393511,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.51605801009123,
"count": 64025,
"is_parallel": true,
"self": 24.51605801009123
},
"communicator.exchange": {
"total": 1185.384590147026,
"count": 64025,
"is_parallel": true,
"self": 1185.384590147026
},
"steps_from_proto": {
"total": 109.13510476903184,
"count": 64025,
"is_parallel": true,
"self": 25.787784247037052,
"children": {
"_process_rank_one_or_two_observation": {
"total": 83.34732052199479,
"count": 512200,
"is_parallel": true,
"self": 83.34732052199479
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 754.9711616989791,
"count": 64026,
"self": 2.4938038619768577,
"children": {
"process_trajectory": {
"total": 172.12039312800016,
"count": 64026,
"self": 171.926932243,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19346088500014957,
"count": 2,
"self": 0.19346088500014957
}
}
},
"_update_policy": {
"total": 580.3569647090021,
"count": 448,
"self": 227.98810086895514,
"children": {
"TorchPPOOptimizer.update": {
"total": 352.36886384004697,
"count": 22788,
"self": 352.36886384004697
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.229997885995544e-07,
"count": 1,
"self": 9.229997885995544e-07
},
"TrainerController._save_models": {
"total": 0.08592805199987197,
"count": 1,
"self": 0.0014165940001475974,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08451145799972437,
"count": 1,
"self": 0.08451145799972437
}
}
}
}
}
}
}