{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4699968099594116,
"min": 0.4699968099594116,
"max": 1.4585990905761719,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14272.86328125,
"min": 14114.0205078125,
"max": 44248.0625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989915.0,
"min": 29952.0,
"max": 989915.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989915.0,
"min": 29952.0,
"max": 989915.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.52896648645401,
"min": -0.09490203857421875,
"max": 0.52896648645401,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 145.9947509765625,
"min": -22.68158721923828,
"max": 145.9947509765625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.009597777388989925,
"min": -0.023382294923067093,
"max": 0.37579816579818726,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.648986577987671,
"min": -6.149543762207031,
"max": 90.19155883789062,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06678406943942213,
"min": 0.06594012344251315,
"max": 0.07357809395806998,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0017610415913318,
"min": 0.5043265071460689,
"max": 1.0747590112047927,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01673373573058699,
"min": 0.0003517762425772662,
"max": 0.01673373573058699,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.25100603595880483,
"min": 0.0045730911535044605,
"max": 0.25100603595880483,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.473177508973331e-06,
"min": 7.473177508973331e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011209766263459996,
"min": 0.00011209766263459996,
"max": 0.0035073836308722,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249102666666669,
"min": 0.10249102666666669,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5373654000000003,
"min": 1.3886848,
"max": 2.5691278000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000258853564,
"min": 0.000258853564,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038828034599999997,
"min": 0.0038828034599999997,
"max": 0.11693586722,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008434569463133812,
"min": 0.008434569463133812,
"max": 0.5537980794906616,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12651854753494263,
"min": 0.12285639345645905,
"max": 3.876586675643921,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 337.3,
"min": 337.3,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30357.0,
"min": 15984.0,
"max": 32586.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6182355388171143,
"min": -1.0000000521540642,
"max": 1.6182355388171143,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 145.6411984935403,
"min": -31.994401648640633,
"max": 145.6411984935403,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6182355388171143,
"min": -1.0000000521540642,
"max": 1.6182355388171143,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 145.6411984935403,
"min": -31.994401648640633,
"max": 145.6411984935403,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.029758545881809872,
"min": 0.029758545881809872,
"max": 11.713482340797782,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.6782691293628886,
"min": 2.6782691293628886,
"max": 187.4157174527645,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689447766",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689450008"
},
"total": 2242.5644062190004,
"count": 1,
"self": 0.6325759850014947,
"children": {
"run_training.setup": {
"total": 0.06380634699985421,
"count": 1,
"self": 0.06380634699985421
},
"TrainerController.start_learning": {
"total": 2241.8680238869993,
"count": 1,
"self": 1.456567766988428,
"children": {
"TrainerController._reset_env": {
"total": 4.607845717000146,
"count": 1,
"self": 4.607845717000146
},
"TrainerController.advance": {
"total": 2235.7015575800106,
"count": 63607,
"self": 1.4794515209632664,
"children": {
"env_step": {
"total": 1559.2872784390167,
"count": 63607,
"self": 1439.2351780020745,
"children": {
"SubprocessEnvManager._take_step": {
"total": 119.19485846995485,
"count": 63607,
"self": 5.083326818963087,
"children": {
"TorchPolicy.evaluate": {
"total": 114.11153165099176,
"count": 62550,
"self": 114.11153165099176
}
}
},
"workers": {
"total": 0.8572419669874307,
"count": 63607,
"self": 0.0,
"children": {
"worker_root": {
"total": 2236.7360798100804,
"count": 63607,
"is_parallel": true,
"self": 918.2822563931329,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0030878509999183734,
"count": 1,
"is_parallel": true,
"self": 0.0008387449995552743,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002249106000363099,
"count": 8,
"is_parallel": true,
"self": 0.002249106000363099
}
}
},
"UnityEnvironment.step": {
"total": 0.051817946000028314,
"count": 1,
"is_parallel": true,
"self": 0.0006091679999826738,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005076100001133454,
"count": 1,
"is_parallel": true,
"self": 0.0005076100001133454
},
"communicator.exchange": {
"total": 0.04876558299997669,
"count": 1,
"is_parallel": true,
"self": 0.04876558299997669
},
"steps_from_proto": {
"total": 0.0019355849999556085,
"count": 1,
"is_parallel": true,
"self": 0.0003444240003318555,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001591160999623753,
"count": 8,
"is_parallel": true,
"self": 0.001591160999623753
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1318.4538234169474,
"count": 63606,
"is_parallel": true,
"self": 34.936699585982296,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.701095417033684,
"count": 63606,
"is_parallel": true,
"self": 24.701095417033684
},
"communicator.exchange": {
"total": 1145.520465601882,
"count": 63606,
"is_parallel": true,
"self": 1145.520465601882
},
"steps_from_proto": {
"total": 113.29556281204941,
"count": 63606,
"is_parallel": true,
"self": 22.091726551123884,
"children": {
"_process_rank_one_or_two_observation": {
"total": 91.20383626092553,
"count": 508848,
"is_parallel": true,
"self": 91.20383626092553
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 674.9348276200305,
"count": 63607,
"self": 2.782474563043479,
"children": {
"process_trajectory": {
"total": 120.27217366198579,
"count": 63607,
"self": 120.05295540898601,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21921825299978082,
"count": 2,
"self": 0.21921825299978082
}
}
},
"_update_policy": {
"total": 551.8801793950013,
"count": 449,
"self": 354.4514479999782,
"children": {
"TorchPPOOptimizer.update": {
"total": 197.42873139502308,
"count": 22785,
"self": 197.42873139502308
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5360001270892099e-06,
"count": 1,
"self": 1.5360001270892099e-06
},
"TrainerController._save_models": {
"total": 0.10205128699999477,
"count": 1,
"self": 0.0014822989996901015,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10056898800030467,
"count": 1,
"self": 0.10056898800030467
}
}
}
}
}
}
}