{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.1675790548324585,
"min": 0.16217057406902313,
"max": 1.3953831195831299,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 5030.052734375,
"min": 4826.1962890625,
"max": 42330.34375,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999926.0,
"min": 29952.0,
"max": 2999926.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999926.0,
"min": 29952.0,
"max": 2999926.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8224744200706482,
"min": -0.2121364027261734,
"max": 0.9010592103004456,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 245.09738159179688,
"min": -50.276329040527344,
"max": 283.0172119140625,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.000254911050433293,
"min": -0.02823013626039028,
"max": 0.7119223475456238,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.07596348971128464,
"min": -8.327890396118164,
"max": 168.72560119628906,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0689286315149327,
"min": 0.06372497180786248,
"max": 0.07353446988962167,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9650008412090578,
"min": 0.4994683060644532,
"max": 1.086479228603375,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013487047703068635,
"min": 0.00011577515347134225,
"max": 0.03296545092176792,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1888186678429609,
"min": 0.0015050769951274494,
"max": 0.23508479958343065,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5662709065142888e-06,
"min": 1.5662709065142888e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.1927792691200042e-05,
"min": 2.1927792691200042e-05,
"max": 0.0039273318908894,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10052205714285714,
"min": 0.10052205714285714,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4073088,
"min": 1.3962282666666668,
"max": 2.7675394666666664,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.215350857142868e-05,
"min": 6.215350857142868e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008701491200000015,
"min": 0.0008701491200000015,
"max": 0.13092014894,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005784022156149149,
"min": 0.005301194731146097,
"max": 0.9048455357551575,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.08097630739212036,
"min": 0.0795179232954979,
"max": 6.333918571472168,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 219.42335766423358,
"min": 188.53048780487805,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30061.0,
"min": 15984.0,
"max": 32828.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7805766305566704,
"min": -1.0000000521540642,
"max": 1.7994674732165834,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 243.93899838626385,
"min": -31.998001664876938,
"max": 293.3131981343031,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7805766305566704,
"min": -1.0000000521540642,
"max": 1.7994674732165834,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 243.93899838626385,
"min": -31.998001664876938,
"max": 293.3131981343031,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.013117933525870823,
"min": 0.011766130464050888,
"max": 19.960637274198234,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.7971568930443027,
"min": 1.6039180451552966,
"max": 319.37019638717175,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673867807",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --force --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673874672"
},
"total": 6864.8881963250005,
"count": 1,
"self": 0.42651065999962157,
"children": {
"run_training.setup": {
"total": 0.10514022699999259,
"count": 1,
"self": 0.10514022699999259
},
"TrainerController.start_learning": {
"total": 6864.3565454380005,
"count": 1,
"self": 3.772354387128871,
"children": {
"TrainerController._reset_env": {
"total": 6.280578569000227,
"count": 1,
"self": 6.280578569000227
},
"TrainerController.advance": {
"total": 6854.216574231871,
"count": 195003,
"self": 4.004867659939009,
"children": {
"env_step": {
"total": 4870.815900672944,
"count": 195003,
"self": 4559.99746307044,
"children": {
"SubprocessEnvManager._take_step": {
"total": 308.43615814668783,
"count": 195003,
"self": 12.661934140508492,
"children": {
"TorchPolicy.evaluate": {
"total": 295.77422400617934,
"count": 187550,
"self": 99.84544257111293,
"children": {
"TorchPolicy.sample_actions": {
"total": 195.9287814350664,
"count": 187550,
"self": 195.9287814350664
}
}
}
}
},
"workers": {
"total": 2.382279455815933,
"count": 195003,
"self": 0.0,
"children": {
"worker_root": {
"total": 6850.26255202094,
"count": 195003,
"is_parallel": true,
"self": 2589.9094462507464,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016592880001553567,
"count": 1,
"is_parallel": true,
"self": 0.0006072650007808988,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010520229993744579,
"count": 8,
"is_parallel": true,
"self": 0.0010520229993744579
}
}
},
"UnityEnvironment.step": {
"total": 0.06567906500004028,
"count": 1,
"is_parallel": true,
"self": 0.0005069389994787343,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003919000000678352,
"count": 1,
"is_parallel": true,
"self": 0.0003919000000678352
},
"communicator.exchange": {
"total": 0.06307000100014193,
"count": 1,
"is_parallel": true,
"self": 0.06307000100014193
},
"steps_from_proto": {
"total": 0.0017102250003517838,
"count": 1,
"is_parallel": true,
"self": 0.0004076020004504244,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013026229999013594,
"count": 8,
"is_parallel": true,
"self": 0.0013026229999013594
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4260.353105770194,
"count": 195002,
"is_parallel": true,
"self": 84.96059891143796,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 66.24082947365332,
"count": 195002,
"is_parallel": true,
"self": 66.24082947365332
},
"communicator.exchange": {
"total": 3806.085641901141,
"count": 195002,
"is_parallel": true,
"self": 3806.085641901141
},
"steps_from_proto": {
"total": 303.06603548396106,
"count": 195002,
"is_parallel": true,
"self": 67.04157252854793,
"children": {
"_process_rank_one_or_two_observation": {
"total": 236.02446295541313,
"count": 1560016,
"is_parallel": true,
"self": 236.02446295541313
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1979.3958058989888,
"count": 195003,
"self": 7.312480344824053,
"children": {
"process_trajectory": {
"total": 437.3572831341439,
"count": 195003,
"self": 436.78805104514277,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5692320890011615,
"count": 6,
"self": 0.5692320890011615
}
}
},
"_update_policy": {
"total": 1534.7260424200208,
"count": 1386,
"self": 584.1678885040551,
"children": {
"TorchPPOOptimizer.update": {
"total": 950.5581539159657,
"count": 68400,
"self": 950.5581539159657
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0040002962341532e-06,
"count": 1,
"self": 1.0040002962341532e-06
},
"TrainerController._save_models": {
"total": 0.08703724599945417,
"count": 1,
"self": 0.0017129400002886541,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08532430599916552,
"count": 1,
"self": 0.08532430599916552
}
}
}
}
}
}
}