diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/_macos_compat.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/_macos_compat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b9b063af74ff1f0cacc158e94ec0b0ca886b51b8
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/_macos_compat.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/_modified.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/_modified.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..059504950b17d99c679f04f499c6c91d97034681
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/_modified.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/_msvccompiler.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/_msvccompiler.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6f40a0d6d789c17113b06671a0bf48068ab861b8
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/_msvccompiler.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/archive_util.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/archive_util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67ea8e7fd88ca4ed753f77ee051e066995703c77
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/archive_util.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/cmd.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/cmd.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aa891f753ebeeb844e01fdec737dd53969905a1e
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/cmd.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/core.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/core.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..26da04c164e334b8113328ac9b669bcbe37827d2
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/core.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/debug.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/debug.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8caf20a16de0f29b0946ca22d326581034ea428d
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/debug.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/dep_util.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/dep_util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..085b01b2fa09b93090761a82d2432063614490c0
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/dep_util.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/dir_util.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/dir_util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f55bd91cbb22b5d5c72874194fc0ced64b3d4725
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/dir_util.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/dist.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/dist.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8418c56b9e72c90520d3c67fbbe8f3d2ee70de80
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/dist.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/extension.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/extension.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c9ba208d0460db246fb3f2305c4e9595f9aa5a4d
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/extension.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/fancy_getopt.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/fancy_getopt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..296f789886c8e0c054c9b1a11e9e695868b95723
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/fancy_getopt.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/spawn.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/spawn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7afbd728583c12ffa1745bf8afc60941e6fa0fc3
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/spawn.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/text_file.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/text_file.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5111b84cba7ee7ba5deced4566c072eb103f63e5
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/text_file.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/unixccompiler.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/unixccompiler.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..73bbbfdd2fe64c6d18a8b40a58d889824c7baf52
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/unixccompiler.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/version.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/version.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..354583f6a37151d077feb0deda2653eda55d75e0
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/version.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/versionpredicate.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/versionpredicate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0c42b90f33323531e1dad53f4d2b4def4a98b03f
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/versionpredicate.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/zosccompiler.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/zosccompiler.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f2d2e51f147dc3bb703b166adc62b2ede9e67459
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/__pycache__/zosccompiler.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/__init__.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8e5a9bc49399043181b7a4eb6f4bed6c35b12ef3
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/support.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/support.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6afc1cb18f1298874aabfec4d9dce617c6fb892b
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/support.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_bdist_rpm.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_bdist_rpm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d8fad3061e1e9441e6cbdb477822a8cf944791d5
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_bdist_rpm.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_build.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_build.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eda6d6f08ba5bcf7160bc24952f8d498bd847b8e
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_build.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_build_clib.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_build_clib.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8c8796aa281aefe3efb03a86659fa6a4b66a8e66
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_build_clib.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_check.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_check.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cb4894247ac57ae447a9e2a512a3c2bca0b2632a
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_check.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_clean.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_clean.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..caf674c83c9a22c0e420e8ecbcb46f7d02df5ad7
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_clean.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_cmd.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_cmd.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..20b5f3ec747c7575bce8116f2f837acc62f35917
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_cmd.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_config_cmd.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_config_cmd.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d0944364b989a1f0c50c772d6aed5ae7540ec06c
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_config_cmd.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_core.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_core.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5b756567348832ffa7fc9a5e2bbecf0ffdb73b67
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_core.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_cygwinccompiler.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_cygwinccompiler.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..537e63fc230f793da9a9144e3864c3f530fe671f
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_cygwinccompiler.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_dir_util.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_dir_util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8f4ea8823736a1f5cf32334fa2f98ef86931ed88
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_dir_util.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_dist.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_dist.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..96870b9623c9f149610b89a8ba1a4f72a8fea5b8
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_dist.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_extension.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_extension.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..91c0c93f537616e33e5539bca66c32e46e105abc
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_extension.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_file_util.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_file_util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..37145561deb761789670b139ee2de960714fca11
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_file_util.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_filelist.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_filelist.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..664bcea666d8f1fff7212dea24c35a6016f2f8a3
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_filelist.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_install.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_install.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..07dedba8d528b2371176a7312e43538a9ece9d1e
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_install.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_install_headers.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_install_headers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9724b1809485409164b618e26b30a595850e0f96
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_install_headers.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_install_lib.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_install_lib.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bbf1a79f5b7e50d4356c272123f39a07fbbd6fa9
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_install_lib.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_log.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_log.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..708a0410647d9b87d08497536d7a6fda5f1bde36
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_log.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_modified.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_modified.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b8904655404eaece1536ceca6fbd366dd749527
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_modified.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_sdist.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_sdist.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1a097df8a43f25d62444a6e427d3f1a43f7395c2
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_sdist.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_spawn.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_spawn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6266c0bc403006b024d1f45d76915c94945a9072
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_spawn.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_text_file.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_text_file.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e44723b0354dad564de12795532883568376c62e
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_text_file.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_unixccompiler.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_unixccompiler.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab78c6e7d4a5baa45fb981129cacb4a0aa204aa0
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_unixccompiler.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_util.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e73740f03156ed10c2f8b3ec70577f94a2490eed
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_util.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_versionpredicate.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_versionpredicate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f75b728a49d18defaa1b5a7f74e617cb1cca105f
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/test_versionpredicate.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/unix_compat.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/unix_compat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0c4210c603784c776ba9818f36fd168fe8b0ccbb
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/__pycache__/unix_compat.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/compat/__init__.py b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/compat/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/compat/__pycache__/__init__.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/compat/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ecc63cd912669d74b687ec009858ad73f0ba543b
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/compat/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/compat/__pycache__/py39.cpython-310.pyc b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/compat/__pycache__/py39.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a7bd4ae6f9807e65cbef5331b2d3cf38245f58ff
Binary files /dev/null and b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/compat/__pycache__/py39.cpython-310.pyc differ
diff --git a/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/compat/py39.py b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/compat/py39.py
new file mode 100644
index 0000000000000000000000000000000000000000..aca3939a0cea90dd0a90d7f36fcf83a5167cbfc4
--- /dev/null
+++ b/llava/lib/python3.10/site-packages/setuptools/_distutils/tests/compat/py39.py
@@ -0,0 +1,40 @@
+import sys
+
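+# In Python 3.10 these test helpers moved from `test.support` into the
+# `import_helper` and `os_helper` submodules; this compat module re-exports
+# them under a single name so callers don't need their own version checks.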
+if sys.version_info >= (3, 10):
+ from test.support.import_helper import (
+ CleanImport as CleanImport,
+ )
+ from test.support.import_helper import (
+ DirsOnSysPath as DirsOnSysPath,
+ )
+ from test.support.os_helper import (
+ EnvironmentVarGuard as EnvironmentVarGuard,
+ )
+ from test.support.os_helper import (
+ rmtree as rmtree,
+ )
+ from test.support.os_helper import (
+ skip_unless_symlink as skip_unless_symlink,
+ )
+ from test.support.os_helper import (
+ unlink as unlink,
+ )
+else:
+ from test.support import (
+ CleanImport as CleanImport,
+ )
+ from test.support import (
+ DirsOnSysPath as DirsOnSysPath,
+ )
+ from test.support import (
+ EnvironmentVarGuard as EnvironmentVarGuard,
+ )
+ from test.support import (
+ rmtree as rmtree,
+ )
+ from test.support import (
+ skip_unless_symlink as skip_unless_symlink,
+ )
+ from test.support import (
+ unlink as unlink,
+ )
diff --git a/minigpt2/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/INSTALLER b/minigpt2/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/minigpt2/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/LICENSE b/minigpt2/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/minigpt2/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/METADATA b/minigpt2/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..379469a9711ac9239419110a3f32599b8c552792
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/METADATA
@@ -0,0 +1,375 @@
+Metadata-Version: 2.1
+Name: accelerate
+Version: 1.2.1
+Summary: Accelerate
+Home-page: https://github.com/huggingface/accelerate
+Author: The HuggingFace team
+Author-email: zach.mueller@huggingface.co
+License: Apache
+Keywords: deep learning
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Requires-Python: >=3.9.0
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: numpy<3.0.0,>=1.17
+Requires-Dist: packaging>=20.0
+Requires-Dist: psutil
+Requires-Dist: pyyaml
+Requires-Dist: torch>=1.10.0
+Requires-Dist: huggingface-hub>=0.21.0
+Requires-Dist: safetensors>=0.4.3
+Provides-Extra: deepspeed
+Requires-Dist: deepspeed; extra == "deepspeed"
+Provides-Extra: dev
+Requires-Dist: black~=23.1; extra == "dev"
+Requires-Dist: hf-doc-builder>=0.3.0; extra == "dev"
+Requires-Dist: ruff~=0.6.4; extra == "dev"
+Requires-Dist: pytest<=8.0.0,>=7.2.0; extra == "dev"
+Requires-Dist: pytest-xdist; extra == "dev"
+Requires-Dist: pytest-subtests; extra == "dev"
+Requires-Dist: parameterized; extra == "dev"
+Requires-Dist: datasets; extra == "dev"
+Requires-Dist: diffusers; extra == "dev"
+Requires-Dist: evaluate; extra == "dev"
+Requires-Dist: torchdata>=0.8.0; extra == "dev"
+Requires-Dist: torchpippy>=0.2.0; extra == "dev"
+Requires-Dist: transformers; extra == "dev"
+Requires-Dist: scipy; extra == "dev"
+Requires-Dist: scikit-learn; extra == "dev"
+Requires-Dist: tqdm; extra == "dev"
+Requires-Dist: bitsandbytes; extra == "dev"
+Requires-Dist: timm; extra == "dev"
+Requires-Dist: rich; extra == "dev"
+Provides-Extra: docs
+Provides-Extra: quality
+Requires-Dist: black~=23.1; extra == "quality"
+Requires-Dist: hf-doc-builder>=0.3.0; extra == "quality"
+Requires-Dist: ruff~=0.6.4; extra == "quality"
+Provides-Extra: rich
+Requires-Dist: rich; extra == "rich"
+Provides-Extra: sagemaker
+Requires-Dist: sagemaker; extra == "sagemaker"
+Provides-Extra: test_dev
+Requires-Dist: datasets; extra == "test-dev"
+Requires-Dist: diffusers; extra == "test-dev"
+Requires-Dist: evaluate; extra == "test-dev"
+Requires-Dist: torchdata>=0.8.0; extra == "test-dev"
+Requires-Dist: torchpippy>=0.2.0; extra == "test-dev"
+Requires-Dist: transformers; extra == "test-dev"
+Requires-Dist: scipy; extra == "test-dev"
+Requires-Dist: scikit-learn; extra == "test-dev"
+Requires-Dist: tqdm; extra == "test-dev"
+Requires-Dist: bitsandbytes; extra == "test-dev"
+Requires-Dist: timm; extra == "test-dev"
+Provides-Extra: test_prod
+Requires-Dist: pytest<=8.0.0,>=7.2.0; extra == "test-prod"
+Requires-Dist: pytest-xdist; extra == "test-prod"
+Requires-Dist: pytest-subtests; extra == "test-prod"
+Requires-Dist: parameterized; extra == "test-prod"
+Provides-Extra: test_trackers
+Requires-Dist: wandb; extra == "test-trackers"
+Requires-Dist: comet-ml; extra == "test-trackers"
+Requires-Dist: tensorboard; extra == "test-trackers"
+Requires-Dist: dvclive; extra == "test-trackers"
+Provides-Extra: testing
+Requires-Dist: pytest<=8.0.0,>=7.2.0; extra == "testing"
+Requires-Dist: pytest-xdist; extra == "testing"
+Requires-Dist: pytest-subtests; extra == "testing"
+Requires-Dist: parameterized; extra == "testing"
+Requires-Dist: datasets; extra == "testing"
+Requires-Dist: diffusers; extra == "testing"
+Requires-Dist: evaluate; extra == "testing"
+Requires-Dist: torchdata>=0.8.0; extra == "testing"
+Requires-Dist: torchpippy>=0.2.0; extra == "testing"
+Requires-Dist: transformers; extra == "testing"
+Requires-Dist: scipy; extra == "testing"
+Requires-Dist: scikit-learn; extra == "testing"
+Requires-Dist: tqdm; extra == "testing"
+Requires-Dist: bitsandbytes; extra == "testing"
+Requires-Dist: timm; extra == "testing"
+
+Run your *raw* PyTorch training script on any kind of device
+
+## Easy to integrate
+
+🤗 Accelerate was created for PyTorch users who like to write the training loop of PyTorch models but are reluctant to write and maintain the boilerplate code needed to use multi-GPUs/TPU/fp16.
+
+🤗 Accelerate abstracts exactly and only the boilerplate code related to multi-GPUs/TPU/fp16 and leaves the rest of your code unchanged.
+
+Here is an example:
+
+```diff
+ import torch
+ import torch.nn.functional as F
+ from datasets import load_dataset
++ from accelerate import Accelerator
+
++ accelerator = Accelerator()
+- device = 'cpu'
++ device = accelerator.device
+
+ model = torch.nn.Transformer().to(device)
+ optimizer = torch.optim.Adam(model.parameters())
+
+ dataset = load_dataset('my_dataset')
+ data = torch.utils.data.DataLoader(dataset, shuffle=True)
+
++ model, optimizer, data = accelerator.prepare(model, optimizer, data)
+
+ model.train()
+ for epoch in range(10):
+ for source, targets in data:
+ source = source.to(device)
+ targets = targets.to(device)
+
+ optimizer.zero_grad()
+
+ output = model(source)
+ loss = F.cross_entropy(output, targets)
+
+- loss.backward()
++ accelerator.backward(loss)
+
+ optimizer.step()
+```
+
+As you can see in this example, by adding five lines to any standard PyTorch training script you can now run on any kind of single or distributed node setting (single CPU, single GPU, multi-GPU, and TPU) as well as with or without mixed precision (fp8, fp16, bf16).
+
+In particular, the same code can then be run without modification on your local machine for debugging or in your training environment.
+
+🤗 Accelerate even handles the device placement for you (which requires a few more changes to your code, but is safer in general), so you can simplify your training loop further:
+
+```diff
+ import torch
+ import torch.nn.functional as F
+ from datasets import load_dataset
++ from accelerate import Accelerator
+
+- device = 'cpu'
++ accelerator = Accelerator()
+
+- model = torch.nn.Transformer().to(device)
++ model = torch.nn.Transformer()
+ optimizer = torch.optim.Adam(model.parameters())
+
+ dataset = load_dataset('my_dataset')
+ data = torch.utils.data.DataLoader(dataset, shuffle=True)
+
++ model, optimizer, data = accelerator.prepare(model, optimizer, data)
+
+ model.train()
+ for epoch in range(10):
+ for source, targets in data:
+- source = source.to(device)
+- targets = targets.to(device)
+
+ optimizer.zero_grad()
+
+ output = model(source)
+ loss = F.cross_entropy(output, targets)
+
+- loss.backward()
++ accelerator.backward(loss)
+
+ optimizer.step()
+```
+
+Want to learn more? Check out the [documentation](https://huggingface.co/docs/accelerate) or have a look at our [examples](https://github.com/huggingface/accelerate/tree/main/examples).
+
+## Launching script
+
+🤗 Accelerate also provides an optional CLI tool that allows you to quickly configure and test your training environment before launching the scripts. No need to remember how to use `torch.distributed.run` or to write a specific launcher for TPU training!
+On your machine(s) just run:
+
+```bash
+accelerate config
+```
+
+and answer the questions asked. This will generate a config file that will be used automatically to properly set the default options when doing
+
+```bash
+accelerate launch my_script.py --args_to_my_script
+```
+
+For instance, here is how you would run the GLUE example on the MRPC task (from the root of the repo):
+
+```bash
+accelerate launch examples/nlp_example.py
+```
+
+This CLI tool is **optional**, and you can still use `python my_script.py` or `torchrun my_script.py` at your convenience.
+
+You can also directly pass in the arguments you would give to `torchrun` as arguments to `accelerate launch` if you wish not to run `accelerate config`.
+
+For example, here is how to launch on two GPUs:
+
+```bash
+accelerate launch --multi_gpu --num_processes 2 examples/nlp_example.py
+```
+
+To learn more, check the CLI documentation available [here](https://huggingface.co/docs/accelerate/package_reference/cli).
+
+Or view the configuration zoo [here](https://github.com/huggingface/accelerate/blob/main/examples/config_yaml_templates/).
+
+## Launching a multi-CPU run using MPI
+
+🤗 Here is another way to launch a multi-CPU run using MPI. You can learn how to install Open MPI on [this page](https://www.open-mpi.org/faq/?category=building#easy-build). You can use Intel MPI or MVAPICH as well.
+Once you have MPI set up on your cluster, just run:
+```bash
+accelerate config
+```
+Answer the questions that are asked, selecting multi-CPU as the run type, and answer "yes" when asked if you want accelerate to launch mpirun.
+Then, use `accelerate launch` with your script like:
+```bash
+accelerate launch examples/nlp_example.py
+```
+Alternatively, you can use mpirun directly, without the CLI:
+```bash
+mpirun -np 2 python examples/nlp_example.py
+```
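+
+Here `-np 2` tells MPI to start two processes; set it to the total number of processes you want across your machines.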
+
+## Launching training using DeepSpeed
+
+🤗 Accelerate supports training on single/multiple GPUs using DeepSpeed. To use it, you don't need to change anything in your training code; you can set everything using just `accelerate config`. However, if you want to tweak your DeepSpeed-related args from your Python script, we provide the `DeepSpeedPlugin`.
+
+```python
+from accelerate import Accelerator, DeepSpeedPlugin
+
+# DeepSpeed needs to know your gradient accumulation steps beforehand, so don't forget to pass it.
+# Remember you still need to do gradient accumulation yourself, just as you would without DeepSpeed.
+deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=2)
+accelerator = Accelerator(mixed_precision='fp16', deepspeed_plugin=deepspeed_plugin)
+
+# How to save your 🤗 Transformer?
+accelerator.wait_for_everyone()
+unwrapped_model = accelerator.unwrap_model(model)
+unwrapped_model.save_pretrained(save_dir, save_function=accelerator.save, state_dict=accelerator.get_state_dict(model))
+```
+
+Note: DeepSpeed support is experimental for now. If you run into a problem, please open an issue.
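+
+Since the plugin takes the accumulation steps up front but leaves the loop to you, here is a minimal sketch of what that manual loop could look like (an illustration only, assuming `model`, `optimizer`, and `data` were prepared with `accelerator.prepare` as in the earlier examples):
+
+```python
+gradient_accumulation_steps = 2  # kept in sync with the DeepSpeedPlugin above
+
+model.train()
+for step, (source, targets) in enumerate(data):
+    output = model(source)
+    loss = F.cross_entropy(output, targets)
+    accelerator.backward(loss)
+    # Only step and reset the optimizer once every `gradient_accumulation_steps` batches
+    if (step + 1) % gradient_accumulation_steps == 0:
+        optimizer.step()
+        optimizer.zero_grad()
+```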
+
+## Launching your training from a notebook
+
+🤗 Accelerate also provides a `notebook_launcher` function you can use in a notebook to launch distributed training. This is especially useful for Colab or Kaggle notebooks with a TPU backend. Just define your training loop in a `training_function`, then in your last cell add:
+
+```python
+from accelerate import notebook_launcher
+
+notebook_launcher(training_function)
+```
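+
+If you need more than one process, `notebook_launcher` also takes a `num_processes` argument, e.g. `notebook_launcher(training_function, num_processes=2)`.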
+
+An example can be found in [this notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb) ([open in Colab](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb)).
+
+## Why should I use 🤗 Accelerate?
+
+You should use 🤗 Accelerate when you want to easily run your training scripts in a distributed environment without having to renounce full control over your training loop. This is not a high-level framework above PyTorch, just a thin wrapper so you don't have to learn a new library. In fact, the whole API of 🤗 Accelerate is in one class, the `Accelerator` object.
+
+## Why shouldn't I use 🤗 Accelerate?
+
+You shouldn't use 🤗 Accelerate if you don't want to write a training loop yourself. There are plenty of high-level libraries above PyTorch that will offer you that, 🤗 Accelerate is not one of them.
+
+## Frameworks using 🤗 Accelerate
+
+If you like the simplicity of 🤗 Accelerate but would prefer a higher-level abstraction around its capabilities, some frameworks and libraries that are built on top of 🤗 Accelerate are listed below:
+
+* [Amphion](https://github.com/open-mmlab/Amphion) is a toolkit for Audio, Music, and Speech Generation. Its purpose is to support reproducible research and help junior researchers and engineers get started in the field of audio, music, and speech generation research and development.
+* [Animus](https://github.com/Scitator/animus) is a minimalistic framework to run machine learning experiments. Animus highlights common "breakpoints" in ML experiments and provides a unified interface for them within [IExperiment](https://github.com/Scitator/animus/blob/main/animus/core.py#L76).
+* [Catalyst](https://github.com/catalyst-team/catalyst#getting-started) is a PyTorch framework for Deep Learning Research and Development. It focuses on reproducibility, rapid experimentation, and codebase reuse so you can create something new rather than write yet another train loop. Catalyst provides a [Runner](https://catalyst-team.github.io/catalyst/api/core.html#runner) to connect all parts of the experiment: hardware backend, data transformations, model training, and inference logic.
+* [fastai](https://github.com/fastai/fastai#installing) is a PyTorch framework for Deep Learning that simplifies training fast and accurate neural nets using modern best practices. fastai provides a [Learner](https://docs.fast.ai/learner.html#Learner) to handle the training, fine-tuning, and inference of deep learning algorithms.
+* [Finetuner](https://github.com/jina-ai/finetuner) is a service that enables models to create higher-quality embeddings for semantic search, visual similarity search, cross-modal text<->image search, recommendation systems, clustering, duplication detection, anomaly detection, or other uses.
+* [InvokeAI](https://github.com/invoke-ai/InvokeAI) is a creative engine for Stable Diffusion models, offering industry-leading WebUI, terminal usage support, and serves as the foundation for many commercial products.
+* [Kornia](https://kornia.readthedocs.io/en/latest/get-started/introduction.html) is a differentiable library that allows classical computer vision to be integrated into deep learning models. Kornia provides a [Trainer](https://kornia.readthedocs.io/en/latest/x.html#kornia.x.Trainer) with the specific purpose to train and fine-tune the supported deep learning algorithms within the library.
+* [Open Assistant](https://projects.laion.ai/Open-Assistant/) is a chat-based assistant that understands tasks, can interact with third-party systems, and retrieve information dynamically to do so.
+* [pytorch-accelerated](https://github.com/Chris-hughes10/pytorch-accelerated) is a lightweight training library, with a streamlined feature set centered around a general-purpose [Trainer](https://pytorch-accelerated.readthedocs.io/en/latest/trainer.html), that places a huge emphasis on simplicity and transparency; enabling users to understand exactly what is going on under the hood, but without having to write and maintain the boilerplate themselves!
+* [Stable Diffusion web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) is an open-source browser-based easy-to-use interface based on the Gradio library for Stable Diffusion.
+* [torchkeras](https://github.com/lyhue1991/torchkeras) is a simple tool for training PyTorch models in a Keras style; a dynamic and beautiful plot is provided in the notebook to monitor your loss or metric.
+* [transformers](https://github.com/huggingface/transformers) is a tool for helping train state-of-the-art machine learning models in PyTorch, TensorFlow, and JAX (Accelerate is the backend for the PyTorch side).
+
+
+## Installation
+
+This repository is tested on Python 3.9+ and PyTorch 1.10.0+.
+
+You should install 🤗 Accelerate in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
+
+First, create a virtual environment with the version of Python you're going to use and activate it.
+
+Next, you will need to install PyTorch: refer to the [official installation page](https://pytorch.org/get-started/locally/#start-locally) for the specific install command for your platform. Then 🤗 Accelerate can be installed using pip as follows:
+
+```bash
+pip install accelerate
+```
+
+## Supported integrations
+
+- CPU only
+- multi-CPU on one node (machine)
+- multi-CPU on several nodes (machines)
+- single GPU
+- multi-GPU on one node (machine)
+- multi-GPU on several nodes (machines)
+- TPU
+- FP16/BFloat16 mixed precision
+- FP8 mixed precision with [Transformer Engine](https://github.com/NVIDIA/TransformerEngine) or [MS-AMP](https://github.com/Azure/MS-AMP/)
+- DeepSpeed support (Experimental)
+- PyTorch Fully Sharded Data Parallel (FSDP) support (Experimental)
+- Megatron-LM support (Experimental)
+
+## Citing 🤗 Accelerate
+
+If you use 🤗 Accelerate in your publication, please cite it by using the following BibTeX entry.
+
+```bibtex
+@Misc{accelerate,
+ title = {Accelerate: Training and inference at scale made simple, efficient and adaptable.},
+ author = {Sylvain Gugger and Lysandre Debut and Thomas Wolf and Philipp Schmid and Zachary Mueller and Sourab Mangrulkar and Marc Sun and Benjamin Bossan},
+ howpublished = {\url{https://github.com/huggingface/accelerate}},
+ year = {2022}
+}
+```
diff --git a/minigpt2/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/REQUESTED b/minigpt2/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/minigpt2/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/WHEEL b/minigpt2/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..08519a6603c2a9e5707d1c0cca7dc567c56ab5be
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.44.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/minigpt2/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/entry_points.txt b/minigpt2/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8b9bf6b798b250a47a3febdf0e32c88507fbf86d
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/entry_points.txt
@@ -0,0 +1,6 @@
+[console_scripts]
+accelerate = accelerate.commands.accelerate_cli:main
+accelerate-config = accelerate.commands.config:main
+accelerate-estimate-memory = accelerate.commands.estimate:main
+accelerate-launch = accelerate.commands.launch:main
+accelerate-merge-weights = accelerate.commands.merge:main
diff --git a/minigpt2/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/top_level.txt b/minigpt2/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a9368375be0e0e13fdad0eea4b92541bd9e1f594
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/accelerate-1.2.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+accelerate
diff --git a/minigpt2/lib/python3.10/site-packages/cachetools-5.5.2.dist-info/INSTALLER b/minigpt2/lib/python3.10/site-packages/cachetools-5.5.2.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/cachetools-5.5.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/minigpt2/lib/python3.10/site-packages/cachetools-5.5.2.dist-info/RECORD b/minigpt2/lib/python3.10/site-packages/cachetools-5.5.2.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..bbec7f8700f64bd2372bd370a41c8896fb09c563
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/cachetools-5.5.2.dist-info/RECORD
@@ -0,0 +1,14 @@
+cachetools-5.5.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+cachetools-5.5.2.dist-info/LICENSE,sha256=I8Tv96HAJ6l3oLecRJfhdYLDNMXxfvasjKC1LR59hBc,1085
+cachetools-5.5.2.dist-info/METADATA,sha256=YY8fmEiV8he5oa9hC4S6sjLQKrDuoQhx2mQTI7Iqf5Y,5379
+cachetools-5.5.2.dist-info/RECORD,,
+cachetools-5.5.2.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+cachetools-5.5.2.dist-info/top_level.txt,sha256=ai2FH78TGwoBcCgVfoqbzk5IQCtnDukdSs4zKuVPvDs,11
+cachetools/__init__.py,sha256=cutUU6fB1bIMih0ro_TVCPKJTPwM-qP4fS_PyNfQlWs,21803
+cachetools/__pycache__/__init__.cpython-310.pyc,,
+cachetools/__pycache__/_decorators.cpython-310.pyc,,
+cachetools/__pycache__/func.cpython-310.pyc,,
+cachetools/__pycache__/keys.cpython-310.pyc,,
+cachetools/_decorators.py,sha256=4_u0GL89t2BOLGwnK8CueiFtyHKK2zydoHj9aqnsMM4,3832
+cachetools/func.py,sha256=aOVfSkuNWMRADpkHZGK7LeJ_VZ8wljzbRwIAliOuhAg,3719
+cachetools/keys.py,sha256=AOgfoi-oioBOnEEk115_9qs0HKISrYnbcV4F0hyZ1yk,1777
diff --git a/minigpt2/lib/python3.10/site-packages/ftfy/__pycache__/badness.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ftfy/__pycache__/badness.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e878a510f270a8b7bd86327b347120de3ef5fd2
Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ftfy/__pycache__/badness.cpython-310.pyc differ
diff --git a/minigpt2/lib/python3.10/site-packages/ftfy/__pycache__/chardata.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ftfy/__pycache__/chardata.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..18c4ee8eaf8af29412971d131c7cad896392f490
Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ftfy/__pycache__/chardata.cpython-310.pyc differ
diff --git a/minigpt2/lib/python3.10/site-packages/ftfy/__pycache__/cli.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ftfy/__pycache__/cli.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6afb774db74fb2445b1e7c9203d4299309ded200
Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ftfy/__pycache__/cli.cpython-310.pyc differ
diff --git a/minigpt2/lib/python3.10/site-packages/ftfy/__pycache__/fixes.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ftfy/__pycache__/fixes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..093781c5f13678d538752d4a2ae84562730fb5c6
Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ftfy/__pycache__/fixes.cpython-310.pyc differ
diff --git a/minigpt2/lib/python3.10/site-packages/ftfy/__pycache__/formatting.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ftfy/__pycache__/formatting.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6407d5a4e2b8f2170544f6974ac064f72dea0c9a
Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ftfy/__pycache__/formatting.cpython-310.pyc differ
diff --git a/minigpt2/lib/python3.10/site-packages/ftfy/bad_codecs/__init__.py b/minigpt2/lib/python3.10/site-packages/ftfy/bad_codecs/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a449a38ed1ca3e787ba3b97cf48ecf8c3f15ec06
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/ftfy/bad_codecs/__init__.py
@@ -0,0 +1,101 @@
+r"""
+The `ftfy.bad_codecs` module gives Python the ability to decode some common,
+flawed encodings.
+
+Python does not want you to be sloppy with your text. Its encoders and decoders
+("codecs") follow the relevant standards whenever possible, which means that
+when you get text that *doesn't* follow those standards, you'll probably fail
+to decode it. Or you might succeed at decoding it for implementation-specific
+reasons, which is perhaps worse.
+
+There are some encodings out there that Python wishes didn't exist, which are
+widely used outside of Python:
+
+- "utf-8-variants", a family of not-quite-UTF-8 encodings, including the
+ ever-popular CESU-8 and "Java modified UTF-8".
+- "Sloppy" versions of character map encodings, where bytes that don't map to
+ anything will instead map to the Unicode character with the same number.
+
+Simply importing this module, or in fact any part of the `ftfy` package, will
+make these new "bad codecs" available to Python through the standard Codecs
+API. You never have to actually call any functions inside `ftfy.bad_codecs`.
+
+However, if you want to call something because your code checker insists on it,
+you can call ``ftfy.bad_codecs.ok()``.
+
+A quick example of decoding text that's encoded in CESU-8:
+
+ >>> import ftfy.bad_codecs
+ >>> print(b'\xed\xa0\xbd\xed\xb8\x8d'.decode('utf-8-variants'))
+ 😍
+"""
+
+import codecs
+from encodings import normalize_encoding
+from typing import Optional
+
+_CACHE: dict[str, codecs.CodecInfo] = {}
+
+# Define some aliases for 'utf-8-variants'. All hyphens get turned into
+# underscores, because of `normalize_encoding`.
+UTF8_VAR_NAMES = (
+ "utf_8_variants",
+ "utf8_variants",
+ "utf_8_variant",
+ "utf8_variant",
+ "utf_8_var",
+ "utf8_var",
+ "cesu_8",
+ "cesu8",
+ "java_utf_8",
+ "java_utf8",
+)
+
+
+def search_function(encoding: str) -> Optional[codecs.CodecInfo]:
+ """
+ Register our "bad codecs" with Python's codecs API. This involves adding
+ a search function that takes in an encoding name, and returns a codec
+ for that encoding if it knows one, or None if it doesn't.
+
+ The encodings this will match are:
+
+ - Encodings of the form 'sloppy-windows-NNNN' or 'sloppy-iso-8859-N',
+ where the non-sloppy version is an encoding that leaves some bytes
+ unmapped to characters.
+ - The 'utf-8-variants' encoding, which has the several aliases seen
+ above.
+ """
+ if encoding in _CACHE:
+ return _CACHE[encoding]
+
+ norm_encoding = normalize_encoding(encoding)
+ codec = None
+ if norm_encoding in UTF8_VAR_NAMES:
+ from ftfy.bad_codecs.utf8_variants import CODEC_INFO
+
+ codec = CODEC_INFO
+ elif norm_encoding.startswith("sloppy_"):
+ from ftfy.bad_codecs.sloppy import CODECS
+
+ codec = CODECS.get(norm_encoding)
+
+ if codec is not None:
+ _CACHE[encoding] = codec
+
+ return codec
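+
+
+# For example, once `codecs.register(search_function)` runs at the bottom of
+# this module, the aliases above resolve through the standard codecs API
+# (an illustrative session):
+#
+#     >>> import codecs
+#     >>> import ftfy.bad_codecs
+#     >>> codecs.lookup('utf-8-variants').name
+#     'utf-8-variants'
+#     >>> b'\xed\xa0\xbd\xed\xb8\x8d'.decode('cesu8')
+#     '😍'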
+
+
+def ok() -> None:
+ """
+ A feel-good function that gives you something to call after importing
+ this package.
+
+ Why is this here? Pyflakes. Pyflakes gets upset when you import a module
+ and appear not to use it. It doesn't know that you're using it when
+ you use the ``unicode.encode`` and ``bytes.decode`` methods with certain
+ encodings.
+ """
+
+
+codecs.register(search_function)
diff --git a/minigpt2/lib/python3.10/site-packages/ftfy/bad_codecs/__pycache__/__init__.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ftfy/bad_codecs/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ac347c2869bd1d3c8b6ca6946be7192d51a7e311
Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ftfy/bad_codecs/__pycache__/__init__.cpython-310.pyc differ
diff --git a/minigpt2/lib/python3.10/site-packages/ftfy/bad_codecs/__pycache__/sloppy.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ftfy/bad_codecs/__pycache__/sloppy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6ab042d9aad96cce17ca2d03d8e236c7484a556e
Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ftfy/bad_codecs/__pycache__/sloppy.cpython-310.pyc differ
diff --git a/minigpt2/lib/python3.10/site-packages/ftfy/bad_codecs/__pycache__/utf8_variants.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ftfy/bad_codecs/__pycache__/utf8_variants.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6d79478c65f580fb1aa8f468620d7ba44a059140
Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ftfy/bad_codecs/__pycache__/utf8_variants.cpython-310.pyc differ
diff --git a/minigpt2/lib/python3.10/site-packages/ftfy/bad_codecs/sloppy.py b/minigpt2/lib/python3.10/site-packages/ftfy/bad_codecs/sloppy.py
new file mode 100644
index 0000000000000000000000000000000000000000..656f01cf260feda58a15492bce82758c1b594d86
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/ftfy/bad_codecs/sloppy.py
@@ -0,0 +1,166 @@
+r"""
+`ftfy.bad_codecs.sloppy` provides character-map encodings that fill their "holes"
+in a messy but common way: by outputting the Unicode codepoints with the same
+numbers.
+
+This is incredibly ugly, and it's also in the HTML5 standard.
+
+A single-byte encoding maps each byte to a Unicode character, except that some
+bytes are left unmapped. In the commonly-used Windows-1252 encoding, for
+example, bytes 0x81 and 0x8D, among others, have no meaning.
+
+Python, wanting to preserve some sense of decorum, will handle these bytes
+as errors. But Windows knows that 0x81 and 0x8D are possible bytes and they're
+different from each other. It just hasn't defined what they are in terms of
+Unicode.
+
+Software that has to interoperate with Windows-1252 and Unicode -- such as all
+the common Web browsers -- will pick some Unicode characters for them to map
+to, and the characters they pick are the Unicode characters with the same
+numbers: U+0081 and U+008D. This is the same as what Latin-1 does, and the
+resulting characters tend to fall into a range of Unicode that's set aside for
+obsolete Latin-1 control characters anyway.
+
+These sloppy codecs let Python do the same thing, thus interoperating with
+other software that works this way. It defines a sloppy version of many
+single-byte encodings with holes. (There is no need for a sloppy version of
+an encoding without holes: for example, there is no such thing as
+sloppy-iso-8859-2 or sloppy-macroman.)
+
+The following encodings will become defined:
+
+- sloppy-windows-1250 (Central European, sort of based on ISO-8859-2)
+- sloppy-windows-1251 (Cyrillic)
+- sloppy-windows-1252 (Western European, based on Latin-1)
+- sloppy-windows-1253 (Greek, sort of based on ISO-8859-7)
+- sloppy-windows-1254 (Turkish, based on ISO-8859-9)
+- sloppy-windows-1255 (Hebrew, based on ISO-8859-8)
+- sloppy-windows-1256 (Arabic)
+- sloppy-windows-1257 (Baltic, based on ISO-8859-13)
+- sloppy-windows-1258 (Vietnamese)
+- sloppy-cp874 (Thai, based on ISO-8859-11)
+- sloppy-iso-8859-3 (Maltese and Esperanto, I guess)
+- sloppy-iso-8859-6 (different Arabic)
+- sloppy-iso-8859-7 (Greek)
+- sloppy-iso-8859-8 (Hebrew)
+- sloppy-iso-8859-11 (Thai)
+
+Aliases such as "sloppy-cp1252" for "sloppy-windows-1252" will also be
+defined.
+
+Five of these encodings (`sloppy-windows-1250` through `sloppy-windows-1254`)
+are used within ftfy.
+
+Here are some examples, using :func:`ftfy.explain_unicode` to illustrate how
+sloppy-windows-1252 merges Windows-1252 with Latin-1:
+
+ >>> from ftfy import explain_unicode
+ >>> some_bytes = b'\x80\x81\x82'
+ >>> explain_unicode(some_bytes.decode('latin-1'))
+ U+0080 \x80 [Cc]
+ U+0081 \x81 [Cc]
+ U+0082 \x82 [Cc]
+
+ >>> explain_unicode(some_bytes.decode('windows-1252', 'replace'))
+ U+20AC € [Sc] EURO SIGN
+ U+FFFD � [So] REPLACEMENT CHARACTER
+ U+201A ‚ [Ps] SINGLE LOW-9 QUOTATION MARK
+
+ >>> explain_unicode(some_bytes.decode('sloppy-windows-1252'))
+ U+20AC € [Sc] EURO SIGN
+ U+0081 \x81 [Cc]
+ U+201A ‚ [Ps] SINGLE LOW-9 QUOTATION MARK
+"""
+
+from __future__ import annotations
+
+import codecs
+from encodings import normalize_encoding
+
+REPLACEMENT_CHAR = "\ufffd"
+
+
+def make_sloppy_codec(encoding: str) -> codecs.CodecInfo:
+ """
+ Take a codec name, and return a 'sloppy' version of that codec that can
+ encode and decode the unassigned bytes in that encoding.
+
+ Single-byte encodings in the standard library are defined using some
+ boilerplate classes surrounding the functions that do the actual work,
+ `codecs.charmap_decode` and `charmap_encode`. This function, given an
+ encoding name, *defines* those boilerplate classes.
+ """
+ # Make a bytestring of all 256 possible bytes.
+ all_bytes = bytes(range(256))
+
+ # Get a list of what they would decode to in Latin-1.
+ sloppy_chars = list(all_bytes.decode("latin-1"))
+
+ # Get a list of what they decode to in the given encoding. Use the
+ # replacement character for unassigned bytes.
+ decoded_chars = all_bytes.decode(encoding, errors="replace")
+
+ # Update the sloppy_chars list. Each byte that was successfully decoded
+ # gets its decoded value in the list. The unassigned bytes are left as
+ # they are, which gives their decoding in Latin-1.
+ for i, char in enumerate(decoded_chars):
+ if char != REPLACEMENT_CHAR:
+ sloppy_chars[i] = char
+
+ # For ftfy's own purposes, we're going to allow byte 1A, the "Substitute"
+ # control code, to encode the Unicode replacement character U+FFFD.
+ sloppy_chars[0x1A] = REPLACEMENT_CHAR
+
+ # Create the data structures that tell the charmap methods how to encode
+ # and decode in this sloppy encoding.
+ decoding_table = "".join(sloppy_chars)
+ encoding_table = codecs.charmap_build(decoding_table)
+
+ # Now produce all the class boilerplate. Look at the Python source for
+ # `encodings.cp1252` for comparison; this is almost exactly the same,
+    # except I made it follow PEP 8.
+ class Codec(codecs.Codec):
+ def encode(self, input: str, errors: str | None = "strict") -> tuple[bytes, int]:
+ return codecs.charmap_encode(input, errors, encoding_table)
+
+ def decode(self, input: bytes, errors: str | None = "strict") -> tuple[str, int]:
+ return codecs.charmap_decode(input, errors, decoding_table) # type: ignore[arg-type]
+
+ class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input: str, final: bool = False) -> bytes:
+ return codecs.charmap_encode(input, self.errors, encoding_table)[0]
+
+ class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input: bytes, final: bool = False) -> str: # type: ignore[override]
+ return codecs.charmap_decode(input, self.errors, decoding_table)[0] # type: ignore[arg-type]
+
+ class StreamWriter(Codec, codecs.StreamWriter):
+ pass
+
+ class StreamReader(Codec, codecs.StreamReader):
+ pass
+
+ return codecs.CodecInfo(
+ name="sloppy-" + encoding,
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
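+
+
+# A quick illustration of the effect: in strict windows-1252, byte 0x81 is
+# unassigned, but the sloppy codec falls back to Latin-1's U+0081 while still
+# decoding assigned bytes such as 0x80 the windows-1252 way:
+#
+#     >>> import ftfy.bad_codecs
+#     >>> b'\x80\x81'.decode('windows-1252', 'replace')
+#     '€�'
+#     >>> b'\x80\x81'.decode('sloppy-windows-1252')
+#     '€\x81'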
+
+
+# Define a codec for each incomplete encoding. The resulting CODECS dictionary
+# can be used by the main module of ftfy.bad_codecs.
+CODECS = {}
+INCOMPLETE_ENCODINGS = (
+ [f"windows-{num}" for num in range(1250, 1259)]
+ + [f"iso-8859-{num}" for num in (3, 6, 7, 8, 11)]
+ + [f"cp{num}" for num in range(1250, 1259)]
+ + ["cp874"]
+)
+
+for _encoding in INCOMPLETE_ENCODINGS:
+ _new_name = normalize_encoding("sloppy-" + _encoding)
+ CODECS[_new_name] = make_sloppy_codec(_encoding)
diff --git a/minigpt2/lib/python3.10/site-packages/ftfy/bad_codecs/utf8_variants.py b/minigpt2/lib/python3.10/site-packages/ftfy/bad_codecs/utf8_variants.py
new file mode 100644
index 0000000000000000000000000000000000000000..c15a3cf18431668c3817f9d9ff7a5478b4ccc5f8
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/ftfy/bad_codecs/utf8_variants.py
@@ -0,0 +1,256 @@
+r"""
+This file defines a codec called "utf-8-variants" (or "utf-8-var"), which can
+decode text that's been encoded with a popular non-standard version of UTF-8.
+This includes CESU-8, the accidental encoding made by layering UTF-8 on top of
+UTF-16, as well as Java's twist on CESU-8 that contains a two-byte encoding for
+codepoint 0.
+
+This is particularly relevant in Python 3, which provides no other way of
+decoding CESU-8 [1]_.
+
+The easiest way to use the codec is to simply import `ftfy.bad_codecs`:
+
+ >>> import ftfy.bad_codecs
+ >>> result = b'here comes a null! \xc0\x80'.decode('utf-8-var')
+ >>> print(repr(result).lstrip('u'))
+ 'here comes a null! \x00'
+
+The codec does not at all enforce "correct" CESU-8. For example, the Unicode
+Consortium's not-quite-standard describing CESU-8 requires that there is only
+one possible encoding of any character, so it does not allow mixing of valid
+UTF-8 and CESU-8. This codec *does* allow that, just like Python 2's UTF-8
+decoder does.
+
+Characters in the Basic Multilingual Plane still have only one encoding. This
+codec still enforces the rule, within the BMP, that characters must appear in
+their shortest form. There is one exception: the sequence of bytes `0xc0 0x80`,
+instead of just `0x00`, may be used to encode the null character `U+0000`, like
+in Java.
+
+If you encode with this codec, you get legitimate UTF-8. Decoding with this
+codec and then re-encoding is not idempotent, although encoding and then
+decoding is. So this module won't produce CESU-8 for you. Look for that
+functionality in the sister module, "Breaks Text For You", coming approximately
+never.
+
+.. [1] In a pinch, you can decode CESU-8 in Python 2 using the UTF-8 codec:
+ first decode the bytes (incorrectly), then encode them, then decode them
+ again, using UTF-8 as the codec every time. But Python 2 is dead, so use
+ ftfy instead.
+"""
+
+import codecs
+import re
+from encodings.utf_8 import (
+ IncrementalDecoder as UTF8IncrementalDecoder,
+)
+from encodings.utf_8 import (
+ IncrementalEncoder as UTF8IncrementalEncoder,
+)
+from typing import Callable, Optional
+
+NAME = "utf-8-variants"
+
+# This regular expression matches all possible six-byte CESU-8 sequences,
+# plus truncations of them at the end of the string. (If any of the
+# subgroups matches $, then all the subgroups after it also have to match $,
+# as there are no more characters to match.)
+CESU8_EXPR = (
+ b"("
+ b"\xed"
+ b"([\xa0-\xaf]|$)"
+ b"([\x80-\xbf]|$)"
+ b"(\xed|$)"
+ b"([\xb0-\xbf]|$)"
+ b"([\x80-\xbf]|$)"
+ b")"
+)
+
+CESU8_RE = re.compile(CESU8_EXPR)
+
+# This expression matches isolated surrogate characters that aren't
+# CESU-8, which have to be handled carefully on Python 2.
+SURROGATE_EXPR = b"(\xed([\xa0-\xbf]|$)([\x80-\xbf]|$))"
+
+# This expression matches the Java encoding of U+0, including if it's
+# truncated and we need more bytes.
+NULL_EXPR = b"(\xc0(\x80|$))"
+
+# This regex matches cases that we need to decode differently from
+# standard UTF-8.
+SPECIAL_BYTES_RE = re.compile(b"|".join([NULL_EXPR, CESU8_EXPR, SURROGATE_EXPR]))
+
+
+class IncrementalDecoder(UTF8IncrementalDecoder):
+ """
+ An incremental decoder that extends Python's built-in UTF-8 decoder.
+
+    This decoder needs to take in bytes, possibly arriving in a stream, and
+ output the correctly decoded text. The general strategy for doing this
+ is to fall back on the real UTF-8 decoder whenever possible, because
+ the real UTF-8 decoder is way optimized, but to call specialized methods
+    we define here for the cases the real decoder isn't expecting.
+ """
+
+ @staticmethod
+ def _buffer_decode( # type: ignore[override]
+ input: bytes, errors: Optional[str], final: bool
+ ) -> tuple[str, int]:
+ """
+ Decode bytes that may be arriving in a stream, following the Codecs
+ API.
+
+ `input` is the incoming sequence of bytes. `errors` tells us how to
+ handle errors, though we delegate all error-handling cases to the real
+ UTF-8 decoder to ensure correct behavior. `final` indicates whether
+ this is the end of the sequence, in which case we should raise an
+ error given incomplete input.
+
+ Returns as much decoded text as possible, and the number of bytes
+ consumed.
+ """
+ # decoded_segments are the pieces of text we have decoded so far,
+ # and position is our current position in the byte string. (Bytes
+ # before this position have been consumed, and bytes after it have
+ # yet to be decoded.)
+ decoded_segments = []
+ position = 0
+ while True:
+ # Use _buffer_decode_step to decode a segment of text.
+ decoded, consumed = IncrementalDecoder._buffer_decode_step(
+ input[position:], errors, final
+ )
+ if consumed == 0:
+ # Either there's nothing left to decode, or we need to wait
+ # for more input. Either way, we're done for now.
+ break
+
+ # Append the decoded text to the list, and update our position.
+ decoded_segments.append(decoded)
+ position += consumed
+
+ if final:
+ # _buffer_decode_step must consume all the bytes when `final` is
+ # true.
+ assert position == len(input)
+
+ return "".join(decoded_segments), position
+
+ @staticmethod
+ def _buffer_decode_step(input: bytes, errors: Optional[str], final: bool) -> tuple[str, int]:
+ """
+ There are three possibilities for each decoding step:
+
+ - Decode as much real UTF-8 as possible.
+ - Decode a six-byte CESU-8 sequence at the current position.
+ - Decode a Java-style null at the current position.
+
+ This method figures out which step is appropriate, and does it.
+ """
+ # Get a reference to the superclass method that we'll be using for
+ # most of the real work.
+ sup = UTF8IncrementalDecoder._buffer_decode
+
+ # Find the next byte position that indicates a variant of UTF-8.
+ match = SPECIAL_BYTES_RE.search(input)
+ if match is None:
+ return sup(input, errors, final)
+
+ cutoff = match.start()
+ if cutoff > 0:
+ return sup(input[:cutoff], errors, True)
+
+ # Some byte sequence that we intend to handle specially matches
+ # at the beginning of the input.
+ if input.startswith(b"\xc0"):
+ if len(input) > 1:
+ # Decode the two-byte sequence 0xc0 0x80.
+ return "\u0000", 2
+ else:
+ if final:
+ # We hit the end of the stream. Let the superclass method
+ # handle it.
+ return sup(input, errors, True)
+ else:
+ # Wait to see another byte.
+ return "", 0
+ else:
+ # Decode a possible six-byte sequence starting with 0xed.
+ return IncrementalDecoder._buffer_decode_surrogates(sup, input, errors, final)
+
+ @staticmethod
+ def _buffer_decode_surrogates(
+ sup: Callable[[bytes, Optional[str], bool], tuple[str, int]],
+ input: bytes,
+ errors: Optional[str],
+ final: bool,
+ ) -> tuple[str, int]:
+ """
+ When we have improperly encoded surrogates, we can still see the
+ bits that they were meant to represent.
+
+ The surrogates were meant to encode a 20-bit number, to which we
+ add 0x10000 to get a codepoint. That 20-bit number now appears in
+ this form:
+
+ 11101101 1010abcd 10efghij 11101101 1011klmn 10opqrst
+
+ The CESU8_RE above matches byte sequences of this form. Then we need
+ to extract the bits and assemble a codepoint number from them.
+ """
+ if len(input) < 6:
+ if final:
+ # We found 0xed near the end of the stream, and there aren't
+ # six bytes to decode. Delegate to the superclass method to
+ # handle it as normal UTF-8. It might be a Hangul character
+ # or an error.
+ return sup(input, errors, final)
+ else:
+ # We found a surrogate, the stream isn't over yet, and we don't
+ # know enough of the following bytes to decode anything, so
+ # consume zero bytes and wait.
+ return "", 0
+ else:
+ if CESU8_RE.match(input):
+ # Given this is a CESU-8 sequence, do some math to pull out
+ # the intended 20-bit value, and consume six bytes.
+ codepoint = (
+ ((input[1] & 0x0F) << 16)
+ + ((input[2] & 0x3F) << 10)
+ + ((input[4] & 0x0F) << 6)
+ + (input[5] & 0x3F)
+ + 0x10000
+ )
+ return chr(codepoint), 6
+ else:
+ # This looked like a CESU-8 sequence, but it wasn't one.
+ # 0xed indicates the start of a three-byte sequence, so give
+ # three bytes to the superclass to decode as usual.
+ return sup(input[:3], errors, False)
+
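+# Worked example of the arithmetic above: the CESU-8 bytes
+# ED A0 BD ED B8 8D (the module docstring's example) yield
+#
+#     ((0xA0 & 0x0F) << 16) + ((0xBD & 0x3F) << 10)
+#         + ((0xB8 & 0x0F) << 6) + (0x8D & 0x3F) + 0x10000 == 0x1F60D
+#
+# which is U+1F60D, 😍.
+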
+
+# The encoder is identical to UTF-8.
+IncrementalEncoder = UTF8IncrementalEncoder
+
+
+class StreamWriter(codecs.StreamWriter):
+ @staticmethod
+ def encode(input: str, errors: str = "strict") -> tuple[bytes, int]:
+ return IncrementalEncoder(errors).encode(input, final=True), len(input)
+
+
+class StreamReader(codecs.StreamReader):
+ @staticmethod
+ def decode(input: bytes, errors: str = "strict") -> tuple[str, int]:
+ return IncrementalDecoder(errors).decode(input, final=True), len(input)
+
+
+CODEC_INFO = codecs.CodecInfo(
+ name=NAME,
+ encode=StreamWriter.encode,
+ decode=StreamReader.decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+)
diff --git a/minigpt2/lib/python3.10/site-packages/ftfy/badness.py b/minigpt2/lib/python3.10/site-packages/ftfy/badness.py
new file mode 100644
index 0000000000000000000000000000000000000000..38ec1f44c44cdd3eba35eaa0aaf823ea37fbe0d8
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/ftfy/badness.py
@@ -0,0 +1,420 @@
+"""
+`ftfy.badness` contains a heuristic that detects likely mojibake.
+
+This heuristic signals to ftfy which segments of text need to be fixed, and
+also indicates when the text can stop being fixed.
+
+The design of this heuristic is that we categorize the approximately 400
+Unicode characters that occur in UTF-8 mojibake, specifically the characters
+that come from mixing up UTF-8 with the other encodings we support. We
+identify sequences and contexts of these characters that are much more likely
+to be mojibake than intended strings, such as lowercase accented letters
+followed immediately by currency symbols.
+"""
+
+import warnings
+import re
+
+
+# There are only a few hundred characters that occur in known UTF-8 mojibake, and we can
+# characterize them:
+
+MOJIBAKE_CATEGORIES = {
+ # Characters that appear in many different contexts. Sequences that contain
+ # them are not inherently mojibake
+ "common": (
+ "\N{NO-BREAK SPACE}"
+ "\N{SOFT HYPHEN}"
+ "\N{MIDDLE DOT}"
+ "\N{ACUTE ACCENT}"
+ "\N{EN DASH}"
+ "\N{EM DASH}"
+ "\N{HORIZONTAL BAR}"
+ "\N{HORIZONTAL ELLIPSIS}"
+ "\N{RIGHT SINGLE QUOTATION MARK}"
+ ),
+    # the C1 control character range, which has no uses outside of mojibake anymore
+ "c1": "\x80-\x9f",
+ # Characters that are nearly 100% used in mojibake
+ "bad": (
+ "\N{BROKEN BAR}"
+ "\N{CURRENCY SIGN}"
+ "\N{DIAERESIS}"
+ "\N{NOT SIGN}"
+ "\N{MACRON}"
+ "\N{CEDILLA}"
+ "\N{LATIN SMALL LETTER F WITH HOOK}"
+ "\N{MODIFIER LETTER CIRCUMFLEX ACCENT}" # it's not a modifier
+ "\N{CARON}"
+ "\N{BREVE}"
+ "\N{OGONEK}"
+ "\N{SMALL TILDE}"
+ "\N{DAGGER}"
+ "\N{DOUBLE DAGGER}"
+ "\N{PER MILLE SIGN}"
+ "\N{REVERSED NOT SIGN}"
+ "\N{LOZENGE}"
+ "\ufffd"
+ # Theoretically these would appear in 'numeric' contexts, but when they
+ # co-occur with other mojibake characters, it's not really ambiguous
+ "\N{FEMININE ORDINAL INDICATOR}"
+ "\N{MASCULINE ORDINAL INDICATOR}"
+ ),
+ # Characters used in legalese
+ "law": (
+ "\N{PILCROW SIGN}"
+ "\N{SECTION SIGN}"
+ ),
+ "currency": (
+ "\N{CENT SIGN}"
+ "\N{POUND SIGN}"
+ "\N{YEN SIGN}"
+ "\N{PESETA SIGN}"
+ "\N{EURO SIGN}"
+ ),
+ "start_punctuation": (
+ "\N{INVERTED EXCLAMATION MARK}"
+ "\N{LEFT-POINTING DOUBLE ANGLE QUOTATION MARK}"
+ "\N{INVERTED QUESTION MARK}"
+ "\N{COPYRIGHT SIGN}"
+ "\N{GREEK TONOS}"
+ "\N{GREEK DIALYTIKA TONOS}"
+ "\N{LEFT SINGLE QUOTATION MARK}"
+ "\N{SINGLE LOW-9 QUOTATION MARK}"
+ "\N{LEFT DOUBLE QUOTATION MARK}"
+ "\N{DOUBLE LOW-9 QUOTATION MARK}"
+ "\N{BULLET}"
+ "\N{SINGLE LEFT-POINTING ANGLE QUOTATION MARK}"
+ "\uf8ff" # OS-specific symbol, usually the Apple logo
+ ),
+ "end_punctuation": (
+ "\N{REGISTERED SIGN}"
+ "\N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
+ "\N{DOUBLE ACUTE ACCENT}"
+ "\N{RIGHT DOUBLE QUOTATION MARK}"
+ "\N{SINGLE RIGHT-POINTING ANGLE QUOTATION MARK}"
+ "\N{TRADE MARK SIGN}"
+ ),
+ "numeric": (
+ "\N{SUPERSCRIPT TWO}"
+ "\N{SUPERSCRIPT THREE}"
+ "\N{SUPERSCRIPT ONE}"
+ "\N{PLUS-MINUS SIGN}"
+ "\N{VULGAR FRACTION ONE QUARTER}"
+ "\N{VULGAR FRACTION ONE HALF}"
+ "\N{VULGAR FRACTION THREE QUARTERS}"
+ "\N{MULTIPLICATION SIGN}"
+ "\N{MICRO SIGN}"
+ "\N{DIVISION SIGN}"
+ "\N{FRACTION SLASH}"
+ "\N{PARTIAL DIFFERENTIAL}"
+ "\N{INCREMENT}"
+ "\N{N-ARY PRODUCT}"
+ "\N{N-ARY SUMMATION}"
+ "\N{SQUARE ROOT}"
+ "\N{INFINITY}"
+ "\N{INTERSECTION}"
+ "\N{INTEGRAL}"
+ "\N{ALMOST EQUAL TO}"
+ "\N{NOT EQUAL TO}"
+ "\N{IDENTICAL TO}"
+ "\N{LESS-THAN OR EQUAL TO}"
+ "\N{GREATER-THAN OR EQUAL TO}"
+ "\N{NUMERO SIGN}"
+ ),
+ # Letters that might be used to make emoticon faces (kaomoji), and
+ # therefore might need to appear in more improbable-looking contexts.
+ #
+ # These are concatenated character ranges for use in a regex. I know
+ # they look like faces themselves. I think expressing the ranges like
+ # this helps to illustrate why we need to be careful with these
+ # characters.
+ "kaomoji": (
+ "Ò-Ö"
+ "Ù-Ü"
+ "ò-ö"
+ "ø-ü"
+ "\N{LATIN CAPITAL LETTER O WITH DOUBLE ACUTE}"
+ "\N{LATIN CAPITAL LETTER O WITH MACRON}"
+ "\N{LATIN CAPITAL LETTER U WITH MACRON}"
+ "\N{LATIN CAPITAL LETTER U WITH OGONEK}"
+ "\N{DEGREE SIGN}"
+ ),
+ "upper_accented": (
+ # LATIN CAPITAL LETTER A WITH GRAVE - LATIN CAPITAL LETTER N WITH TILDE
+ "\xc0-\xd1"
+ # skip capital O's and U's that could be used in kaomoji, but
+ # include Ø because it's very common in Arabic mojibake:
+ "\N{LATIN CAPITAL LETTER O WITH STROKE}"
+ "\N{LATIN CAPITAL LETTER U WITH DIAERESIS}"
+ "\N{LATIN CAPITAL LETTER Y WITH ACUTE}"
+ "\N{LATIN CAPITAL LETTER A WITH BREVE}"
+ "\N{LATIN CAPITAL LETTER A WITH MACRON}"
+ "\N{LATIN CAPITAL LETTER A WITH OGONEK}"
+ "\N{LATIN CAPITAL LETTER C WITH ACUTE}"
+ "\N{LATIN CAPITAL LETTER C WITH CARON}"
+ "\N{LATIN CAPITAL LETTER D WITH CARON}"
+ "\N{LATIN CAPITAL LETTER D WITH STROKE}"
+ "\N{LATIN CAPITAL LETTER E WITH OGONEK}"
+ "\N{LATIN CAPITAL LETTER E WITH CARON}"
+ "\N{LATIN CAPITAL LETTER E WITH MACRON}"
+ "\N{LATIN CAPITAL LETTER E WITH DOT ABOVE}"
+ "\N{LATIN CAPITAL LETTER G WITH BREVE}"
+ "\N{LATIN CAPITAL LETTER G WITH CEDILLA}"
+ "\N{LATIN CAPITAL LETTER I WITH DOT ABOVE}"
+ "\N{LATIN CAPITAL LETTER I WITH MACRON}"
+ "\N{LATIN CAPITAL LETTER K WITH CEDILLA}"
+ "\N{LATIN CAPITAL LETTER L WITH ACUTE}"
+ "\N{LATIN CAPITAL LETTER L WITH CARON}"
+ "\N{LATIN CAPITAL LETTER L WITH STROKE}"
+ "\N{LATIN CAPITAL LETTER L WITH CEDILLA}"
+ "\N{LATIN CAPITAL LETTER N WITH ACUTE}"
+ "\N{LATIN CAPITAL LETTER N WITH CARON}"
+ "\N{LATIN CAPITAL LETTER N WITH CEDILLA}"
+ "\N{LATIN CAPITAL LIGATURE OE}"
+ "\N{LATIN CAPITAL LETTER R WITH CARON}"
+ "\N{LATIN CAPITAL LETTER S WITH ACUTE}"
+ "\N{LATIN CAPITAL LETTER S WITH CEDILLA}"
+ "\N{LATIN CAPITAL LETTER S WITH CARON}"
+ "\N{LATIN CAPITAL LETTER T WITH CEDILLA}"
+ "\N{LATIN CAPITAL LETTER T WITH CARON}"
+ "\N{LATIN CAPITAL LETTER U WITH RING ABOVE}"
+ "\N{LATIN CAPITAL LETTER U WITH DOUBLE ACUTE}"
+ "\N{LATIN CAPITAL LETTER Y WITH DIAERESIS}"
+ "\N{LATIN CAPITAL LETTER Z WITH ACUTE}"
+ "\N{LATIN CAPITAL LETTER Z WITH DOT ABOVE}"
+ "\N{LATIN CAPITAL LETTER Z WITH CARON}"
+ "\N{CYRILLIC CAPITAL LETTER GHE WITH UPTURN}"
+ ),
+ "lower_accented": (
+ "\N{LATIN SMALL LETTER SHARP S}"
+ # LATIN SMALL LETTER A WITH GRAVE - LATIN SMALL LETTER N WITH TILDE
+ "\xe0-\xf1"
+ # skip o's and u's that could be used in kaomoji
+ "\N{LATIN SMALL LETTER A WITH BREVE}"
+ "\N{LATIN SMALL LETTER A WITH OGONEK}"
+ "\N{LATIN SMALL LETTER A WITH MACRON}"
+ "\N{LATIN SMALL LETTER C WITH ACUTE}"
+ "\N{LATIN SMALL LETTER C WITH CARON}"
+ "\N{LATIN SMALL LETTER D WITH CARON}"
+ "\N{LATIN SMALL LETTER D WITH STROKE}"
+ "\N{LATIN SMALL LETTER E WITH OGONEK}"
+ "\N{LATIN SMALL LETTER E WITH CARON}"
+ "\N{LATIN SMALL LETTER E WITH MACRON}"
+ "\N{LATIN SMALL LETTER E WITH DOT ABOVE}"
+ "\N{LATIN SMALL LETTER G WITH BREVE}"
+ "\N{LATIN SMALL LETTER G WITH CEDILLA}"
+ "\N{LATIN SMALL LETTER I WITH OGONEK}"
+ "\N{LATIN SMALL LETTER I WITH MACRON}"
+ "\N{LATIN SMALL LETTER K WITH CEDILLA}"
+ "\N{LATIN SMALL LETTER L WITH ACUTE}"
+ "\N{LATIN SMALL LETTER L WITH CARON}"
+ "\N{LATIN SMALL LETTER L WITH STROKE}"
+ "\N{LATIN SMALL LETTER L WITH CEDILLA}"
+ "\N{LATIN SMALL LIGATURE OE}"
+ "\N{LATIN SMALL LETTER R WITH ACUTE}"
+ "\N{LATIN SMALL LETTER S WITH ACUTE}"
+ "\N{LATIN SMALL LETTER S WITH CEDILLA}"
+ "\N{LATIN SMALL LETTER S WITH CARON}"
+ "\N{LATIN SMALL LETTER T WITH CARON}"
+ "\N{LATIN SMALL LETTER U WITH DIAERESIS}"
+ "\N{LATIN SMALL LETTER Z WITH ACUTE}"
+ "\N{LATIN SMALL LETTER Z WITH DOT ABOVE}"
+ "\N{LATIN SMALL LETTER Z WITH CARON}"
+ "\N{CYRILLIC SMALL LETTER GHE WITH UPTURN}"
+ "\N{LATIN SMALL LIGATURE FI}"
+ "\N{LATIN SMALL LIGATURE FL}"
+ ),
+ "upper_common": (
+ "\N{LATIN CAPITAL LETTER THORN}"
+ "\N{GREEK CAPITAL LETTER ALPHA}-\N{GREEK CAPITAL LETTER OMEGA}"
+ # not included under 'accented' because these can commonly
+ # occur at ends of words, in positions where they'd be detected
+ # as mojibake
+ "\N{GREEK CAPITAL LETTER ALPHA WITH TONOS}"
+ "\N{GREEK CAPITAL LETTER EPSILON WITH TONOS}"
+ "\N{GREEK CAPITAL LETTER ETA WITH TONOS}"
+ "\N{GREEK CAPITAL LETTER IOTA WITH TONOS}"
+ "\N{GREEK CAPITAL LETTER OMICRON WITH TONOS}"
+ "\N{GREEK CAPITAL LETTER UPSILON WITH TONOS}"
+ "\N{GREEK CAPITAL LETTER OMEGA WITH TONOS}"
+ "\N{GREEK CAPITAL LETTER IOTA WITH DIALYTIKA}"
+ "\N{GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA}"
+ "\N{CYRILLIC CAPITAL LETTER IO}-\N{CYRILLIC CAPITAL LETTER YA}"
+ ),
+ "lower_common": (
+ # lowercase thorn does not appear in mojibake
+ "\N{GREEK SMALL LETTER ALPHA}-\N{GREEK SMALL LETTER OMEGA}"
+ "\N{GREEK SMALL LETTER ALPHA WITH TONOS}"
+ "\N{GREEK SMALL LETTER EPSILON WITH TONOS}"
+ "\N{GREEK SMALL LETTER ETA WITH TONOS}"
+ "\N{GREEK SMALL LETTER IOTA WITH TONOS}"
+ "\N{GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS}"
+ "\N{CYRILLIC SMALL LETTER A}-\N{CYRILLIC SMALL LETTER DZHE}"
+ ),
+ "box": (
+ # omit the single horizontal line, might be used in kaomoji
+ "│┌┐┘├┤┬┼"
+ "\N{BOX DRAWINGS DOUBLE HORIZONTAL}-\N{BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL}"
+ "▀▄█▌▐░▒▓"
+ ),
+}
+
+
+# We can now build a regular expression that detects unlikely juxtapositions
+# of characters, mostly based on their categories.
+#
+# Another regular expression, which detects sequences that look more specifically
+# like UTF-8 mojibake, appears in chardata.py.
+#
+# This is a verbose regular expression, with whitespace added for somewhat more
+# readability. Remember that the only spaces that count as literal spaces in this
+# expression are ones inside character classes (square brackets).
+
+BADNESS_RE = re.compile(
+ r"""
+ [{c1}]
+ |
+ [{bad}{lower_accented}{upper_accented}{box}{start_punctuation}{end_punctuation}{currency}{numeric}{law}] [{bad}]
+ |
+ [a-zA-Z] [{lower_common}{upper_common}] [{bad}]
+ |
+ [{bad}] [{lower_accented}{upper_accented}{box}{start_punctuation}{end_punctuation}{currency}{numeric}{law}]
+ |
+ [{lower_accented}{lower_common}{box}{end_punctuation}{currency}{numeric}] [{upper_accented}]
+ |
+ [{box}{end_punctuation}{currency}{numeric}] [{lower_accented}]
+ |
+ [{lower_accented}{box}{end_punctuation}] [{currency}]
+ |
+ \s [{upper_accented}] [{currency}]
+ |
+ [{upper_accented}{box}] [{numeric}{law}]
+ |
+ [{lower_accented}{upper_accented}{box}{currency}{end_punctuation}] [{start_punctuation}] [{numeric}]
+ |
+ [{lower_accented}{upper_accented}{currency}{numeric}{box}{law}] [{end_punctuation}] [{start_punctuation}]
+ |
+ [{currency}{numeric}{box}] [{start_punctuation}]
+ |
+ [a-z] [{upper_accented}] [{start_punctuation}{currency}]
+ |
+ [{box}] [{kaomoji}]
+ |
+ [{lower_accented}{upper_accented}{currency}{numeric}{start_punctuation}{end_punctuation}{law}] [{box}]
+ |
+ [{box}] [{end_punctuation}]
+ |
+ [{lower_accented}{upper_accented}] [{start_punctuation}{end_punctuation}] \w
+ |
+
+ # The ligature œ when not followed by an unaccented Latin letter
+ [Œœ][^A-Za-z]
+ |
+
+ # Degree signs after capital letters
+ [{upper_accented}]°
+ |
+
+ # Common Windows-1252 2-character mojibake that isn't covered by the cases above
+ [ÂÃÎÐ][€œŠš¢£Ÿž\xa0\xad®©°·»{start_punctuation}{end_punctuation}–—´]
+ |
+ × [²³]
+ |
+ # Windows-1252 mojibake of Arabic words needs to include the 'common' characters.
+ # To compensate, we require four characters to be matched.
+ [ØÙ] [{common}{currency}{bad}{numeric}{start_punctuation}ŸŠ®°µ»]
+ [ØÙ] [{common}{currency}{bad}{numeric}{start_punctuation}ŸŠ®°µ»]
+ |
+
+ # Windows-1252 mojibake that starts 3-character sequences for some South Asian
+ # alphabets
+ à[²µ¹¼½¾]
+ |
+
+ # MacRoman mojibake that isn't covered by the cases above
+ √[±∂†≠®™´≤≥¥µø]
+ |
+ ≈[°¢]
+ |
+ ‚Ä[ìîïòôúùû†°¢π]
+ |
+ ‚[âó][àä°ê]
+ |
+
+ # Windows-1251 mojibake of characters in the U+2000 range
+ вЂ
+ |
+
+ # Windows-1251 mojibake of Latin-1 characters and/or the Cyrillic alphabet.
+ # Because the 2-character sequences involved here may be common, we require
+ # seeing a 3-character sequence.
+ [ВГРС][{c1}{bad}{start_punctuation}{end_punctuation}{currency}°µ][ВГРС]
+ |
+ # A distinctive five-character sequence of Cyrillic letters, which can be
+ # Windows-1251 mojibake on top of Latin-1 mojibake of Windows-1252 characters.
+ # Require a Latin letter nearby.
+ ГўВЂВ.[A-Za-z ]
+ |
+
+ # Windows-1252 encodings of 'à' and 'á', as well as \xa0 itself
+ Ã[\xa0¡]
+ |
+ [a-z]\s?[ÃÂ][ ]
+ |
+ ^[ÃÂ][ ]
+ |
+
+ # Cases where  precedes a character as an encoding of exactly the same
+ # character, and the character is common enough
+ [a-z.,?!{end_punctuation}] Â [ {start_punctuation}{end_punctuation}]
+ |
+
+ # Windows-1253 mojibake of characters in the U+2000 range
+ β€[™\xa0Ά\xad®°]
+ |
+
+ # Windows-1253 mojibake of Latin-1 characters and/or the Greek alphabet
+ [ΒΓΞΟ][{c1}{bad}{start_punctuation}{end_punctuation}{currency}°][ΒΓΞΟ]
+ |
+
+ # Windows-1257 mojibake of characters in the U+2000 range
+ ā€
+ """.format(
+ **MOJIBAKE_CATEGORIES
+ ),
+ re.VERBOSE,
+)
+
+
+def sequence_weirdness(text: str) -> int:
+ """
+ This was the name of the heuristic used in ftfy 2.x through 5.x. As an
+ attempt at compatibility with external code that calls the heuristic
+ directly, we redirect to our new heuristic, :func:`badness`.
+ """
+ warnings.warn(
+ "`sequence_weirdness()` is an old heuristic, and the current "
+ "closest equivalent is `ftfy.badness.badness()`"
+ )
+ return badness(text)
+
+
+def badness(text: str) -> int:
+ """
+ Get the 'badness' of a sequence of text, counting the number of unlikely
+ character sequences. A badness greater than 0 indicates that some of it
+ seems to be mojibake.
+ """
+ return len(BADNESS_RE.findall(text))
+
+
+def is_bad(text: str) -> bool:
+ """
+ Returns true iff the given text looks like it contains mojibake.
+
+ This can be faster than `badness`, because it returns when the first match
+ is found to a regex instead of counting matches. Note that as strings get
+ longer, they have a higher chance of returning True for `is_bad(string)`.
+ """
+ return bool(BADNESS_RE.search(text))
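+
+
+# For example, classic Windows-1252 mojibake trips the heuristic, while the
+# intended text does not (an illustrative session):
+#
+#     >>> is_bad('vÃ©ritÃ©')
+#     True
+#     >>> is_bad('vérité')
+#     False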
diff --git a/minigpt2/lib/python3.10/site-packages/ftfy/cli.py b/minigpt2/lib/python3.10/site-packages/ftfy/cli.py
new file mode 100644
index 0000000000000000000000000000000000000000..2807a8604f03cf5b11aac1ad076070ada97e371e
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/ftfy/cli.py
@@ -0,0 +1,142 @@
+"""
+A command-line utility for fixing text found in a file.
+"""
+
+import os
+import sys
+from typing import Union
+
+from ftfy import TextFixerConfig, __version__, fix_file
+
+ENCODE_ERROR_TEXT_UNIX = """ftfy error:
+Unfortunately, this output stream does not support Unicode.
+
+Your system locale may be very old or misconfigured. You should use a locale
+that supports UTF-8. One way to do this is to `export LANG=C.UTF-8`.
+"""
+
+ENCODE_ERROR_TEXT_WINDOWS = """ftfy error:
+Unfortunately, this output stream does not support Unicode.
+
+You might be trying to output to the Windows Command Prompt (cmd.exe), which
+does not fully support Unicode for historical reasons. In general, we recommend
+finding a way to run Python without using cmd.exe.
+
+You can work around this problem by using the '-o filename' option in ftfy to
+output to a file instead.
+"""
+
+DECODE_ERROR_TEXT = """ftfy error:
+This input couldn't be decoded as %r. We got the following error:
+
+ %s
+
+ftfy works best when its input is in a known encoding. You can use `ftfy -g`
+to guess, if you're desperate. Otherwise, give the encoding name with the
+`-e` option, such as `ftfy -e latin-1`.
+"""
+
+SAME_FILE_ERROR_TEXT = """ftfy error:
+Can't read and write the same file. Please output to a new file instead.
+"""
+
+
+def main() -> None:
+ """
+ Run ftfy as a command-line utility.
+ """
+ import argparse
+
+ parser = argparse.ArgumentParser(
+ description=f"ftfy (fixes text for you), version {__version__}"
+ )
+ parser.add_argument(
+ "filename",
+ default="-",
+ nargs="?",
+ help="The file whose Unicode is to be fixed. Defaults to -, meaning standard input.",
+ )
+ parser.add_argument(
+ "-o",
+ "--output",
+ type=str,
+ default="-",
+ help="The file to output to. Defaults to -, meaning standard output.",
+ )
+ parser.add_argument(
+ "-g",
+ "--guess",
+ action="store_true",
+ help="Ask ftfy to guess the encoding of your input. This is risky. Overrides -e.",
+ )
+ parser.add_argument(
+ "-e",
+ "--encoding",
+ type=str,
+ default="utf-8",
+ help="The encoding of the input. Defaults to UTF-8.",
+ )
+ parser.add_argument(
+ "-n",
+ "--normalization",
+ type=str,
+ default="NFC",
+ help='The normalization of Unicode to apply. Defaults to NFC. Can be "none".',
+ )
+ parser.add_argument(
+ "--preserve-entities",
+ action="store_true",
+ help="Leave HTML entities as they are. The default "
+ "is to decode them, as long as no HTML tags have appeared in the file.",
+ )
+
+ args = parser.parse_args()
+
+ encoding = args.encoding
+ if args.guess:
+ encoding = None
+
+ if args.filename == "-":
+ # Get a standard input stream made of bytes, so we can decode it as
+ # whatever encoding is necessary.
+ file = sys.stdin.buffer
+ else:
+ file = open(args.filename, "rb")
+
+ if args.output == "-":
+ outfile = sys.stdout
+ else:
+ if os.path.realpath(args.output) == os.path.realpath(args.filename):
+ sys.stderr.write(SAME_FILE_ERROR_TEXT)
+ sys.exit(1)
+ outfile = open(args.output, "w", encoding="utf-8")
+
+ normalization = args.normalization
+ if normalization.lower() == "none":
+ normalization = None
+
+ unescape_html: Union[str, bool]
+ if args.preserve_entities:
+ unescape_html = False
+ else:
+ unescape_html = "auto"
+
+ config = TextFixerConfig(unescape_html=unescape_html, normalization=normalization)
+
+ try:
+ for line in fix_file(file, encoding=encoding, config=config):
+ try:
+ outfile.write(line)
+ except UnicodeEncodeError:
+ if sys.platform == "win32":
+ sys.stderr.write(ENCODE_ERROR_TEXT_WINDOWS)
+ else:
+ sys.stderr.write(ENCODE_ERROR_TEXT_UNIX)
+ sys.exit(1)
+ except UnicodeDecodeError as err:
+ sys.stderr.write(DECODE_ERROR_TEXT % (encoding, err))
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
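+
+
+# Typical invocations, given the arguments defined above. These can be run
+# with `python -m ftfy.cli`; the bare `ftfy` command assumes the package's
+# console-script entry point is installed:
+#
+#     ftfy mojibake.txt -o fixed.txt      # fix a file, writing UTF-8 output
+#     ftfy -e windows-1252 mojibake.txt   # declare the input encoding
+#     ftfy -g mystery.txt                 # let ftfy guess the encoding (risky)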
diff --git a/minigpt2/lib/python3.10/site-packages/ftfy/fixes.py b/minigpt2/lib/python3.10/site-packages/ftfy/fixes.py
new file mode 100644
index 0000000000000000000000000000000000000000..41d3c2f817f0dedfb6887a64a97bb6e5dc0a118f
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/ftfy/fixes.py
@@ -0,0 +1,510 @@
+"""
+The `ftfy.fixes` module contains the individual fixes that :func:`ftfy.fix_text`
+can perform, and provides the functions that are named in "explanations"
+such as the output of :func:`ftfy.fix_and_explain`.
+
+Two of these functions are particularly useful on their own, as more robust
+versions of functions in the Python standard library:
+
+- :func:`ftfy.fixes.decode_escapes`
+- :func:`ftfy.fixes.unescape_html`
+"""
+
+import codecs
+import html
+import re
+import warnings
+from re import Match
+from typing import Any
+
+import ftfy
+from ftfy.badness import is_bad
+from ftfy.chardata import (
+ ALTERED_UTF8_RE,
+ C1_CONTROL_RE,
+ CONTROL_CHARS,
+ DOUBLE_QUOTE_RE,
+ HTML_ENTITIES,
+ HTML_ENTITY_RE,
+ LIGATURES,
+ LOSSY_UTF8_RE,
+ SINGLE_QUOTE_RE,
+ UTF8_DETECTOR_RE,
+ WIDTH_MAP,
+)
+
+
+def fix_encoding_and_explain(text: str) -> Any:
+ """
+ Deprecated copy of `ftfy.fix_encoding_and_explain()`.
+ """
+ warnings.warn(
+ "`fix_encoding_and_explain()` has moved to the main module of ftfy.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return ftfy.fix_encoding_and_explain(text)
+
+
+def fix_encoding(text: str) -> str:
+ """
+ Deprecated copy of `ftfy.fix_encoding()`.
+ """
+ warnings.warn(
+ "`fix_encoding()` has moved to the main module of ftfy.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return ftfy.fix_encoding(text)
+
+
+def apply_plan(text: str, plan: list[tuple[str, str]]) -> str:
+ """
+ Deprecated copy of `ftfy.apply_plan()`.
+ """
+ warnings.warn(
+ "`apply_plan()` has moved to the main module of ftfy.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return ftfy.apply_plan(text, plan)
+
+
+def _unescape_fixup(match: Match[str]) -> str:
+ """
+ Replace one matched HTML entity with the character it represents,
+ if possible.
+ """
+ text = match.group(0)
+ if text in HTML_ENTITIES:
+ return HTML_ENTITIES[text]
+    elif text.startswith("&#"):
+ unescaped: str = html.unescape(text)
+
+ # If html.unescape only decoded part of the string, that's not what
+ # we want. The semicolon should be consumed.
+ if ";" in unescaped:
+ return text
+ else:
+ return unescaped
+ else:
+ return text
+
+
+def unescape_html(text: str) -> str:
+ """
+ Decode HTML entities and character references, including some nonstandard
+ ones written in all-caps.
+
+ Python has a built-in called `html.unescape` that can decode HTML escapes,
+ including a bunch of messy edge cases such as decoding escapes without
+    semicolons such as "&amp".
+
+ If you know you've got HTML-escaped text, applying `html.unescape` is the
+ right way to convert it to plain text. But in ambiguous situations, that
+ would create false positives. For example, the informally written text
+ "this¬ that" should not automatically be decoded as "this¬ that".
+
+ In this function, we decode the escape sequences that appear in the
+ `html.entities.html5` dictionary, as long as they are the unambiguous ones
+ that end in semicolons.
+
+ We also decode all-caps versions of Latin letters and common symbols.
+ If a database contains the name 'P&EACUTE;REZ', we can read that and intuit
+ that it was supposed to say 'PÉREZ'. This is limited to a smaller set of
+ entities, because there are many instances where entity names are
+ case-sensitive in complicated ways.
+
+    >>> unescape_html('&lt;tag&gt;')
+    '<tag>'
+
+    >>> unescape_html('&Jscr;ohn &HilbertSpace;ancock')
+ '𝒥ohn ℋancock'
+
+    >>> unescape_html('&checkmark;')
+ '✓'
+
+    >>> unescape_html('P&eacute;rez')
+ 'Pérez'
+
+ >>> unescape_html('P&EACUTE;REZ')
+ 'PÉREZ'
+
+ >>> unescape_html('BUNDESSTRA&SZLIG;E')
+ 'BUNDESSTRASSE'
+
+    >>> unescape_html('&ntilde; &Ntilde; &NTILDE; &nTILDE;')
+ 'ñ Ñ Ñ &nTILDE;'
+ """
+ return HTML_ENTITY_RE.sub(_unescape_fixup, text)
+
+
+ANSI_RE = re.compile("\033\\[((?:\\d|;)*)([a-zA-Z])")
+
+
+def remove_terminal_escapes(text: str) -> str:
+ r"""
+ Strip out "ANSI" terminal escape sequences, such as those that produce
+ colored text on Unix.
+
+ >>> print(remove_terminal_escapes(
+ ... "\033[36;44mI'm blue, da ba dee da ba doo...\033[0m"
+ ... ))
+ I'm blue, da ba dee da ba doo...
+ """
+ return ANSI_RE.sub("", text)
+
+
+def uncurl_quotes(text: str) -> str:
+ r"""
+ Replace curly quotation marks with straight equivalents.
+
+ >>> print(uncurl_quotes('\u201chere\u2019s a test\u201d'))
+ "here's a test"
+ """
+ return SINGLE_QUOTE_RE.sub("'", DOUBLE_QUOTE_RE.sub('"', text))
+
+
+def fix_latin_ligatures(text: str) -> str:
+ """
+ Replace single-character ligatures of Latin letters, such as 'fi', with the
+ characters that they contain, as in 'fi'. Latin ligatures are usually not
+ intended in text strings (though they're lovely in *rendered* text). If
+ you have such a ligature in your string, it is probably a result of a
+ copy-and-paste glitch.
+
+ We leave ligatures in other scripts alone to be safe. They may be intended,
+ and removing them may lose information. If you want to take apart nearly
+ all ligatures, use NFKC normalization.
+
+ >>> print(fix_latin_ligatures("fluffiest"))
+ fluffiest
+ """
+ return text.translate(LIGATURES)
+
+
+def fix_character_width(text: str) -> str:
+ """
+ The ASCII characters, katakana, and Hangul characters have alternate
+ "halfwidth" or "fullwidth" forms that help text line up in a grid.
+
+ If you don't need these width properties, you probably want to replace
+ these characters with their standard form, which is what this function
+ does.
+
+ Note that this replaces the ideographic space, U+3000, with the ASCII
+ space, U+20.
+
+ >>> print(fix_character_width("LOUD NOISES"))
+ LOUD NOISES
+ >>> print(fix_character_width("Uターン")) # this means "U-turn"
+ Uターン
+ """
+ return text.translate(WIDTH_MAP)
+
+
+def fix_line_breaks(text: str) -> str:
+ r"""
+ Convert all line breaks to Unix style.
+
+ This will convert the following sequences into the standard \\n
+ line break:
+
+ - CRLF (\\r\\n), used on Windows and in some communication protocols
+ - CR (\\r), once used on Mac OS Classic, and now kept alive by misguided
+ software such as Microsoft Office for Mac
+ - LINE SEPARATOR (\\u2028) and PARAGRAPH SEPARATOR (\\u2029), defined by
+ Unicode and used to sow confusion and discord
+ - NEXT LINE (\\x85), a C1 control character that is certainly not what you
+ meant
+
+ The NEXT LINE character is a bit of an odd case, because it
+ usually won't show up if `fix_encoding` is also being run.
+ \\x85 is very common mojibake for \\u2026, HORIZONTAL ELLIPSIS.
+
+ >>> print(fix_line_breaks(
+ ... "This string is made of two things:\u2029"
+ ... "1. Unicode\u2028"
+ ... "2. Spite"
+ ... ))
+ This string is made of two things:
+ 1. Unicode
+ 2. Spite
+
+ For further testing and examples, let's define a function to make sure
+ we can see the control characters in their escaped form:
+
+ >>> def eprint(text):
+ ... print(text.encode('unicode-escape').decode('ascii'))
+
+ >>> eprint(fix_line_breaks("Content-type: text/plain\r\n\r\nHi."))
+ Content-type: text/plain\n\nHi.
+
+ >>> eprint(fix_line_breaks("This is how Microsoft \r trolls Mac users"))
+ This is how Microsoft \n trolls Mac users
+
+ >>> eprint(fix_line_breaks("What is this \x85 I don't even"))
+ What is this \n I don't even
+ """
+ return (
+ text.replace("\r\n", "\n")
+ .replace("\r", "\n")
+ .replace("\u2028", "\n")
+ .replace("\u2029", "\n")
+ .replace("\u0085", "\n")
+ )
+
+
+SURROGATE_RE = re.compile("[\ud800-\udfff]")
+SURROGATE_PAIR_RE = re.compile("[\ud800-\udbff][\udc00-\udfff]")
+
+
+def convert_surrogate_pair(match: Match[str]) -> str:
+ """
+ Convert a surrogate pair to the single codepoint it represents.
+
+ This implements the formula described at:
+ http://en.wikipedia.org/wiki/Universal_Character_Set_characters#Surrogates
+ """
+ pair = match.group(0)
+ codept = 0x10000 + (ord(pair[0]) - 0xD800) * 0x400 + (ord(pair[1]) - 0xDC00)
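+    # For example, the pair U+D83D, U+DCA9 gives
+    # 0x10000 + (0xD83D - 0xD800) * 0x400 + (0xDCA9 - 0xDC00) = 0x1F4A9,
+    # the codepoint of '💩' (matching the doctest in fix_surrogates below).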
+ return chr(codept)
+
+
+def fix_surrogates(text: str) -> str:
+ """
+ Replace 16-bit surrogate codepoints with the characters they represent
+ (when properly paired), or with \ufffd otherwise.
+
+ >>> high_surrogate = chr(0xd83d)
+ >>> low_surrogate = chr(0xdca9)
+ >>> print(fix_surrogates(high_surrogate + low_surrogate))
+ 💩
+ >>> print(fix_surrogates(low_surrogate + high_surrogate))
+ ��
+
+ The above doctest had to be very carefully written, because even putting
+ the Unicode escapes of the surrogates in the docstring was causing
+ various tools to fail, which I think just goes to show why this fixer is
+ necessary.
+ """
+ if SURROGATE_RE.search(text):
+ text = SURROGATE_PAIR_RE.sub(convert_surrogate_pair, text)
+ text = SURROGATE_RE.sub("\ufffd", text)
+ return text
+
+
+def remove_control_chars(text: str) -> str:
+ """
+ Remove various control characters that you probably didn't intend to be in
+ your text. Many of these characters appear in the table of "Characters not
+ suitable for use with markup" at
+ http://www.unicode.org/reports/tr20/tr20-9.html.
+
+ This includes:
+
+ - ASCII control characters, except for the important whitespace characters
+ (U+00 to U+08, U+0B, U+0E to U+1F, U+7F)
+ - Deprecated Arabic control characters (U+206A to U+206F)
+ - Interlinear annotation characters (U+FFF9 to U+FFFB)
+ - The Object Replacement Character (U+FFFC)
+ - The byte order mark (U+FEFF)
+
+ However, these similar characters are left alone:
+
+ - Control characters that produce whitespace (U+09, U+0A, U+0C, U+0D,
+ U+2028, and U+2029)
+ - C1 control characters (U+80 to U+9F) -- even though they are basically
+ never used intentionally, they are important clues about what mojibake
+ has happened
+ - Control characters that affect glyph rendering, such as joiners and
+ right-to-left marks (U+200C to U+200F, U+202A to U+202E)
+ - Musical notation control characters (U+1D173 to U+1D17A) because wow if
+ you're using those you probably have a good reason
+ - Tag characters, because they are now used in emoji sequences such as
+ "Flag of Wales"
+ """
+ return text.translate(CONTROL_CHARS)
+
+
+def remove_bom(text: str) -> str:
+ r"""
+ Remove a byte-order mark that was accidentally decoded as if it were part
+ of the text.
+
+ >>> print(remove_bom(chr(0xfeff) + "Where do you want to go today?"))
+ Where do you want to go today?
+ """
+ return text.lstrip(chr(0xFEFF))
+
+
+# Define a regex to match valid escape sequences in Python string literals.
+ESCAPE_SEQUENCE_RE = re.compile(
+ r"""
+ ( \\U........ # 8-digit hex escapes
+ | \\u.... # 4-digit hex escapes
+ | \\x.. # 2-digit hex escapes
+ | \\[0-7]{1,3} # Octal escapes
+ | \\N\{[^}]+\} # Unicode characters by name
+ | \\[\\'"abfnrtv] # Single-character escapes
+ )""",
+ re.UNICODE | re.VERBOSE,
+)
+
+
+def decode_escapes(text: str) -> str:
+ r"""
+ Decode backslashed escape sequences, including \\x, \\u, and \\U character
+ references, even in the presence of other Unicode.
+
+ This function has to be called specifically. It's not run automatically by
+ ftfy, because escaped text is not necessarily a mistake, and there is no
+ way to distinguish when it is.
+
+ This is what Python's "string-escape" and "unicode-escape" codecs were
+ meant to do, but in contrast, this actually works. It will decode the
+ string exactly the same way that the Python interpreter decodes its string
+ literals.
+
+ >>> factoid = '\\u20a1 is the currency symbol for the colón.'
+ >>> print(factoid[1:])
+ u20a1 is the currency symbol for the colón.
+ >>> print(decode_escapes(factoid))
+ ₡ is the currency symbol for the colón.
+
+ Even though Python itself can read string literals with a combination of
+ escapes and literal Unicode -- you're looking at one right now -- the
+ "unicode-escape" codec doesn't work on literal Unicode. (See
+ http://stackoverflow.com/a/24519338/773754 for more details.)
+
+ Instead, this function searches for just the parts of a string that
+ represent escape sequences, and decodes them, leaving the rest alone. All
+ valid escape sequences are made of ASCII characters, and this allows
+ "unicode-escape" to work correctly.
+ """
+
+ def decode_match(match: Match[str]) -> str:
+ "Given a regex match, decode the escape sequence it contains."
+ return codecs.decode(match.group(0), "unicode-escape")
+
+ return ESCAPE_SEQUENCE_RE.sub(decode_match, text)
+
+
+# This regex implements an exception to restore_byte_a0, so we can decode the
+# very common mojibake of (for example) "Ã la mode" as "à la mode", not "àla
+# mode".
+#
+# If byte C3 appears with a single space after it -- most commonly this shows
+# up as " Ã " appearing as an entire word -- we'll insert \xa0 while keeping
+# the space. Without this change, we would decode "à" as the start of the next
+# word, such as "àla". It's almost always intended to be a separate word, as in
+# "à la", but when mojibake turns this into "Ã\xa0 la", the two kinds of spaces
+# get coalesced into "Ã la".
+#
+# We make exceptions for the Portuguese words "às", "àquele", "àquela",
+# "àquilo" and their plurals -- these are contractions of, for example, "a
+# aquele" and are very common. Note that the final letter is important to
+# distinguish this case from French "à quel point".
+#
+# Other instances in Portuguese, such as "àfrica", seem to be typos (intended
+# to be "África" with the accent in the other direction).
+#
+# Unfortunately, "à" is a common letter in Catalan, and mojibake of words that
+# contain it will end up with inserted spaces. We can't do the right thing with
+# every word. The cost is that the mojibake text "fà cil" will be interpreted as
+# "fà cil", not "fàcil".
+A_GRAVE_WORD_RE = re.compile(b"\xc3 (?! |quele|quela|quilo|s )")
+
+
+def restore_byte_a0(byts: bytes) -> bytes:
+ """
+ Some mojibake has been additionally altered by a process that said "hmm,
+ byte A0, that's basically a space!" and replaced it with an ASCII space.
+ When the A0 is part of a sequence that we intend to decode as UTF-8,
+ changing byte A0 to 20 would make it fail to decode.
+
+ This process finds sequences that would convincingly decode as UTF-8 if
+ byte 20 were changed to A0, and puts back the A0. For the purpose of
+ deciding whether this is a good idea, this step gets a cost of twice
+ the number of bytes that are changed.
+
+ This is used as a step within `fix_encoding`.
+ """
+ byts = A_GRAVE_WORD_RE.sub(b"\xc3\xa0 ", byts)
+
+ def replacement(match: Match[bytes]) -> bytes:
+ "The function to apply when this regex matches."
+ return match.group(0).replace(b"\x20", b"\xa0")
+
+ return ALTERED_UTF8_RE.sub(replacement, byts)
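+
+
+# For example (illustrative; the general case also depends on ALTERED_UTF8_RE
+# from ftfy.chardata, which is defined elsewhere):
+#
+#     >>> restore_byte_a0(b'\xc3 la mode')
+#     b'\xc3\xa0 la mode'
+#
+# which now decodes as UTF-8 to 'à la mode'.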
+
+
+def replace_lossy_sequences(byts: bytes) -> bytes:
+ """
+ This function identifies sequences where information has been lost in
+ a "sloppy" codec, indicated by byte 1A, and if they would otherwise look
+ like a UTF-8 sequence, it replaces them with the UTF-8 sequence for U+FFFD.
+
+ A further explanation:
+
+ ftfy can now fix text in a few cases that it would previously fix
+ incompletely, because of the fact that it can't successfully apply the fix
+ to the entire string. A very common case of this is when characters have
+ been erroneously decoded as windows-1252, but instead of the "sloppy"
+ windows-1252 that passes through unassigned bytes, the unassigned bytes get
+ turned into U+FFFD (�), so we can't tell what they were.
+
+ This most commonly happens with curly quotation marks that appear
+    ``â€œ like this â€�``.
+
+ We can do better by building on ftfy's "sloppy codecs" to let them handle
+ less-sloppy but more-lossy text. When they encounter the character ``�``,
+ instead of refusing to encode it, they encode it as byte 1A -- an
+ ASCII control code called SUBSTITUTE that once was meant for about the same
+ purpose. We can then apply a fixer that looks for UTF-8 sequences where
+ some continuation bytes have been replaced by byte 1A, and decode the whole
+ sequence as �; if that doesn't work, it'll just turn the byte back into �
+ itself.
+
+ As a result, the above text ``â€œ like this â€�`` will decode as
+ ``“ like this �``.
+
+ If U+1A was actually in the original string, then the sloppy codecs will
+ not be used, and this function will not be run, so your weird control
+ character will be left alone, but wacky fixes like this won't be possible.
+
+ This is used as a transcoder within `fix_encoding`.
+ """
+ return LOSSY_UTF8_RE.sub("\ufffd".encode(), byts)
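+
+# A hedged example (not from the original source), assuming LOSSY_UTF8_RE
+# (defined earlier in this module) matches a three-byte UTF-8 sequence whose
+# last continuation byte was replaced by 1A:
+#
+#     replace_lossy_sequences(b"\xe2\x80\x9c like this \xe2\x80\x1a")
+#     # expected: b'\xe2\x80\x9c like this \xef\xbf\xbd',
+#     # which decodes as '“ like this \ufffd'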
+
+
+def decode_inconsistent_utf8(text: str) -> str:
+ """
+ Sometimes, text from one encoding ends up embedded within text from a
+ different one. This is common enough that we need to be able to fix it.
+
+ This is used as a transcoder within `fix_encoding`.
+ """
+
+ def fix_embedded_mojibake(match: Match[str]) -> str:
+ substr = match.group(0)
+
+ # Require the match to be shorter, so that this doesn't recurse infinitely
+ if len(substr) < len(text) and is_bad(substr):
+ return ftfy.fix_encoding(substr)
+ else:
+ return substr
+
+ return UTF8_DETECTOR_RE.sub(fix_embedded_mojibake, text)
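+
+# For example (illustrative input; the exact behavior depends on
+# UTF8_DETECTOR_RE and is_bad, both defined elsewhere in ftfy), a UTF-8
+# mojibake island inside otherwise-correct text should be repaired in place:
+#
+#     decode_inconsistent_utf8("IKEA bÃ¤sta")
+#     # expected: 'IKEA bästa'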
+
+
+def _c1_fixer(match: Match[str]) -> str:
+ return match.group(0).encode("latin-1").decode("sloppy-windows-1252")
+
+
+def fix_c1_controls(text: str) -> str:
+ """
+ If text still contains C1 control characters, treat them as their
+ Windows-1252 equivalents. This matches what Web browsers do.
+ """
+ return C1_CONTROL_RE.sub(_c1_fixer, text)
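+
+# A small usage sketch (not from the original source), assuming ftfy's sloppy
+# codecs are registered (importing ftfy.bad_codecs does this):
+#
+#     fix_c1_controls("today\x85tomorrow")
+#     # expected: 'today…tomorrow', since byte 0x85 is "…" in windows-1252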
diff --git a/minigpt2/lib/python3.10/site-packages/ftfy/formatting.py b/minigpt2/lib/python3.10/site-packages/ftfy/formatting.py
new file mode 100644
index 0000000000000000000000000000000000000000..18df64b082ddfe26f079578de57a6bb6f5d2df03
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/ftfy/formatting.py
@@ -0,0 +1,166 @@
+"""
+This module provides functions for justifying Unicode text in a monospaced
+display such as a terminal.
+
+We used to have our own implementation here, but now we mostly rely on
+the 'wcwidth' library.
+"""
+
+from unicodedata import normalize
+
+from wcwidth import wcswidth, wcwidth
+
+from ftfy.fixes import remove_terminal_escapes
+
+
+def character_width(char: str) -> int:
+ r"""
+ Determine the width that a character is likely to be displayed as in
+ a monospaced terminal. The width for a printable character will
+ always be 0, 1, or 2.
+
+ Nonprintable or control characters will return -1, a convention that comes
+ from wcwidth.
+
+ >>> character_width('車')
+ 2
+ >>> character_width('A')
+ 1
+ >>> character_width('\N{ZERO WIDTH JOINER}')
+ 0
+ >>> character_width('\n')
+ -1
+ """
+ return int(wcwidth(char))
+
+
+def monospaced_width(text: str) -> int:
+ r"""
+ Return the number of character cells that this string is likely to occupy
+ when displayed in a monospaced, modern, Unicode-aware terminal emulator.
+ We refer to this as the "display width" of the string.
+
+ This can be useful for formatting text that may contain non-spacing
+ characters, or CJK characters that take up two character cells.
+
+ Returns -1 if the string contains a non-printable or control character.
+
+ >>> monospaced_width('ちゃぶ台返し')
+ 12
+ >>> len('ちゃぶ台返し')
+ 6
+ >>> monospaced_width('owl\N{SOFT HYPHEN}flavored')
+ 11
+ >>> monospaced_width('example\x80')
+ -1
+
+ A more complex example: The Korean word 'ibnida' can be written with 3
+ pre-composed characters or 7 jamo. Either way, it *looks* the same and
+ takes up 6 character cells.
+
+ >>> monospaced_width('입니다')
+ 6
+ >>> monospaced_width('\u110b\u1175\u11b8\u1102\u1175\u1103\u1161')
+ 6
+
+ The word "blue" with terminal escapes to make it blue still takes up only
+ 4 characters, when shown as intended.
+
+ >>> monospaced_width('\x1b[34mblue\x1b[m')
+ 4
+ """
+ # NFC-normalize the text first, so that we don't need special cases for
+ # Hangul jamo.
+ #
+ # Remove terminal escapes before calculating width, because if they are
+ # displayed as intended, they will have zero width.
+ return int(wcswidth(remove_terminal_escapes(normalize("NFC", text))))
+
+
+def display_ljust(text: str, width: int, fillchar: str = " ") -> str:
+ """
+ Return `text` left-justified in a Unicode string whose display width,
+ in a monospaced terminal, should be at least `width` character cells.
+ The rest of the string will be padded with `fillchar`, which must be
+ a width-1 character.
+
+ "Left" here means toward the beginning of the string, which may actually
+ appear on the right in an RTL context. This is similar to the use of the
+ word "left" in "left parenthesis".
+
+ >>> lines = ['Table flip', '(╯°□°)╯︵ ┻━┻', 'ちゃぶ台返し']
+ >>> for line in lines:
+ ... print(display_ljust(line, 20, '▒'))
+ Table flip▒▒▒▒▒▒▒▒▒▒
+ (╯°□°)╯︵ ┻━┻▒▒▒▒▒▒▒
+ ちゃぶ台返し▒▒▒▒▒▒▒▒
+
+ This example, and the similar ones that follow, should come out justified
+ correctly when viewed in a monospaced terminal. It will probably not look
+ correct if you're viewing this code or documentation in a Web browser.
+ """
+ if character_width(fillchar) != 1:
+ raise ValueError("The padding character must have display width 1")
+
+ text_width = monospaced_width(text)
+ if text_width == -1:
+ # There's a control character here, so just don't add padding
+ return text
+
+ padding = max(0, width - text_width)
+ return text + fillchar * padding
+
+
+def display_rjust(text: str, width: int, fillchar: str = " ") -> str:
+ """
+ Return `text` right-justified in a Unicode string whose display width,
+ in a monospaced terminal, should be at least `width` character cells.
+ The rest of the string will be padded with `fillchar`, which must be
+ a width-1 character.
+
+ "Right" here means toward the end of the string, which may actually be on
+ the left in an RTL context. This is similar to the use of the word "right"
+ in "right parenthesis".
+
+ >>> lines = ['Table flip', '(╯°□°)╯︵ ┻━┻', 'ちゃぶ台返し']
+ >>> for line in lines:
+ ... print(display_rjust(line, 20, '▒'))
+ ▒▒▒▒▒▒▒▒▒▒Table flip
+ ▒▒▒▒▒▒▒(╯°□°)╯︵ ┻━┻
+ ▒▒▒▒▒▒▒▒ちゃぶ台返し
+ """
+ if character_width(fillchar) != 1:
+ raise ValueError("The padding character must have display width 1")
+
+ text_width = monospaced_width(text)
+ if text_width == -1:
+ return text
+
+ padding = max(0, width - text_width)
+ return fillchar * padding + text
+
+
+def display_center(text: str, width: int, fillchar: str = " ") -> str:
+ """
+ Return `text` centered in a Unicode string whose display width, in a
+ monospaced terminal, should be at least `width` character cells. The rest
+ of the string will be padded with `fillchar`, which must be a width-1
+ character.
+
+ >>> lines = ['Table flip', '(╯°□°)╯︵ ┻━┻', 'ちゃぶ台返し']
+ >>> for line in lines:
+ ... print(display_center(line, 20, '▒'))
+ ▒▒▒▒▒Table flip▒▒▒▒▒
+ ▒▒▒(╯°□°)╯︵ ┻━┻▒▒▒▒
+ ▒▒▒▒ちゃぶ台返し▒▒▒▒
+ """
+ if character_width(fillchar) != 1:
+ raise ValueError("The padding character must have display width 1")
+
+ text_width = monospaced_width(text)
+ if text_width == -1:
+ return text
+
+ padding = max(0, width - text_width)
+ left_padding = padding // 2
+ right_padding = padding - left_padding
+ return fillchar * left_padding + text + fillchar * right_padding
diff --git a/minigpt2/lib/python3.10/site-packages/ftfy/py.typed b/minigpt2/lib/python3.10/site-packages/ftfy/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391