diff --git a/.gitattributes b/.gitattributes index 3264653a3fa4a684b8b471ba641cb2111b70b5dc..749e155835033390f51715d77e3dddfa6bcc3743 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1110,3 +1110,17 @@ mgm/lib/python3.10/site-packages/pandas/_libs/writers.cpython-310-x86_64-linux-g mgm/lib/python3.10/site-packages/pandas/_libs/groupby.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text mgm/lib/python3.10/site-packages/pandas/_libs/hashing.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text mgm/lib/python3.10/site-packages/pandas/_libs/index.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121.so filter=lfs diff=lfs merge=lfs -text +mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda111_nocublaslt.so filter=lfs diff=lfs merge=lfs -text +mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda118_nocublaslt.so filter=lfs diff=lfs merge=lfs -text +mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda120.so filter=lfs diff=lfs merge=lfs -text +mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda111.so filter=lfs diff=lfs merge=lfs -text +mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda120_nocublaslt.so filter=lfs diff=lfs merge=lfs -text +mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda114.so filter=lfs diff=lfs merge=lfs -text +mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121_nocublaslt.so filter=lfs diff=lfs merge=lfs -text +mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda110_nocublaslt.so filter=lfs diff=lfs merge=lfs -text +mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda115_nocublaslt.so filter=lfs diff=lfs merge=lfs -text +mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda117_nocublaslt.so filter=lfs diff=lfs merge=lfs -text +mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda122.so filter=lfs diff=lfs merge=lfs -text +mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda122_nocublaslt.so filter=lfs diff=lfs merge=lfs -text +mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda110.so filter=lfs diff=lfs merge=lfs -text diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/__init__.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10fb399bd63a4fa55f9387b6d09a69228def5a3e Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/__init__.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/__main__.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70d9d86281232319ae98078437ad4afeb539f70d Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/__main__.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/cextension.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/cextension.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e22ca72c80465da387bb1b3c4906027668ccfae Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/cextension.cpython-310.pyc differ diff --git 
a/mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/functional.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/functional.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45396bc92248a9e8dc24358d124b40b91553f800 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/functional.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/utils.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2390bb6d2182764b8916bca852e950db899d483 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/utils.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/autograd/__pycache__/__init__.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/autograd/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89958bb889f3ac377398cc388f806b28f8a13ec5 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/autograd/__pycache__/__init__.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/autograd/__pycache__/_functions.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/autograd/__pycache__/_functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9cc09e509c7af8c16095a3b974f9e520568d9901 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/autograd/__pycache__/_functions.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/cextension.py b/mgm/lib/python3.10/site-packages/bitsandbytes/cextension.py new file mode 100644 index 0000000000000000000000000000000000000000..d52a6d60722f271ea34435561d0235bba32f9622 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/cextension.py @@ -0,0 +1,42 @@ +import ctypes as ct +import os +import torch + +from pathlib import Path +from warnings import warn + +from bitsandbytes.cuda_setup.main import CUDASetup + + +setup = CUDASetup.get_instance() +if not setup.initialized: + setup.run_cuda_setup() + +lib = setup.lib +try: + if lib is None and torch.cuda.is_available(): + CUDASetup.get_instance().generate_instructions() + CUDASetup.get_instance().print_log_stack() + raise RuntimeError(''' + CUDA Setup failed despite GPU being available. Please run the following command to get more information: + + python -m bitsandbytes + + Inspect the output of the command and see if you can locate CUDA libraries. You might need to add them + to your LD_LIBRARY_PATH. If you suspect a bug, please take the information from python -m bitsandbytes + and open an issue at: https://github.com/TimDettmers/bitsandbytes/issues''') + lib.cadam32bit_grad_fp32 # raises AttributeError if the library could not be found -> COMPILED_WITH_CUDA=False + lib.get_context.restype = ct.c_void_p + lib.get_cusparse.restype = ct.c_void_p + lib.cget_managed_ptr.restype = ct.c_void_p + COMPILED_WITH_CUDA = True +except AttributeError as ex: + warn("The installed version of bitsandbytes was compiled without GPU support. 
" + "8-bit optimizers, 8-bit multiplication, and GPU quantization are unavailable.") + COMPILED_WITH_CUDA = False + print(str(ex)) + + +# print the setup details after checking for errors so we do not print twice +#if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0': + #setup.print_log_stack() diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/__init__.py b/mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/__pycache__/__init__.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af4db6adb04e55783e4c43910c32a53c751b86f8 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/__pycache__/__init__.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/__pycache__/env_vars.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/__pycache__/env_vars.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d46aeb9661b020aeec1c03e19c6f09f09faf0565 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/__pycache__/env_vars.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/__pycache__/main.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/__pycache__/main.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc5264f3707eaa7510274ccd7939a086e004d099 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/__pycache__/main.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/env_vars.py b/mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/env_vars.py new file mode 100644 index 0000000000000000000000000000000000000000..4fcb643eeecf990b1b3d02fb4f6039bdeba38640 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/env_vars.py @@ -0,0 +1,52 @@ +import os +from typing import Dict + + +def to_be_ignored(env_var: str, value: str) -> bool: + ignorable = { + "PWD", # PWD: this is how the shell keeps track of the current working dir + "OLDPWD", + "SSH_AUTH_SOCK", # SSH stuff, therefore unrelated + "SSH_TTY", + "HOME", # Linux shell default + "TMUX", # Terminal Multiplexer + "XDG_DATA_DIRS", # XDG: Desktop environment stuff + "XDG_GREETER_DATA_DIR", # XDG: Desktop environment stuff + "XDG_RUNTIME_DIR", + "MAIL", # something related to emails + "SHELL", # binary for currently invoked shell + "DBUS_SESSION_BUS_ADDRESS", # hardware related + "PATH", # this is for finding binaries, not libraries + "LESSOPEN", # related to the `less` command + "LESSCLOSE", + "_", # current Python interpreter + } + return env_var in ignorable + + +def might_contain_a_path(candidate: str) -> bool: + return "/" in candidate + + +def is_active_conda_env(env_var: str) -> bool: + return "CONDA_PREFIX" == env_var + + +def is_other_conda_env_var(env_var: str) -> bool: + return "CONDA" in env_var + + +def is_relevant_candidate_env_var(env_var: str, value: str) -> bool: + return is_active_conda_env(env_var) or ( + might_contain_a_path(value) and not + is_other_conda_env_var(env_var) and not + to_be_ignored(env_var, value) 
+ ) + + +def get_potentially_lib_path_containing_env_vars() -> Dict[str, str]: + return { + env_var: value + for env_var, value in os.environ.items() + if is_relevant_candidate_env_var(env_var, value) + } diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/main.py b/mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/main.py new file mode 100644 index 0000000000000000000000000000000000000000..f3edf4c7385a73cbea00fb3b6d19f4d800cf8a50 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/main.py @@ -0,0 +1,364 @@ +""" +extract factors the build is dependent on: +[X] compute capability + [ ] TODO: Q - What if we have multiple GPUs of different makes? +- CUDA version +- Software: + - CPU-only: only CPU quantization functions (no optimizer, no matrix multiplication) + - CuBLAS-LT: full-build 8-bit optimizer + - no CuBLAS-LT: no 8-bit matrix multiplication (`nomatmul`) + +evaluation: + - if paths faulty, return meaningful error + - else: + - determine CUDA version + - determine capabilities + - based on that set the default path +""" + +import ctypes as ct +import os +import errno +import torch +from warnings import warn +from itertools import product + +from pathlib import Path +from typing import Set, Union +from .env_vars import get_potentially_lib_path_containing_env_vars + +# these are the most common lib names +# libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead +# we have libcudart.so.11.0 which caused a lot of errors before +# not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt +CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0'] + +# this is an ordered list of backup paths to search for CUDA in, if it cannot be found in the main environmental paths +backup_paths = [] +backup_paths.append('$CONDA_PREFIX/lib/libcudart.so.11.0') + +class CUDASetup: + _instance = None + + def __init__(self): + raise RuntimeError("Call get_instance() instead") + + def generate_instructions(self): + if getattr(self, 'error', False): return + print(self.error) + self.error = True + if not self.cuda_available: + self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected or CUDA not installed.') + self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. 
You can update them via: sudo ldconfig.') + self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:') + self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null') + self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a') + self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc') + self.add_log_entry('CUDA SETUP: Solution 3): For a missing CUDA runtime library (libcudart.so), use `find / -name libcudart.so*` and follow with step (2b)') + return + + if self.cudart_path is None: + self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.') + self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable') + self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null') + self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a') + self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc') + self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.') + self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh') + self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.') + self.add_log_entry('CUDA SETUP: Solution 2c): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install it into the folder ~/local') + return + + make_cmd = f'CUDA_VERSION={self.cuda_version_string}' + if len(self.cuda_version_string) < 3: + make_cmd += ' make cuda92' + elif self.cuda_version_string == '110': + make_cmd += ' make cuda110' + elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0: + make_cmd += ' make cuda11x' + elif self.cuda_version_string == '100': + self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.') + self.add_log_entry('CUDA SETUP: Before you try again running bitsandbytes, make sure old CUDA 10.0 versions are uninstalled and removed from $LD_LIBRARY_PATH variables.') + return + + + has_cublaslt = is_cublasLt_compatible(self.cc) + if not has_cublaslt: + make_cmd += '_nomatmul' + + self.add_log_entry('CUDA SETUP: Something unexpected happened. 
Please compile from source:') + self.add_log_entry('git clone https://github.com/TimDettmers/bitsandbytes.git') + self.add_log_entry('cd bitsandbytes') + self.add_log_entry(make_cmd) + self.add_log_entry('python setup.py install') + + def initialize(self): + if not getattr(self, 'initialized', False): + self.has_printed = False + self.lib = None + self.initialized = False + self.error = False + + def manual_override(self): + if torch.cuda.is_available(): + if 'BNB_CUDA_VERSION' in os.environ: + if len(os.environ['BNB_CUDA_VERSION']) > 0: + warn((f'\n\n{"="*80}\n' + 'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n' + 'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n' + 'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n' + 'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n' + 'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:')) + + +def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]: + return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path} + + +def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]: + existent_directories: Set[Path] = set() + for path in candidate_paths: + try: + if path.exists(): + existent_directories.add(path) + except PermissionError: + # PermissionError is a subclass of OSError, so it must be handled first + pass + except OSError as exc: + if exc.errno != errno.ENAMETOOLONG: + raise exc + + non_existent_directories: Set[Path] = candidate_paths - existent_directories + if non_existent_directories: + CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to " + f"be non-existent: {non_existent_directories}", is_warning=False) + + return existent_directories + + +def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]: + paths = set() + for libname in CUDA_RUNTIME_LIBS: + for path in candidate_paths: + if (path / libname).is_file(): + paths.add(path / libname) + return paths + + +def resolve_paths_list(paths_list_candidate: str) -> Set[Path]: + """ + Searches a given environmental var for the CUDA runtime library, + i.e. `libcudart.so`. + """ + return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate)) + + +def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]: + return get_cuda_runtime_lib_paths( + resolve_paths_list(paths_list_candidate) + ) + + +def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None: + if len(results_paths) > 1: + warning_msg = ( + f"Found duplicate {CUDA_RUNTIME_LIBS} files: {results_paths}. " + f"We select the PyTorch default libcudart.so, which is {torch.version.cuda}, " + "but this might mismatch with the CUDA version that is needed for bitsandbytes. " + "To override this behavior set the BNB_CUDA_VERSION= environmental variable. " + "For example, if you want to use the CUDA version 122: " + "BNB_CUDA_VERSION=122 python ... " + "OR set the environmental variable in your .bashrc: export BNB_CUDA_VERSION=122. " + "In the case of a manual override, make sure you set the LD_LIBRARY_PATH, e.g. " + "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.2") + CUDASetup.get_instance().add_log_entry(warning_msg, is_warning=True) + + +def determine_cuda_runtime_lib_path() -> Union[Path, None]: + """ + Searches for a CUDA installation, in the following order of priority: + 1. active conda env + 2. LD_LIBRARY_PATH + 3. 
any other env vars, while ignoring those that + - are known to be unrelated (see `bnb.cuda_setup.env_vars.to_be_ignored`) + - don't contain the path separator `/` + + If multiple libraries are found in part 3, we optimistically try one, + while giving a warning message. + """ + candidate_env_vars = get_potentially_lib_path_containing_env_vars() + + cuda_runtime_libs = set() + if "CONDA_PREFIX" in candidate_env_vars: + conda_libs_path = Path(candidate_env_vars["CONDA_PREFIX"]) / "lib" + + conda_cuda_libs = find_cuda_lib_in(str(conda_libs_path)) + warn_in_case_of_duplicates(conda_cuda_libs) + + if conda_cuda_libs: + cuda_runtime_libs.update(conda_cuda_libs) + else: + CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["CONDA_PREFIX"]} did not contain ' + f'{CUDA_RUNTIME_LIBS} as expected! Searching further paths...', is_warning=True) + + if "LD_LIBRARY_PATH" in candidate_env_vars: + lib_ld_cuda_libs = find_cuda_lib_in(candidate_env_vars["LD_LIBRARY_PATH"]) + + if lib_ld_cuda_libs: + cuda_runtime_libs.update(lib_ld_cuda_libs) + warn_in_case_of_duplicates(lib_ld_cuda_libs) + else: + CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["LD_LIBRARY_PATH"]} did not contain ' + f'{CUDA_RUNTIME_LIBS} as expected! Searching further paths...', is_warning=True) + + remaining_candidate_env_vars = { + env_var: value for env_var, value in candidate_env_vars.items() + if env_var not in {"CONDA_PREFIX", "LD_LIBRARY_PATH"} + } + + # keep the libraries already found above and add any found in the remaining env vars + for env_var, value in remaining_candidate_env_vars.items(): + cuda_runtime_libs.update(find_cuda_lib_in(value)) + + if len(cuda_runtime_libs) == 0: + CUDASetup.get_instance().add_log_entry('CUDA_SETUP: WARNING! libcudart.so not found in any environmental path. Searching in backup paths...') + cuda_runtime_libs.update(find_cuda_lib_in('/usr/local/cuda/lib64')) + + warn_in_case_of_duplicates(cuda_runtime_libs) + + cuda_setup = CUDASetup.get_instance() + cuda_setup.add_log_entry(f'DEBUG: Possible options found for libcudart.so: {cuda_runtime_libs}') + + return next(iter(cuda_runtime_libs)) if cuda_runtime_libs else None + + +# https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION +def get_cuda_version(): + major, minor = map(int, torch.version.cuda.split(".")) + + if major < 11: + CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA versions lower than 11 are currently not supported for LLM.int8(). You will only be able to use 8-bit optimizers and quantization routines!!') + + return f'{major}{minor}' + + +def get_compute_capabilities(): + ccs = [] + for i in range(torch.cuda.device_count()): + cc_major, cc_minor = torch.cuda.get_device_capability(torch.cuda.device(i)) + ccs.append(f"{cc_major}.{cc_minor}") + + return ccs + + +def evaluate_cuda_setup(): + cuda_setup = CUDASetup.get_instance() + if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0': + cuda_setup.add_log_entry('') + cuda_setup.add_log_entry('='*35 + 'BUG REPORT' + '='*35) + cuda_setup.add_log_entry('Welcome to bitsandbytes. 
For bug reports, please run\n\npython -m bitsandbytes\n\n' + 'and submit this information together with your error trace to: https://github.com/TimDettmers/bitsandbytes/issues') + cuda_setup.add_log_entry('='*80) + if not torch.cuda.is_available(): return 'libbitsandbytes_cpu.so', None, None, None + + cudart_path = determine_cuda_runtime_lib_path() + ccs = get_compute_capabilities() + ccs.sort() + cc = ccs[-1] # we take the highest capability + cuda_version_string = get_cuda_version() + + cuda_setup.add_log_entry(f"CUDA SETUP: PyTorch settings found: CUDA_VERSION={cuda_version_string}, Highest Compute Capability: {cc}.") + cuda_setup.add_log_entry("CUDA SETUP: To manually override the PyTorch CUDA version please see: " "https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md") + + + # 7.5 is the minimum CC for cuBLASLt + has_cublaslt = is_cublasLt_compatible(cc) + + # TODO: + # (1) CUDA missing cases (no CUDA installed, but the CUDA driver is present and nvidia-smi is accessible) + # (2) Multiple CUDA versions installed + + # we use ls -l instead of nvcc to determine the cuda version + # since most installations will have the libcudart.so installed, but not the compiler + + if has_cublaslt: + binary_name = f"libbitsandbytes_cuda{cuda_version_string}.so" + else: + # if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so + binary_name = f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt.so" + + return binary_name, cudart_path, cc, cuda_version_string diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/functional.py b/mgm/lib/python3.10/site-packages/bitsandbytes/functional.py new file mode 100644 index 0000000000000000000000000000000000000000..837f6bf1cf85ea5e8d8f509ed07937d6648a5ded --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/functional.py @@ -0,0 +1,2404 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+import ctypes as ct +import itertools +import operator +import random +import torch +import math +from scipy.stats import norm +import numpy as np + +from functools import reduce # Required in Python 3 +from typing import Tuple +from torch import Tensor + +from .cextension import COMPILED_WITH_CUDA, lib + + +# math.prod is not available in Python < 3.8, so we define our own +def prod(iterable): + return reduce(operator.mul, iterable, 1) + +name2qmap = {} + +if COMPILED_WITH_CUDA: + """C FUNCTIONS FOR OPTIMIZERS""" + str2optimizer32bit = {} + str2optimizer32bit["adam"] = (lib.cadam32bit_grad_fp32, lib.cadam32bit_grad_fp16, lib.cadam32bit_grad_bf16) + str2optimizer32bit["momentum"] = ( + lib.cmomentum32bit_grad_32, + lib.cmomentum32bit_grad_16, + ) + str2optimizer32bit["rmsprop"] = ( + lib.crmsprop32bit_grad_32, + lib.crmsprop32bit_grad_16, + ) + str2optimizer32bit["lion"] = (lib.clion32bit_grad_fp32, lib.clion32bit_grad_fp16, lib.clion32bit_grad_bf16) + str2optimizer32bit["adagrad"] = ( + lib.cadagrad32bit_grad_32, + lib.cadagrad32bit_grad_16, + ) + + str2optimizer8bit = {} + str2optimizer8bit["adam"] = ( + lib.cadam_static_8bit_grad_32, + lib.cadam_static_8bit_grad_16, + ) + str2optimizer8bit["momentum"] = ( + lib.cmomentum_static_8bit_grad_32, + lib.cmomentum_static_8bit_grad_16, + ) + str2optimizer8bit["rmsprop"] = ( + lib.crmsprop_static_8bit_grad_32, + lib.crmsprop_static_8bit_grad_16, + ) + str2optimizer8bit["lion"] = ( + lib.clion_static_8bit_grad_32, + lib.clion_static_8bit_grad_16, + ) + str2optimizer8bit["lamb"] = ( + lib.cadam_static_8bit_grad_32, + lib.cadam_static_8bit_grad_16, + ) + str2optimizer8bit["lars"] = ( + lib.cmomentum_static_8bit_grad_32, + lib.cmomentum_static_8bit_grad_16, + ) + + str2optimizer8bit_blockwise = {} + str2optimizer8bit_blockwise["adam"] = ( + lib.cadam_8bit_blockwise_grad_fp32, + lib.cadam_8bit_blockwise_grad_fp16, + lib.cadam_8bit_blockwise_grad_bf16, + ) + str2optimizer8bit_blockwise["momentum"] = ( + lib.cmomentum_8bit_blockwise_grad_fp32, + lib.cmomentum_8bit_blockwise_grad_fp16, + ) + str2optimizer8bit_blockwise["rmsprop"] = ( + lib.crmsprop_8bit_blockwise_grad_fp32, + lib.crmsprop_8bit_blockwise_grad_fp16, + ) + str2optimizer8bit_blockwise["lion"] = ( + lib.clion_8bit_blockwise_grad_fp32, + lib.clion_8bit_blockwise_grad_fp16, + lib.clion_8bit_blockwise_grad_bf16, + ) + str2optimizer8bit_blockwise["adagrad"] = ( + lib.cadagrad_8bit_blockwise_grad_fp32, + lib.cadagrad_8bit_blockwise_grad_fp16, + ) + +class GlobalPageManager: + _instance = None + + def __init__(self): + raise RuntimeError("Call get_instance() instead") + + def initialize(self): + self.paged_tensors = [] + + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + + def prefetch_all(self, to_cpu=False): + # assume that the tensors added first will be the + # ones that are used first, so swap them in last + # in case they are evicted again + for t in self.paged_tensors[::-1]: + prefetch_tensor(t, to_cpu) + + + +class CUBLAS_Context: + _instance = None + + def __init__(self): + raise RuntimeError("Call get_instance() instead") + + def initialize(self): + self.context = {} + + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + + def get_context(self, device): + if device.index not in self.context: + prev_device = torch.cuda.current_device() + torch.cuda.set_device(device) + 
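# lib.get_context() allocates the cuBLAS handle on the *current* CUDA device, so we + # switch to the target device first and restore the previous device afterwards + 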
self.context[device.index] = ct.c_void_p(lib.get_context()) + torch.cuda.set_device(prev_device) + return self.context[device.index] + + +class Cusparse_Context: + _instance = None + + def __init__(self): + raise RuntimeError("Call get_instance() instead") + + def initialize(self): + self.context = ct.c_void_p(lib.get_cusparse()) + + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + cls._instance.initialize() + return cls._instance + +dtype2bytes = {} +dtype2bytes[torch.float32] = 4 +dtype2bytes[torch.float16] = 2 +dtype2bytes[torch.bfloat16] = 2 +dtype2bytes[torch.uint8] = 1 +dtype2bytes[torch.int8] = 1 + +def get_paged(*shape, dtype=torch.float32, device=torch.device('cuda', index=0)): + num_bytes = dtype2bytes[dtype]*prod(shape) + cuda_ptr = lib.cget_managed_ptr(ct.c_size_t(num_bytes)) + c_ptr = ct.cast(cuda_ptr, ct.POINTER(ct.c_int)) + new_array = np.ctypeslib.as_array(c_ptr, shape=shape) + out = torch.frombuffer(new_array, dtype=dtype, count=prod(shape)).view(shape) + out.is_paged = True + out.page_deviceid = device.index + return out + +def prefetch_tensor(A, to_cpu=False): + assert A.is_paged, 'Only paged tensors can be prefetched!' + if to_cpu: + deviceid = -1 + else: + deviceid = A.page_deviceid + + num_bytes = dtype2bytes[A.dtype]*A.numel() + lib.cprefetch(get_ptr(A), ct.c_size_t(num_bytes), ct.c_int32(deviceid)) + +def elementwise_func(func_name, A, B, value, prefetch=True): + func = None + if A.dtype == torch.float32: + func = getattr(lib, f'c{func_name}_fp32', None) + cvalue = ct.c_float(value) + elif A.dtype == torch.uint8: + func = getattr(lib, f'c{func_name}_uint8', None) + cvalue = ct.c_uint8(value) + + if func is None: raise NotImplementedError(f'Function not implemented: {func_name}') + + is_managed = getattr(A, 'is_managed', False) + if is_managed and prefetch: + prefetch_tensor(A) + if B is not None: prefetch_tensor(B) + + func(get_ptr(A), get_ptr(B), cvalue, ct.c_int64(A.numel())) + if getattr(A, 'is_paged', False) or (B is not None and getattr(B, 'is_paged', False)): + # paged functions are fully asynchronous + # if we return from this function, we want the tensor + # to be in the correct state, that is the final state after the + # operation occurred. So we synchronize. 
+ torch.cuda.synchronize() + +def fill(A, value, device=None, prefetch=True): elementwise_func('fill', A, None, value) +def arange(A, device=None): elementwise_func('arange', A, None, 0) +def _mul(A, B, device=None): elementwise_func('_mul', A, B, 0) + + +def create_linear_map(signed=True, total_bits=8, add_zero=True): + sign = (-1.0 if signed else 0.0) + total_values = 2**total_bits + if add_zero or total_bits < 8: + # add a zero + # since we simulate fewer bits by having zeros in the data type, + # we need to center the quantization around zero and as such lose + # a single value + total_values = (2**total_bits if not signed else 2**total_bits-1) + + values = torch.linspace(sign, 1.0, total_values) + gap = 256 - values.numel() + if gap == 0: + return values + else: + l = values.numel()//2 + return torch.Tensor(values[:l].tolist() + [0]*gap + values[l:].tolist()) + +def create_normal_map(offset=0.9677083, use_extra_value=True): + + if use_extra_value: + # one more positive value, this is an asymmetric type + v1 = norm.ppf(torch.linspace(offset, 0.5, 9)[:-1]).tolist() + v2 = [0]*(256-15) ## we have 15 non-zero values in this data type + v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist() + else: + v1 = norm.ppf(torch.linspace(offset, 0.5, 8)[:-1]).tolist() + v2 = [0]*(256-14) ## we have 14 non-zero values in this data type + v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist() + + v = v1 + v2 + v3 + + values = torch.Tensor(v) + values = values.sort().values + values /= values.max() + + assert values.numel() == 256 + + return values + +def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8): + e = exponent_bits + p = precision_bits + has_sign = 1 if signed else 0 + assert e+p == total_bits-has_sign + # the exponent is biased to 2^(e-1) -1 == 0 + evalues = [] + pvalues = [] + for i, val in enumerate(range(-((2**(exponent_bits-has_sign))), 2**(exponent_bits-has_sign), 1)): + evalues.append(2**val) + + + values = [] + lst = list(itertools.product([0, 1], repeat=precision_bits)) + #for ev in evalues: + bias = 2**(exponent_bits-1) + for evalue in range(2**(exponent_bits)): + for bit_pattern in lst: + value = (1 if evalue != 0 else 0) + for i, pval in enumerate(list(bit_pattern)): + value += pval*(2**-(i+1)) + if evalue == 0: + # subnormals + value = value*2**-(bias) + else: + # normals + value = value*2**-(evalue-bias-1) + values.append(value) + if signed: + values.append(-value) + + + assert len(values) == 2**total_bits + values.sort() + if total_bits < 8: + gap = 256 - len(values) + for i in range(gap): + values.append(0) + values.sort() + code = torch.Tensor(values) + code /= code.max() + + return code + + + +def create_dynamic_map(signed=True, max_exponent_bits=7, total_bits=8): + """ + Creates the dynamic quantization map. + + The dynamic data type is made up of a dynamic exponent and + fraction. As the exponent increases from 0 to -7 the number + of bits available for the fraction shrinks. + + This is a generalization of the dynamic type where a certain + number of the bits can be reserved for the linear quantization + region (the fraction). `max_exponent_bits` determines the maximum number of + exponent bits. 
+ + For more details see + [8-Bit Approximations for Parallelism in Deep Learning](https://arxiv.org/abs/1511.04561) + """ + + data = [] + # these are additional items that come from the case + # where all the exponent bits are zero and no + # indicator bit is present + non_sign_bits = total_bits - (1 if signed else 0) + additional_items = 2 ** (non_sign_bits - max_exponent_bits) - 1 + if not signed: + additional_items = 2 * additional_items + for i in range(max_exponent_bits): + fraction_items = int((2 ** (i + non_sign_bits - max_exponent_bits) + 1 if signed else 2 ** (i + non_sign_bits - max_exponent_bits + 1) + 1)) + boundaries = torch.linspace(0.1, 1, fraction_items) + means = (boundaries[:-1] + boundaries[1:]) / 2.0 + data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist() + if signed: + data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist() + + if additional_items > 0: + boundaries = torch.linspace(0.1, 1, additional_items + 1) + means = (boundaries[:-1] + boundaries[1:]) / 2.0 + data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist() + if signed: + data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist() + + data.append(0) + data.append(1.0) + + gap = 256 - len(data) + for i in range(gap): + data.append(0) + + data.sort() + return Tensor(data) + +def create_quantile_map(A, total_bits=8): + q = estimate_quantiles(A, num_quantiles=2**total_bits-1) + q = q.tolist() + q.append(0) + + gap = 256 - len(q) + for i in range(gap): + q.append(0) + + q.sort() + + q = Tensor(q) + q = q/q.abs().max() + return q + +def get_special_format_str(): + if not torch.cuda.is_available(): return 'col_turing' + major, _minor = torch.cuda.get_device_capability() + if major <= 7: + return "col_turing" + if major == 8: + return "col_ampere" + return "col_turing" + + + +def is_on_gpu(tensors): + on_gpu = True + gpu_ids = set() + for t in tensors: + if t is None: continue # NULL pointers are fine + is_paged = getattr(t, 'is_paged', False) + on_gpu &= (t.device.type == 'cuda' or is_paged) + if not is_paged: + gpu_ids.add(t.device.index) + if not on_gpu: + raise TypeError(f'All input tensors need to be on the same GPU, but found some tensors that are not on a GPU:\n {[(t.shape, t.device) for t in tensors]}') + if len(gpu_ids) > 1: + raise TypeError(f'Input tensors need to be on the same GPU, but found the following tensor and device combinations:\n {[(t.shape, t.device) for t in tensors]}') + return on_gpu + +def get_ptr(A: Tensor) -> ct.c_void_p: + """ + Get the ctypes pointer from a PyTorch Tensor. + + Parameters + ---------- + A : torch.tensor + The PyTorch tensor. 
+ + Returns + ------- + ctypes.c_void_p + """ + if A is None: + return None + else: + return ct.c_void_p(A.data.data_ptr()) + + +def pre_call(device): + prev_device = torch.cuda.current_device() + torch.cuda.set_device(device) + return prev_device + + +def post_call(prev_device): + torch.cuda.set_device(prev_device) + + +def get_transform_func(dtype, orderA, orderOut, transpose=False): + name = f'ctransform_{(8 if dtype == torch.int8 else 32)}_{orderA}_to_{orderOut}_{"t" if transpose else "n"}' + if not hasattr(lib, name): + print(name) + raise ValueError( + f"Transform function not supported: {orderA} to {orderOut} for data type {dtype} and transpose={transpose}" + ) + else: + return getattr(lib, name) + + +def get_transform_buffer( + shape, dtype, device, to_order, from_order="row", transpose=False +): + # init_func = torch.empty + init_func = torch.zeros + dims = len(shape) + + if dims == 2: + rows = shape[0] + elif dims == 3: + rows = shape[0] * shape[1] + cols = shape[-1] + + state = (shape, to_order) + if transpose: + # swap dims + tmp = rows + rows = cols + cols = tmp + state = (shape[::-1], to_order) + + if to_order == "row" or to_order == "col": + return init_func(shape, dtype=dtype, device=device), state + elif to_order == "col32": + # blocks of 32 columns (padded) + cols = 32 * ((cols + 31) // 32) + return init_func((rows, cols), dtype=dtype, device=device), state + elif to_order == "col_turing": + # blocks of 32 columns and 8 rows + cols = 32 * ((cols + 31) // 32) + rows = 8 * ((rows + 7) // 8) + return init_func((rows, cols), dtype=dtype, device=device), state + elif to_order == "col_ampere": + # blocks of 32 columns and 32 rows + cols = 32 * ((cols + 31) // 32) + rows = 32 * ((rows + 31) // 32) + return init_func((rows, cols), dtype=dtype, device=device), state + else: + raise NotImplementedError(f"To_order not supported: {to_order}") + + +def nvidia_transform( + A, + to_order, + from_order="row", + out=None, + transpose=False, + state=None, + ld=None, +): + if state is None: + state = (A.shape, from_order) + else: + from_order = state[1] + if out is None: + out, new_state = get_transform_buffer( + state[0], A.dtype, A.device, to_order, state[1] + ) + else: + new_state = (state[1], to_order) + func = get_transform_func(A.dtype, from_order, to_order, transpose) + + shape = state[0] + if len(shape) == 2: + dim1 = ct.c_int32(shape[0]) + dim2 = ct.c_int32(shape[1]) + elif ld is not None: + n = prod(shape) + dim1 = prod([shape[i] for i in ld]) + dim2 = ct.c_int32(n // dim1) + dim1 = ct.c_int32(dim1) + else: + dim1 = ct.c_int32(shape[0] * shape[1]) + dim2 = ct.c_int32(shape[2]) + + ptr = CUBLAS_Context.get_instance().get_context(A.device) + func(ptr, get_ptr(A), get_ptr(out), dim1, dim2) + + return out, new_state + + +def estimate_quantiles(A: Tensor, out: Tensor = None, offset: float = 1 / 512, num_quantiles=256) -> Tensor: + ''' + Estimates 256 equidistant quantiles on the input tensor eCDF. + + Uses the SRAM-Quantiles algorithm to quickly estimate 256 equidistant quantiles + via the eCDF of the input tensor `A`. This is a fast but approximate algorithm + and the extreme quantiles close to 0 and 1 have high variance / large estimation + errors. These large errors can be avoided by using the offset variable which trims + the distribution. The default offset value of 1/512 ensures minimum entropy encoding -- it + trims 1/512 = 0.2% from each side of the distribution. An offset value of 0.01 to 0.02 + usually has a much lower error but is not a minimum entropy encoding. 
Given an offset + of 0.02, equidistant points in the range [0.02, 0.98] are used for the quantiles. + + Parameters + ---------- + A : torch.Tensor + The input tensor. Any shape. + out : torch.Tensor + Tensor with the 256 estimated quantiles. + offset : float + The offset for the first and last quantile from 0 and 1. Default: 1/(2*num_quantiles) + num_quantiles : int + The number of equally spaced quantiles. + + Returns + ------- + torch.Tensor: + The 256 quantiles in float32 datatype. + ''' + if A.numel() < 256: raise NotImplementedError(f'Quantile estimation needs at least 256 values in the Tensor, but Tensor had only {A.numel()} values.') + if num_quantiles > 256: raise NotImplementedError(f"Currently only a maximum of 256 equally spaced quantiles are supported, but the argument num_quantiles={num_quantiles}") + if num_quantiles < 256 and offset == 1/(512): + # override default arguments + offset = 1/(2*num_quantiles) + + if out is None: out = torch.zeros((256,), dtype=torch.float32, device=A.device) + is_on_gpu([A, out]) + device = pre_call(A.device) + if A.dtype == torch.float32: + lib.cestimate_quantiles_fp32(get_ptr(A), get_ptr(out), ct.c_float(offset), ct.c_int(A.numel())) + elif A.dtype == torch.float16: + lib.cestimate_quantiles_fp16(get_ptr(A), get_ptr(out), ct.c_float(offset), ct.c_int(A.numel())) + else: + raise NotImplementedError(f"Not supported data type {A.dtype}") + post_call(device) + + if num_quantiles < 256: + step = round(256/num_quantiles) + idx = torch.linspace(0, 255, num_quantiles).long().to(A.device) + out = out[idx] + + return out + + +def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor: + """ + Quantize tensor A in blocks of size `blocksize` values (default 4096). + + Quantizes tensor A by dividing it into blocks of `blocksize` values. + Then the absolute maximum value within these blocks is calculated + for the non-linear quantization. + + Parameters + ---------- + A : torch.Tensor + The input tensor. + code : torch.Tensor + The quantization map. + absmax : torch.Tensor + The absmax values. + out : torch.Tensor + The output tensor (8-bit). + + Returns + ------- + torch.Tensor: + The 8-bit tensor. + tuple(torch.Tensor, torch.Tensor): + The quantization state to undo the quantization. 
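+ + Example + ------- + Illustrative round trip (a sketch; assumes a CUDA build and a CUDA tensor): + + >>> A = torch.randn(4096, device='cuda') + >>> q, state = quantize_blockwise(A) + >>> A_dq = dequantize_blockwise(q, state) 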
+ """ + + + if code is None: + if "dynamic" not in name2qmap: + name2qmap["dynamic"] = create_dynamic_map().to(A.device) + code = name2qmap["dynamic"] + + if absmax is None: + n = A.numel() + blocks = n // blocksize + blocks += 1 if n % blocksize > 0 else 0 + absmax = torch.zeros((blocks,), device=A.device, dtype=torch.float32) + + if out is None: + out = torch.zeros_like(A, dtype=torch.uint8) + + if A.device.type != 'cpu': + assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64] + cblocksize = ct.c_int32(blocksize) + prev_device = pre_call(A.device) + code = code.to(A.device) + is_on_gpu([code, A, out, absmax]) + if A.dtype == torch.float32: + lib.cquantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel())) + elif A.dtype == torch.float16: + lib.cquantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel())) + elif A.dtype == torch.bfloat16: + lib.cquantize_blockwise_bf16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel())) + else: + raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}") + post_call(A.device) + else: + # cpu + code = code.cpu() + lib.cquantize_blockwise_cpu_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel())) + + if nested: + offset = absmax.mean() + absmax -= offset + qabsmax, state2 = quantize_blockwise(absmax, blocksize=blocksize, nested=False) + state = [qabsmax, code, blocksize, nested, A.dtype, offset, state2] + else: + state = [absmax, code, blocksize, nested, A.dtype, None, None] + + return out, state + + +def dequantize_blockwise( + A: Tensor, + quant_state: Tuple[Tensor, Tensor] = None, + absmax: Tensor = None, + code: Tensor = None, + out: Tensor = None, + blocksize: int = 4096, + nested=False +) -> Tensor: + """ + Dequantizes blockwise quantized values. + + Dequantizes the tensor A with maximum absolute values absmax in + blocks of size 4096. + + Parameters + ---------- + A : torch.Tensor + The input 8-bit tensor. + quant_state : tuple(torch.Tensor, torch.Tensor) + Tuple of code and absmax values. + absmax : torch.Tensor + The absmax values. + code : torch.Tensor + The quantization map. + out : torch.Tensor + Dequantized output tensor (default: float32) + + + Returns + ------- + torch.Tensor: + Dequantized tensor (default: float32) + """ + assert quant_state is not None or absmax is not None + if code is None and quant_state is None: + if "dynamic" not in name2qmap: + name2qmap["dynamic"] = create_dynamic_map().to(A.device) + code = name2qmap["dynamic"] + + if quant_state is None: + quant_state = (absmax, code, blocksize, False, torch.float32, None, None) + + absmax, code, blocksize, nested, dtype, offset, state2 = quant_state + + if nested: + absmax = dequantize_blockwise(absmax, state2) + absmax += offset + if absmax.dtype != torch.float32: absmax = absmax.float() + + if out is None: + out = torch.empty(A.shape, dtype=dtype, device=A.device) + + if A.device.type != 'cpu': + device = pre_call(A.device) + code = code.to(A.device) + if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]: + raise ValueError(f"The blockwise of {blocksize} is not supported. 
Supported values: [2048, 4096, 1024, 512, 256, 128, 64]") + is_on_gpu([A, absmax, out]) + if out.dtype == torch.float32: + lib.cdequantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel())) + elif out.dtype == torch.float16: + lib.cdequantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel())) + elif out.dtype == torch.bfloat16: + lib.cdequantize_blockwise_bf16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel())) + else: + raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}") + post_call(A.device) + else: + code = code.cpu() + lib.cdequantize_blockwise_cpu_fp32(get_ptr(quant_state[1]), get_ptr(A), get_ptr(quant_state[0]), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel())) + + return out + +def get_4bit_type(typename, device=None, blocksize=64): + if device is None: device = 'cuda' + data = None + if typename == 'nf4': + ''' Implements the NF4 data type. + + Constructs a quantization data type where each bin has equal area under a standard normal distribution N(0, 1) that + is normalized into the range [-1, 1]. + + For more information read the paper: QLoRA: Efficient Finetuning of Quantized LLMs (https://arxiv.org/abs/2305.14314) + + Implementation of the NF4 data type in bitsandbytes can be found in the `create_normal_map` function in + the `functional.py` file: https://github.com/TimDettmers/bitsandbytes/blob/main/bitsandbytes/functional.py#L236. + ''' + data = [-1.0, -0.6961928009986877, -0.5250730514526367, -0.39491748809814453, -0.28444138169288635, + -0.18477343022823334, -0.09105003625154495, 0.0, 0.07958029955625534, 0.16093020141124725, + 0.24611230194568634, 0.33791524171829224, 0.44070982933044434, 0.5626170039176941, + 0.7229568362236023, 1.0] + elif typename == 'fp4': + # 0b000 = 0 + # 0b001 = 0.0625 + # 0b010 = 8 + # 0b011 = 12 + # 0b100 = 4 + # 0b101 = 6 + # 0b110 = 2 + # 0b111 = 3 + # can also be created with bnb.functional.create_fp8_map(signed=True, exponent_bits=2, precision_bits=1, total_bits=4) + data = [0, 0.0625, 8.0, 12.0, 4.0, 6.0, 2.0, 3.0, -0, -0.0625, -8.0, -12.0, -4.0, -6.0, -2.0, -3.0] + elif typename == 'int4': + data = [7, 6, 5, 4, 3, 2, 1, 0, -0, -1, -2, -3, -4, -5, -6, -7] + elif typename == 'af4': + # Taken from: NF4 Isn't Information Theoretically Optimal (and that's Good) + # https://arxiv.org/abs/2306.06965 + if blocksize == 64: + data = [-1., -0.69441008, -0.51243739, -0.3736951, -0.25607552, -0.14982478, + -0.04934812, 0., 0.04273164, 0.12934483, 0.21961274, 0.31675666, + 0.42563882, 0.55496234, 0.72424863, 1.][::-1] + else: + raise NotImplementedError(f'4-bit AbnormalFloats currently only support blocksize 64.') + + if data is None: + raise NotImplementedError(f'Typename {typename} not supported') + + data = Tensor(data) + data /= data.abs().max() + assert data.numel() == 16 + + return data.to(device) + + + +def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False): + return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'fp4') + +def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False): + return quantize_4bit(A, absmax, out, blocksize, compress_statistics, 'nf4') + +def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4') -> 
Tensor: + """ + Quantize tensor A in blocks of 4-bit values. + + Quantizes tensor A by dividing it into blocks which are independently quantized to FP4. + + Parameters + ---------- + A : torch.Tensor + The input tensor. + absmax : torch.Tensor + The absmax values. + out : torch.Tensor + The output tensor (8-bit). + blocksize : int + The blocksize used in quantization. + quant_type : str + The 4-bit quantization data type {fp4, nf4} + + Returns + ------- + torch.Tensor: + The 8-bit tensor with packed 4-bit values. + tuple(torch.Tensor, torch.Size, torch.dtype, int): + The quantization state to undo the quantization. + """ + if A.device.type != 'cuda': + raise NotImplementedError(f'Device type not supported for FP4 quantization: {A.device.type}') + if quant_type not in ['fp4', 'nf4']: + raise NotImplementedError(f'4-bit quantization data type {quant_type} is not implemented.') + + n = A.numel() + input_shape = A.shape + + if absmax is None: + blocks = n // blocksize + blocks += 1 if n % blocksize > 0 else 0 + absmax = torch.zeros((blocks,), device=A.device, dtype=torch.float32) + + + if out is None: + out = torch.zeros(((n+1)//2, 1), dtype=torch.uint8, device=A.device) + + assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64] + + prev_device = pre_call(A.device) + is_on_gpu([A, out, absmax]) + + if A.dtype == torch.float32: + if quant_type == 'fp4': + lib.cquantize_blockwise_fp32_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n)) + else: + lib.cquantize_blockwise_fp32_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n)) + elif A.dtype == torch.float16: + if quant_type == 'fp4': + lib.cquantize_blockwise_fp16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n)) + else: + lib.cquantize_blockwise_fp16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n)) + elif A.dtype == torch.bfloat16: + if quant_type == 'fp4': + lib.cquantize_blockwise_bf16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n)) + else: + lib.cquantize_blockwise_bf16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n)) + else: + raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}") + post_call(A.device) + + datatype = get_4bit_type(quant_type, device=A.device) + + if compress_statistics: + offset = absmax.mean() + absmax -= offset + qabsmax, state2 = quantize_blockwise(absmax, blocksize=256) + del absmax + state = [qabsmax, input_shape, A.dtype, blocksize, [offset, state2], quant_type, datatype] + else: + state = [absmax, input_shape, A.dtype, blocksize, None, quant_type, datatype] + + return out, state + +def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor: + return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'fp4') + +def dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor: + return dequantize_4bit(A, quant_state, absmax, out, blocksize, 'nf4') + +def dequantize_4bit(A: Tensor,quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor: + """ + Dequantizes FP4 blockwise quantized values. 
+ + Dequantizes the tensor A with maximum absolute values absmax in blocks of size blocksize. + + Parameters + ---------- + A : torch.Tensor + The input 8-bit tensor (packed 4-bit values). + quant_state : tuple(torch.Tensor, torch.Size, torch.dtype) + Tuple of absmax values, original tensor shape and original dtype. + absmax : torch.Tensor + The absmax values. + out : torch.Tensor + Dequantized output tensor. + blocksize : int + The blocksize used in quantization. + quant_type : str + The 4-bit quantization data type {fp4, nf4} + + + Returns + ------- + torch.Tensor: + Dequantized tensor. + """ + if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]: + raise ValueError(f"The blocksize of {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]") + if quant_type not in ['fp4', 'nf4']: + raise NotImplementedError(f'4-bit quantization data type {quant_type} is not implemented.') + + if quant_state is None: + assert absmax is not None and out is not None + shape = out.shape + dtype = out.dtype + else: + absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = quant_state + + + if compressed_stats is not None: + offset, state2 = compressed_stats + absmax = dequantize_blockwise(absmax, state2) + absmax += offset + if absmax.dtype != torch.float32: absmax = absmax.float() + + if out is None: + out = torch.empty(shape, dtype=dtype, device=A.device) + + n = out.numel() + + + device = pre_call(A.device) + is_on_gpu([A, absmax, out]) + if out.dtype == torch.float32: + if quant_type == 'fp4': + lib.cdequantize_blockwise_fp32_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n)) + else: + lib.cdequantize_blockwise_fp32_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n)) + elif out.dtype == torch.float16: + if quant_type == 'fp4': + lib.cdequantize_blockwise_fp16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n)) + else: + lib.cdequantize_blockwise_fp16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n)) + elif out.dtype == torch.bfloat16: + if quant_type == 'fp4': + lib.cdequantize_blockwise_bf16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n)) + else: + lib.cdequantize_blockwise_bf16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n)) + else: + raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}") + post_call(A.device) + + is_transposed = (A.shape[0] == 1) + if is_transposed: return out.t() + else: return out + + +def quantize(A: Tensor, code: Tensor = None, out: Tensor = None) -> Tensor: + if code is None: + if "dynamic" not in name2qmap: + name2qmap["dynamic"] = create_dynamic_map().to(A.device) + code = name2qmap["dynamic"] + code = code.to(A.device) + + absmax = torch.abs(A).max() + if absmax.dtype != torch.float32: absmax = absmax.float() + inp = A / absmax + out = quantize_no_absmax(inp, code, out) + return out, (absmax, code) + + +def dequantize( + A: Tensor, + quant_state: Tuple[Tensor, Tensor] = None, + absmax: Tensor = None, + code: Tensor = None, + out: Tensor = None, +) -> Tensor: + assert quant_state is not None or absmax is not None + if code is None and quant_state is None: + if "dynamic" not in name2qmap: + name2qmap["dynamic"] = create_dynamic_map().to(A.device) + code = name2qmap["dynamic"] + code = 
code.to(A.device) + + if quant_state is None: + quant_state = (absmax, code) + out = dequantize_no_absmax(A, quant_state[1], out) + return out * quant_state[0] + + +def quantize_no_absmax(A: Tensor, code: Tensor, out: Tensor = None) -> Tensor: + ''' + Quantizes input tensor to 8-bit. + + Quantizes the 32-bit input tensor `A` to the 8-bit output tensor + `out` using the quantization map `code`. + + Parameters + ---------- + A : torch.Tensor + The input tensor. + code : torch.Tensor + The quantization map. + out : torch.Tensor, optional + The output tensor. Needs to be of type byte. + + Returns + ------- + torch.Tensor: + Quantized 8-bit tensor. + ''' + prev_device = pre_call(A.device) + if out is None: out = torch.zeros_like(A, dtype=torch.uint8) + is_on_gpu([A, out]) + lib.cquantize(get_ptr(code), get_ptr(A), get_ptr(out), ct.c_int(A.numel())) + post_call(prev_device) + return out + + +def dequantize_no_absmax(A: Tensor, code: Tensor, out: Tensor = None) -> Tensor: + ''' + Dequantizes the 8-bit tensor to 32-bit. + + Dequantizes the 8-bit tensor `A` to the 32-bit tensor `out` via + the quantization map `code`. + + Parameters + ---------- + A : torch.Tensor + The 8-bit input tensor. + code : torch.Tensor + The quantization map. + out : torch.Tensor + The 32-bit output tensor. + + Returns + ------- + torch.Tensor: + 32-bit output tensor. + ''' + prev_device = pre_call(A.device) + if out is None: out = torch.zeros_like(A, dtype=torch.float32) + is_on_gpu([code, A, out]) + lib.cdequantize(get_ptr(code), get_ptr(A), get_ptr(out), ct.c_int(A.numel())) + post_call(prev_device) + return out + + +def optimizer_update_32bit( + optimizer_name: str, + g: Tensor, + p: Tensor, + state1: Tensor, + beta1: float, + eps: float, + step: int, + lr: float, + state2: Tensor = None, + beta2: float = 0.0, + weight_decay: float = 0.0, + gnorm_scale: float = 1.0, + unorm_vec: Tensor = None, + max_unorm: float = 0.0, + skip_zeros=False, +) -> None: + """ + Performs an inplace optimizer update with one or two optimizer states. + + Universal optimizer update for 32-bit state and 32/16-bit gradients/weights. + + Parameters + ---------- + optimizer_name : str + The name of the optimizer: {adam}. + g : torch.Tensor + Gradient tensor. + p : torch.Tensor + Parameter tensor. + state1 : torch.Tensor + Optimizer state 1. + beta1 : float + Optimizer beta1. + eps : float + Optimizer epsilon. + weight_decay : float + Weight decay. + step : int + Current optimizer step. + lr : float + The learning rate. + state2 : torch.Tensor + Optimizer state 2. + beta2 : float + Optimizer beta2. + gnorm_scale : float + The factor to rescale the gradient to the max clip value. + unorm_vec : torch.Tensor + The tensor for the update norm. + max_unorm : float + The maximum update norm relative to the weight norm. + skip_zeros : bool + Whether to skip zero-valued gradients or not (default: False). 
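+ + Example + ------- + Illustrative 32-bit Adam step (a sketch; assumes a CUDA build and CUDA tensors): + + >>> p = torch.randn(1024, device='cuda'); g = torch.randn_like(p) + >>> m, v = torch.zeros_like(p), torch.zeros_like(p) + >>> optimizer_update_32bit('adam', g, p, m, beta1=0.9, eps=1e-8, step=1, lr=1e-3, state2=v, beta2=0.999) 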
+ """ + + param_norm = 0.0 + if max_unorm > 0.0: + param_norm = torch.norm(p.data.float()) + + + optim_func = None + if g.dtype == torch.float32: + optim_func = str2optimizer32bit[optimizer_name][0] + elif g.dtype == torch.float16: + optim_func = str2optimizer32bit[optimizer_name][1] + elif (g.dtype == torch.bfloat16 and len(str2optimizer32bit[optimizer_name])==3): + optim_func = str2optimizer32bit[optimizer_name][2] + else: + raise ValueError(f"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}") + + is_on_gpu([g, p, state1, state2, unorm_vec]) + prev_device = pre_call(g.device) + optim_func( + get_ptr(g), + get_ptr(p), + get_ptr(state1), + get_ptr(state2), + get_ptr(unorm_vec), + ct.c_float(max_unorm), + ct.c_float(param_norm), + ct.c_float(beta1), + ct.c_float(beta2), + ct.c_float(eps), + ct.c_float(weight_decay), + ct.c_int32(step), + ct.c_float(lr), + ct.c_float(gnorm_scale), + ct.c_bool(skip_zeros), + ct.c_int32(g.numel())) + post_call(prev_device) + + +def optimizer_update_8bit( + optimizer_name: str, + g: Tensor, + p: Tensor, + state1: Tensor, + state2: Tensor, + beta1: float, + beta2: float, + eps: float, + step: int, + lr: float, + qmap1: Tensor, + qmap2: Tensor, + max1: Tensor, + max2: Tensor, + new_max1: Tensor, + new_max2: Tensor, + weight_decay: float = 0.0, + gnorm_scale: float = 1.0, + unorm_vec: Tensor = None, + max_unorm: float = 0.0, +) -> None: + """ + Performs an inplace Adam update. + + Universal Adam update for 32/8-bit state and 32/16-bit gradients/weights. + Uses AdamW formulation if weight decay > 0.0. + + Parameters + ---------- + optimizer_name : str + The name of the optimizer. Choices {adam, momentum} + g : torch.Tensor + Gradient tensor. + p : torch.Tensor + Parameter tensor. + state1 : torch.Tensor + Adam state 1. + state2 : torch.Tensor + Adam state 2. + beta1 : float + Adam beta1. + beta2 : float + Adam beta2. + eps : float + Adam epsilon. + weight_decay : float + Weight decay. + step : int + Current optimizer step. + lr : float + The learning rate. + qmap1 : torch.Tensor + Quantization map for first Adam state. + qmap2 : torch.Tensor + Quantization map for second Adam state. + max1 : torch.Tensor + Max value for first Adam state update. + max2 : torch.Tensor + Max value for second Adam state update. + new_max1 : torch.Tensor + Max value for the next Adam update of the first state. + new_max2 : torch.Tensor + Max value for the next Adam update of the second state. + gnorm_scale : float + The factor to rescale the gradient to the max clip value. + unorm_vec : torch.Tensor + The tensor for the update norm. + max_unorm : float + The maximum update norm relative to the weight norm. 
+ """ + + param_norm = 0.0 + if max_unorm > 0.0: + param_norm = torch.norm(p.data.float()) + + prev_device = pre_call(g.device) + is_on_gpu([g, p, state1, state2, unorm_vec, qmap1, qmap2, max1, max2, new_max1, new_max2]) + if g.dtype == torch.float32 and state1.dtype == torch.uint8: + str2optimizer8bit[optimizer_name][0]( + get_ptr(p), + get_ptr(g), + get_ptr(state1), + get_ptr(state2), + get_ptr(unorm_vec), + ct.c_float(max_unorm), + ct.c_float(param_norm), + ct.c_float(beta1), + ct.c_float(beta2), + ct.c_float(eps), + ct.c_int32(step), + ct.c_float(lr), + get_ptr(qmap1), + get_ptr(qmap2), + get_ptr(max1), + get_ptr(max2), + get_ptr(new_max1), + get_ptr(new_max2), + ct.c_float(weight_decay), + ct.c_float(gnorm_scale), + ct.c_int32(g.numel()), + ) + elif g.dtype == torch.float16 and state1.dtype == torch.uint8: + str2optimizer8bit[optimizer_name][1]( + get_ptr(p), + get_ptr(g), + get_ptr(state1), + get_ptr(state2), + get_ptr(unorm_vec), + ct.c_float(max_unorm), + ct.c_float(param_norm), + ct.c_float(beta1), + ct.c_float(beta2), + ct.c_float(eps), + ct.c_int32(step), + ct.c_float(lr), + get_ptr(qmap1), + get_ptr(qmap2), + get_ptr(max1), + get_ptr(max2), + get_ptr(new_max1), + get_ptr(new_max2), + ct.c_float(weight_decay), + ct.c_float(gnorm_scale), + ct.c_int32(g.numel()), + ) + else: + raise ValueError( + f"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}" + ) + post_call(prev_device) + + +def optimizer_update_8bit_blockwise( + optimizer_name: str, + g: Tensor, + p: Tensor, + state1: Tensor, + state2: Tensor, + beta1: float, + beta2: float, + eps: float, + step: int, + lr: float, + qmap1: Tensor, + qmap2: Tensor, + absmax1: Tensor, + absmax2: Tensor, + weight_decay: float = 0.0, + gnorm_scale: float = 1.0, + skip_zeros=False, +) -> None: + + optim_func = None + prev_device = pre_call(g.device) + is_on_gpu([g, p, state1, state2, qmap1, qmap2, absmax1, absmax2]) + if g.dtype == torch.float32 and state1.dtype == torch.uint8: + optim_func = str2optimizer8bit_blockwise[optimizer_name][0] + elif g.dtype == torch.float16 and state1.dtype == torch.uint8: + optim_func = str2optimizer8bit_blockwise[optimizer_name][1] + elif (g.dtype == torch.bfloat16 and state1.dtype == torch.uint8 and + len(str2optimizer8bit_blockwise[optimizer_name])==3): + optim_func = str2optimizer8bit_blockwise[optimizer_name][2] + else: + raise ValueError( + f"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}" + ) + post_call(prev_device) + + is_on_gpu([p, g, state1, state2, qmap1, qmap2, absmax1, absmax2]) + + prev_device = pre_call(g.device) + optim_func( + get_ptr(p), + get_ptr(g), + get_ptr(state1), + get_ptr(state2), + ct.c_float(beta1), + ct.c_float(beta2), + ct.c_float(eps), + ct.c_int32(step), + ct.c_float(lr), + get_ptr(qmap1), + get_ptr(qmap2), + get_ptr(absmax1), + get_ptr(absmax2), + ct.c_float(weight_decay), + ct.c_float(gnorm_scale), + ct.c_bool(skip_zeros), + ct.c_int32(g.numel()), + ) + post_call(prev_device) + +def percentile_clipping( + grad: Tensor, gnorm_vec: Tensor, step: int, percentile: int = 5 +): + """Applies percentile clipping + + grad: torch.Tensor + The gradient tensor. + gnorm_vec: torch.Tensor + Vector of gradient norms. 100 elements expected. + step: int + The current optimiation steps (number of past gradient norms). 
+ + """ + prev_device = pre_call(grad.device) + is_on_gpu([grad, gnorm_vec]) + if grad.dtype == torch.float32: + lib.cpercentile_clipping_g32( + get_ptr(grad), + get_ptr(gnorm_vec), + ct.c_int32(step), + ct.c_int32(grad.numel()), + ) + elif grad.dtype == torch.float16: + lib.cpercentile_clipping_g16( + get_ptr(grad), + get_ptr(gnorm_vec), + ct.c_int32(step), + ct.c_int32(grad.numel()), + ) + else: + raise ValueError(f"Gradient type {grad.dtype} not supported!") + post_call(prev_device) + + current_gnorm = torch.sqrt(gnorm_vec[step % 100]) + vals, idx = torch.sort(gnorm_vec) + clip_value = torch.sqrt(vals[percentile]) + gnorm_scale = 1.0 + + if current_gnorm > clip_value: + gnorm_scale = clip_value / current_gnorm + + return current_gnorm, clip_value, gnorm_scale + + +def histogram_scatter_add_2d( + histogram: Tensor, index1: Tensor, index2: Tensor, source: Tensor +): + assert len(histogram.shape) == 2 + assert histogram.dtype == torch.float32 + assert source.dtype == torch.float32 + assert index1.dtype == torch.int32 + assert index2.dtype == torch.int32 + + assert histogram.device.type == "cuda" + assert index1.device.type == "cuda" + assert index2.device.type == "cuda" + assert source.device.type == "cuda" + + maxdim1 = ct.c_int32(histogram.shape[0]) + n = ct.c_int32(index1.numel()) + is_on_gpu([histogram, index1, index2, source]) + lib.chistogram_scatter_add_2d(get_ptr(histogram), get_ptr(index1), get_ptr(index2), get_ptr(source), maxdim1, n) + +def check_matmul(A, B, out, transposed_A, transposed_B, expected_type=torch.int8): + if not torch.cuda.is_initialized(): torch.cuda.init() + if A.dtype != expected_type or B.dtype != expected_type: + raise TypeError( + f"Expected torch.int8 input tensors A and B, but got {A.dtype} and {B.dtype}" + ) + + sA = A.shape + sB = B.shape + tA = transposed_A + tB = transposed_B + + correct = True + + if len(sA) == 2 and len(sB) == 2: + if not tA and not tB and A.shape[1] != B.shape[0]: + correct = False + elif tA and not tB and A.shape[0] != B.shape[0]: + correct = False + elif tA and tB and A.shape[0] != B.shape[1]: + correct = False + elif not tA and tB and A.shape[1] != B.shape[1]: + correct = False + elif len(sA) == 3 and len(sB) == 2: + if not tA and not tB and A.shape[2] != B.shape[0]: + correct = False + elif tA and not tB and A.shape[1] != B.shape[0]: + correct = False + elif tA and tB and A.shape[1] != B.shape[1]: + correct = False + elif not tA and tB and A.shape[2] != B.shape[1]: + correct = False + elif len(sA) == 3 and len(sB) == 3: + if not tA and not tB and A.shape[2] != B.shape[1]: + correct = False + elif tA and not tB and A.shape[1] != B.shape[1]: + correct = False + elif tA and tB and A.shape[1] != B.shape[2]: + correct = False + elif not tA and tB and A.shape[2] != B.shape[2]: + correct = False + + if out is not None: + sout = out.shape + # special case common in backprop + if not correct and len(sA) == 3 and len(sB) == 3: + if ( + sout[0] == sA[2] + and sout[1] == sB[2] + and sA[0] == sB[0] + and sA[1] == sB[1] + ): + correct = True + else: + if len(sA) == 2 and len(sB) == 2: + if not tA and not tB: + sout = (sA[0], sB[1]) + elif tA and tB: + sout = (sA[1], sB[0]) + elif tA and not tB: + sout = (sA[1], sB[1]) + elif not tA and tB: + sout = (sA[0], sB[0]) + elif len(sA) == 3 and len(sB) == 2: + if not tA and not tB: + sout = (sA[0], sA[1], sB[1]) + elif tA and tB: + sout = (sA[0], sA[2], sB[0]) + elif tA and not tB: + sout = (sA[0], sA[2], sB[1]) + elif not tA and tB: + sout = (sA[0], sA[1], sB[0]) + elif len(sA) == 3 and len(sB) == 
3:
+            if not tA and not tB:
+                sout = (sA[0], sA[1], sB[2])
+            elif tA and tB:
+                sout = (sA[0], sA[2], sB[1])
+            elif tA and not tB:
+                sout = (sA[0], sA[2], sB[2])
+            elif not tA and tB:
+                sout = (sA[0], sA[1], sB[1])
+
+    if not correct:
+        raise ValueError(
+            f"Tensor dimensions incorrect for matrix multiplication: A x B: {sA} x {sB} with transpose for A x B: {tA} x {tB}."
+        )
+
+    return sout
+
+def gemv_4bit(
+    A: Tensor,
+    B: Tensor,
+    out: Tensor = None,
+    transposed_A=False,
+    transposed_B=False,
+    state=None
+):
+    prev_device = pre_call(A.device)
+    #sout = check_matmul(A, B, out, transposed_A, transposed_B, expected_type=A.dtype)
+    if state is None:
+        raise ValueError('state cannot be None. gemv_4bit() requires the state from quantize_4bit()')
+
+    if A.numel() != A.shape[-1]:
+        raise ValueError('Dimensions of A are invalid. Must be a vector with leading dimensions of 1, e.g. [1, 1, 2048]')
+
+    Bshape = state[1]
+    bout = Bshape[0]
+    absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = state
+    if compressed_stats is not None:
+        offset, state2 = compressed_stats
+        absmax = dequantize_blockwise(absmax, state2)
+        absmax += offset
+
+    if out is None:
+        if len(A.shape) == 3:
+            out = torch.empty(size=(A.shape[0], A.shape[1], bout), dtype=A.dtype, device=A.device)
+        else:
+            out = torch.empty(size=(A.shape[0], bout), dtype=A.dtype, device=A.device)
+
+    n = 1
+    m = Bshape[0]
+    k = Bshape[1]
+    lda = Bshape[0]
+    ldc = Bshape[0]
+    ldb = (A.shape[-1]+1)//2
+    is_on_gpu([B, A, out, absmax, state[-1]])
+    m = ct.c_int32(m)
+    n = ct.c_int32(n)
+    k = ct.c_int32(k)
+    lda = ct.c_int32(lda)
+    ldb = ct.c_int32(ldb)
+    ldc = ct.c_int32(ldc)
+
+    if B.dtype == torch.uint8:
+        if A.dtype == torch.float16:
+            lib.cgemm_4bit_inference_naive_fp16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(state[-1]), get_ptr(out), lda, ldb, ldc, ct.c_int32(state[3]))
+        elif A.dtype == torch.bfloat16:
+            lib.cgemm_4bit_inference_naive_bf16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(state[-1]), get_ptr(out), lda, ldb, ldc, ct.c_int32(state[3]))
+        elif A.dtype == torch.float32:
+            lib.cgemm_4bit_inference_naive_fp32(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(state[-1]), get_ptr(out), lda, ldb, ldc, ct.c_int32(state[3]))
+        else:
+            raise NotImplementedError(f'Matmul not implemented for data type {A.dtype}')
+
+    else:
+        raise NotImplementedError(f'Matmul not implemented for quantized weight data type {B.dtype}')
+
+    post_call(prev_device)
+
+    return out
+
+def igemm(
+    A: Tensor,
+    B: Tensor,
+    out: Tensor = None,
+    transposed_A=False,
+    transposed_B=False,
+):
+    sout = check_matmul(A, B, out, transposed_A, transposed_B)
+    if out is None:
+        out = torch.zeros(size=sout, dtype=torch.int32, device=A.device)
+    if len(A.shape) == 3 and len(B.shape) == 3:
+        if A.shape[0] == B.shape[0] and A.shape[2] == B.shape[1]:
+            return batched_igemm(A, B, out)
+
+    sA = A.shape
+    sB = B.shape
+    if transposed_A and len(sA) == 2:
+        sA = (sA[1], sA[0])
+    elif transposed_A and len(sA) == 3:
+        sA = (sA[0], sA[2], sA[0])
+    if transposed_B and len(sB) == 2:
+        sB = (sB[1], sB[0])
+    elif transposed_B and len(sB) == 3:
+        sB = (sB[0], sB[2], sB[0])
+    # this is a mess: cuBLAS expect column major, but PyTorch is row major.
+ # So to perform the matrix multiplication, we have to treat A, B, and C matrices + # (transpose of row major is column major) + # This means we compute B^T A^T = C^T and we explicitly switch the dimensions of each of these + + # matrices in the input arguments for cuBLAS + # column major: A @ B = C: [m, k] @ [k, n] = [m, n] + # row major: B^T @ A^T = C^T: [m, k] @ [k, n] = [m, n] + # column major with row major layout: B^T @ A^T = C^T: [k, m] @ [n, k] = [n, m] + if len(sB) == 2: + if B.stride()[0] == B.shape[1]: + transposed_B = False + elif B.stride()[1] == B.shape[0]: + transposed_B = True + if len(A.shape) == 2: + if A.stride()[0] == A.shape[1]: + transposed_A = False + elif A.stride()[1] == A.shape[0]: + transposed_A = True + else: + if A.stride()[1] == A.shape[2]: + transposed_A = False + elif A.stride()[2] == A.shape[1]: + transposed_A = True + + if len(sA) == 2: + n = sA[0] + ldb = A.stride()[1 if transposed_A else 0] + elif len(sA) == 3 and len(sB) == 2: + n = sA[0] * sA[1] + ldb = sA[2] + + m = sB[1] + k = sB[0] + lda = B.stride()[(1 if transposed_B else 0)] + ldc = sB[1] + elif len(sB) == 3: + # special case + assert len(sA) == 3 + if not (sA[0] == sB[0] and sA[1] == sB[1]): + raise ValueError( + f"Only bsi,bso->io supported for tensor contractions, but dims for A x B were: {sA} x {sB}" + ) + + transposed_A = True + transposed_B = False + + m = sB[2] + n = sA[2] + k = sB[0] * sB[1] + + lda = m + ldb = sA[2] + ldc = m + + ptr = CUBLAS_Context.get_instance().get_context(A.device) + + # B^T @ A^T = C^T + # [km, nk -> mn] + is_on_gpu([B, A, out]) + lib.cigemm(ptr, ct.c_bool(transposed_B), ct.c_bool(transposed_A), ct.c_int32(m), ct.c_int32(n), ct.c_int32(k), + get_ptr(B), get_ptr(A), get_ptr(out), ct.c_int32(lda), ct.c_int32(ldb), ct.c_int32(ldc)) + return out + + +def batched_igemm( + A: Tensor, + B: Tensor, + out: Tensor = None, + transposed_A=False, + transposed_B=False, +): + if not len(A.shape) == 3 or not len(B.shape) == 3: + raise ValueError( + f"Expected 3-dimensional tensors for bmm, but got shapes A and B: {A.shape} and {B.shape}" + ) + sout = check_matmul(A, B, out, transposed_A, transposed_B) + if out is None: + out = torch.zeros(size=sout, dtype=torch.int32, device=A.device) + + if B.is_contiguous(): + lda = B.stride()[1] + transposed_A = False + else: + s = B.stride() + if s[0] != B.shape[0]: + B = B.contiguous() + lda = B.stride()[1] + elif s[2] == B.shape[1]: + transposed_A = True + lda = B.stride()[2] + else: + if s[2] == 1: + B = B.contiguous() + lda = B.stride()[1] + elif s[1] == 1: + B = B.contiguous() + lda = B.stride()[1] + else: + B = B.contiguous() + lda = B.stride()[1] + + if A.is_contiguous(): + ldb = A.stride()[1] + transposed_B = False + else: + s = A.stride() + if s[0] != A.shape[0]: + A = A.contiguous() + ldb = A.stride()[1] + transposed_B = False + elif s[2] == A.shape[1]: + ldb = A.stride()[2] + transposed_B = True + else: + A = A.contiguous() + ldb = A.stride()[1] + transposed_B = False + + # this is a mess: cuBLAS expect column major, but PyTorch is row major. 
+ # So to perform the matrix multiplication, we have to treat A, B, and C matrices + # (transpose of row major is column major) + # This means we compute B^T A^T = C^T and we explicitly switch the dimensions of each of these + # matrices in the input arguments for cuBLAS + + # column major: A @ B = C: [batch, m, k] @ [batch, k, n] = [batch, m, n] + # row major: B^T @ A^T = C^T: [batch, m, k] @ [batch, k, n] = [batch, m, n] + # column major with row major layout: B^T @ A^T = C^T: [batch, k, m] @ [batch, n, k] = [batch, n, m] + num_batch = A.shape[0] + n = A.shape[1] + m = B.shape[2] + k = B.shape[1] + + ldc = m + + strideA = B.shape[1] * B.shape[2] + strideB = A.shape[1] * A.shape[2] + strideC = A.shape[1] * B.shape[2] + + ptr = CUBLAS_Context.get_instance().get_context(A.device) + + is_on_gpu([B, A, out]) + lib.cbatched_igemm(ptr, ct.c_bool(transposed_B), ct.c_bool(transposed_A), ct.c_int32(m), ct.c_int32(n), ct.c_int32(k), + get_ptr(B), get_ptr(A), get_ptr(out), ct.c_int32(lda), ct.c_int32(ldb), ct.c_int32(ldc), + ct.c_long(strideA), ct.c_long(strideB), ct.c_long(strideC), ct.c_uint32(num_batch)) + return out + + +def igemmlt(A, B, SA, SB, out=None, Sout=None, dtype=torch.int32): + shapeA = SA[0] + shapeB = SB[0] + dimsA = len(shapeA) + dimsB = len(shapeB) + assert dimsB == 2, 'Only two dimensional matrices are supported for argument B' + if dimsA == 2: + m = shapeA[0] + elif dimsA == 3: + m = shapeA[0] * shapeA[1] + + rows = n = shapeB[0] + assert prod(list(shapeA)) > 0, f'Input tensor dimensions need to be > 0: {shapeA}' + + # if the tensor is empty, return a transformed empty tensor with the right dimensions + if shapeA[0] == 0 and dimsA == 2: + return torch.empty((0, shapeB[0]), device=A.device, dtype=torch.float16) + elif shapeA[1] == 0 and dimsA == 3: + return torch.empty(tuple(shapeA[:2] + [shapeB[0]]), device=A.device, dtype=torch.float16) + + if dimsA == 2 and out is None: + out, Sout = get_transform_buffer( + (shapeA[0], shapeB[0]), dtype, A.device, "col32", "row" + ) + elif dimsA == 3 and out is None: + out, Sout = get_transform_buffer( + (shapeA[0], shapeA[1], shapeB[0]), dtype, A.device, "col32", "row" + ) + + assert dimsB != 3, "len(B.shape)==3 not supported" + assert A.device.type == "cuda" + assert B.device.type == "cuda" + assert A.dtype == torch.int8 + assert B.dtype == torch.int8 + assert out.dtype == dtype + assert SA[1] == "col32" + assert SB[1] in ["col_turing", "col_ampere"] + assert Sout[1] == "col32" + assert ( + shapeA[-1] == shapeB[-1] + ), f"Matmullt only supports A @ B^T. 
Inner matrix dimensions do not match: A @ B = {shapeA} @ {shapeB}" + formatB = SB[1] + prev_device = A.device + torch.cuda.set_device(A.device) + + ptr = CUBLAS_Context.get_instance().get_context(A.device) + ptrA = get_ptr(A) + ptrB = get_ptr(B) + ptrC = get_ptr(out) + + k = shapeA[-1] + lda = ct.c_int32(m * 32) + if formatB == "col_turing": + # turing: tiles with rows filled up to multiple of 8 rows by 32 columns + # n = rows + ldb = ct.c_int32(((rows + 7) // 8) * 8 * 32) + else: + # ampere: tiles with rows filled up to multiple of 32 rows by 32 columns + # n = rows + ldb = ct.c_int32(((rows + 31) // 32) * 32 * 32) + + ldc = ct.c_int32(m * 32) + m = ct.c_int32(m) + n = ct.c_int32(n) + k = ct.c_int32(k) + + has_error = 0 + ptrRowScale = get_ptr(None) + is_on_gpu([A, B, out]) + if formatB == 'col_turing': + if dtype == torch.int32: + has_error = lib.cigemmlt_turing_32( + ptr, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc + ) + else: + has_error = lib.cigemmlt_turing_8( + ptr, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc + ) + elif formatB == "col_ampere": + if dtype == torch.int32: + has_error = lib.cigemmlt_ampere_32( + ptr, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc + ) + else: + has_error = lib.cigemmlt_ampere_8( + ptr, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc + ) + + if has_error == 1: + print(f'A: {shapeA}, B: {shapeB}, C: {Sout[0]}; (lda, ldb, ldc): {(lda, ldb, ldc)}; (m, n, k): {(m, n, k)}') + raise Exception('cublasLt ran into an error!') + + torch.cuda.set_device(prev_device) + + return out, Sout + + +def mm_dequant( + A, + quant_state, + row_stats, + col_stats, + out=None, + new_row_stats=None, + new_col_stats=None, + bias=None +): + assert A.dtype == torch.int32 + if bias is not None: assert bias.dtype == torch.float16 + out_shape = quant_state[0] + if len(out_shape) == 3: + out_shape = (out_shape[0] * out_shape[1], out_shape[2]) + + if out is None: + out = torch.empty(out_shape, dtype=torch.float16, device=A.device) + if new_row_stats is None: + new_row_stats = torch.empty( + out_shape[0], dtype=torch.float32, device=A.device + ) + if new_col_stats is None: + new_col_stats = torch.empty( + out_shape[1], dtype=torch.float32, device=A.device + ) + assert ( + new_row_stats.shape[0] == row_stats.shape[0] + ), f"{new_row_stats.shape} vs {row_stats.shape}" + assert ( + new_col_stats.shape[0] == col_stats.shape[0] + ), f"{new_col_stats.shape} vs {col_stats.shape}" + + prev_device = pre_call(A.device) + ptrA = get_ptr(A) + ptrOut = get_ptr(out) + ptrRowStats = get_ptr(row_stats) + ptrColStats = get_ptr(col_stats) + ptrNewRowStats = get_ptr(new_row_stats) + ptrNewColStats = get_ptr(new_col_stats) + ptrBias = get_ptr(bias) + numRows = ct.c_int32(out_shape[0]) + numCols = ct.c_int32(out_shape[1]) + + is_on_gpu([A, row_stats, col_stats, out, new_row_stats, new_col_stats, bias]) + lib.cdequant_mm_int32_fp16(ptrA, ptrRowStats, ptrColStats, ptrOut, ptrNewRowStats, ptrNewColStats, ptrBias, numRows, numCols) + post_call(prev_device) + + return out + + +def get_colrow_absmax( + A, row_stats=None, col_stats=None, nnz_block_ptr=None, threshold=0.0 +): + assert A.dtype == torch.float16 + device = A.device + + cols = A.shape[-1] + if len(A.shape) == 3: + rows = A.shape[0] * A.shape[1] + else: + rows = A.shape[0] + + col_tiles = (cols + 255) // 256 + tiled_rows = ((rows + 15) // 16) * 16 + if row_stats is None: + row_stats = torch.empty( + (rows,), dtype=torch.float32, device=device + ).fill_(-50000.0) + if col_stats is None: + col_stats = torch.empty( + 
(cols,), dtype=torch.float32, device=device + ).fill_(-50000.0) + + if nnz_block_ptr is None and threshold > 0.0: + nnz_block_ptr = torch.zeros( + ((tiled_rows * col_tiles) + 1,), dtype=torch.int32, device=device + ) + + ptrA = get_ptr(A) + ptrRowStats = get_ptr(row_stats) + ptrColStats = get_ptr(col_stats) + ptrNnzrows = get_ptr(nnz_block_ptr) + rows = ct.c_int32(rows) + cols = ct.c_int32(cols) + + prev_device = pre_call(A.device) + is_on_gpu([A, row_stats, col_stats, nnz_block_ptr]) + lib.cget_col_row_stats(ptrA, ptrRowStats, ptrColStats, ptrNnzrows, ct.c_float(threshold), rows, cols) + post_call(prev_device) + + if threshold > 0.0: + nnz_block_ptr.cumsum_(0) + + return row_stats, col_stats, nnz_block_ptr + + +class COOSparseTensor: + def __init__(self, rows, cols, nnz, rowidx, colidx, values): + assert rowidx.dtype == torch.int32 + assert colidx.dtype == torch.int32 + assert values.dtype == torch.float16 + assert values.numel() == nnz + assert rowidx.numel() == nnz + assert colidx.numel() == nnz + + self.rows = rows + self.cols = cols + self.nnz = nnz + self.rowidx = rowidx + self.colidx = colidx + self.values = values + + +class CSRSparseTensor: + def __init__(self, rows, cols, nnz, rowptr, colidx, values): + assert rowptr.dtype == torch.int32 + assert colidx.dtype == torch.int32 + assert values.dtype == torch.float16 + assert values.numel() == nnz + assert colidx.numel() == nnz + assert rowptr.numel() == rows + 1 + + self.rows = rows + self.cols = cols + self.nnz = nnz + self.rowptr = rowptr + self.colidx = colidx + self.values = values + + +class CSCSparseTensor: + def __init__(self, rows, cols, nnz, colptr, rowidx, values): + assert colptr.dtype == torch.int32 + assert rowidx.dtype == torch.int32 + assert values.dtype == torch.float16 + assert values.numel() == nnz + assert rowidx.numel() == nnz + assert colptr.numel() == cols + 1 + + self.rows = rows + self.cols = cols + self.nnz = nnz + self.colptr = colptr + self.rowidx = rowidx + self.values = values + + +def coo2csr(cooA): + values, counts = torch.unique(cooA.rowidx, return_counts=True) + values.add_(1) + rowptr = torch.zeros( + (cooA.rows + 1,), dtype=torch.int32, device=cooA.rowidx.device + ) + rowptr.scatter_(index=values.long(), src=counts.int(), dim=0) + rowptr.cumsum_(0) + return CSRSparseTensor( + cooA.rows, cooA.cols, cooA.nnz, rowptr, cooA.colidx, cooA.values + ) + + +def coo2csc(cooA): + val, col2rowidx = torch.sort(cooA.colidx) + rowidx = cooA.rowidx[col2rowidx] + values = cooA.values[col2rowidx] + colvalues, counts = torch.unique(val, return_counts=True) + colvalues.add_(1) + colptr = torch.zeros( + (cooA.cols + 1,), dtype=torch.int32, device=cooA.colidx.device + ) + colptr.scatter_(index=colvalues.long(), src=counts.int(), dim=0) + colptr.cumsum_(0) + return CSCSparseTensor( + cooA.rows, cooA.cols, cooA.nnz, colptr, rowidx, values + ) + + +def coo_zeros(rows, cols, nnz, device, dtype=torch.half): + rowidx = torch.zeros((nnz,), dtype=torch.int32, device=device) + colidx = torch.zeros((nnz,), dtype=torch.int32, device=device) + values = torch.zeros((nnz,), dtype=dtype, device=device) + return COOSparseTensor(rows, cols, nnz, rowidx, colidx, values) + + +def double_quant( + A, col_stats=None, row_stats=None, out_col=None, out_row=None, threshold=0.0 +): + device = A.device + assert A.dtype == torch.half + assert device.type == "cuda" + prev_device = pre_call(A.device) + + cols = A.shape[-1] + if len(A.shape) == 3: + rows = A.shape[0] * A.shape[1] + else: + rows = A.shape[0] + + if row_stats is None or col_stats is 
None: + row_stats, col_stats, nnz_row_ptr = get_colrow_absmax( + A, threshold=threshold + ) + + if out_col is None: + out_col = torch.zeros(A.shape, device=device, dtype=torch.int8) + if out_row is None: + out_row = torch.zeros(A.shape, device=device, dtype=torch.int8) + + coo_tensor = None + ptrA = get_ptr(A) + ptrColStats = get_ptr(col_stats) + ptrRowStats = get_ptr(row_stats) + ptrOutCol = get_ptr(out_col) + ptrOutRow = get_ptr(out_row) + + is_on_gpu([A, col_stats, row_stats, out_col, out_row]) + if threshold > 0.0: + nnz = nnz_row_ptr[-1].item() + if nnz > 0: + coo_tensor = coo_zeros( + A.shape[0], A.shape[1], nnz_row_ptr[-1].item(), device + ) + ptrRowIdx = get_ptr(coo_tensor.rowidx) + ptrColIdx = get_ptr(coo_tensor.colidx) + ptrVal = get_ptr(coo_tensor.values) + ptrRowPtr = get_ptr(nnz_row_ptr) + + lib.cdouble_rowcol_quant( + ptrA, + ptrRowStats, + ptrColStats, + ptrOutCol, + ptrOutRow, + ptrRowIdx, + ptrColIdx, + ptrVal, + ptrRowPtr, + ct.c_float(threshold), + ct.c_int32(rows), + ct.c_int32(cols), + ) + val, idx = torch.sort(coo_tensor.rowidx) + coo_tensor.rowidx = val + coo_tensor.colidx = coo_tensor.colidx[idx] + coo_tensor.values = coo_tensor.values[idx] + else: + lib.cdouble_rowcol_quant( + ptrA, + ptrRowStats, + ptrColStats, + ptrOutCol, + ptrOutRow, + None, + None, + None, + None, + ct.c_float(0.0), + ct.c_int32(rows), + ct.c_int32(cols), + ) + else: + lib.cdouble_rowcol_quant( + ptrA, + ptrRowStats, + ptrColStats, + ptrOutCol, + ptrOutRow, + None, + None, + None, + None, + ct.c_float(threshold), + ct.c_int32(rows), + ct.c_int32(cols), + ) + post_call(prev_device) + + return out_row, out_col, row_stats, col_stats, coo_tensor + + +def transform(A, to_order, from_order='row', out=None, transpose=False, state=None, ld=None): + prev_device = pre_call(A.device) + if state is None: state = (A.shape, from_order) + else: from_order = state[1] + if out is None: out, new_state = get_transform_buffer(state[0], A.dtype, A.device, to_order, state[1], transpose) + else: new_state = (state[0], to_order) # (shape, order) + + shape = state[0] + if len(shape) == 2: + dim1 = ct.c_int32(shape[0]) + dim2 = ct.c_int32(shape[1]) + else: + dim1 = ct.c_int32(shape[0] * shape[1]) + dim2 = ct.c_int32(shape[2]) + + is_on_gpu([A, out]) + if to_order == 'col32': + if transpose: + lib.ctransform_row2col32T(get_ptr(A), get_ptr(out), dim1, dim2) + else: + lib.ctransform_row2col32(get_ptr(A), get_ptr(out), dim1, dim2) + elif to_order == "col_turing": + if transpose: + lib.ctransform_row2turingT(get_ptr(A), get_ptr(out), dim1, dim2) + else: + lib.ctransform_row2turing(get_ptr(A), get_ptr(out), dim1, dim2) + elif to_order == "col_ampere": + if transpose: + lib.ctransform_row2ampereT(get_ptr(A), get_ptr(out), dim1, dim2) + else: + lib.ctransform_row2ampere(get_ptr(A), get_ptr(out), dim1, dim2) + elif to_order == "row": + if from_order == "col_turing": + lib.ctransform_turing2row(get_ptr(A), get_ptr(out), dim1, dim2) + elif from_order == "col_ampere": + lib.ctransform_ampere2row(get_ptr(A), get_ptr(out), dim1, dim2) + else: + raise NotImplementedError(f'Transform function not implemented: From {from_order} to {to_order}') + + post_call(prev_device) + + return out, new_state + + +def spmm_coo(cooA, B, out=None): + if out is None: + out = torch.empty( + (cooA.rows, B.shape[1]), device=B.device, dtype=B.dtype + ) + nnz = cooA.nnz + assert cooA.rowidx.numel() == nnz + assert cooA.colidx.numel() == nnz + assert cooA.values.numel() == nnz + assert cooA.cols == B.shape[0] + + transposed_B = False if B.is_contiguous() 
else True
+
+    ldb = B.stride()[(1 if transposed_B else 0)]
+    ldc = B.shape[1]
+
+    ptr = Cusparse_Context.get_instance().context
+
+    ptrRowidx = get_ptr(cooA.rowidx)
+    ptrColidx = get_ptr(cooA.colidx)
+    ptrValues = get_ptr(cooA.values)
+    ptrB = get_ptr(B)
+    ptrC = get_ptr(out)
+    cnnz = ct.c_int32(cooA.nnz)
+    crowsA = ct.c_int32(cooA.rows)
+    ccolsA = ct.c_int32(cooA.cols)
+    ccolsB = ct.c_int32(B.shape[1])
+    cldb = ct.c_int32(ldb)
+    cldc = ct.c_int32(ldc)
+
+    is_on_gpu([cooA.rowidx, cooA.colidx, cooA.values, B, out])
+    lib.cspmm_coo(ptr, ptrRowidx, ptrColidx, ptrValues, cnnz, crowsA, ccolsA, ccolsB, cldb, ptrB, cldc, ptrC, ct.c_bool(transposed_B))
+
+    return out
+
+
+def spmm_coo_very_sparse(cooA, B, dequant_stats=None, out=None):
+    if out is None:
+        out = torch.zeros(
+            (cooA.rows, B.shape[1]), device=B.device, dtype=cooA.values.dtype
+        )
+    nnz = cooA.nnz
+    prev_device = pre_call(B.device)
+    assert cooA.rowidx.numel() == nnz
+    assert cooA.colidx.numel() == nnz
+    assert cooA.values.numel() == nnz
+    assert cooA.cols == B.shape[0], f"{cooA.cols} vs {B.shape}"
+
+    transposed_B = False if B.is_contiguous() else True
+
+    ldb = B.stride()[(1 if transposed_B else 0)]
+    ldc = B.shape[1]
+
+    values, counts = torch.unique(cooA.rowidx, return_counts=True)
+    offset = counts.cumsum(0).int()
+    max_count, max_idx = torch.sort(counts, descending=True)
+    max_idx = max_idx.int()
+    max_count = max_count.int()
+    assert (
+        max_count[0] <= 32
+    ), f"Maximum supported count per row is 32, but found {max_count[0]}."
+    assert B.dtype in [torch.float16, torch.int8]
+    ptrOffset = get_ptr(offset)
+    ptrMaxCount = get_ptr(max_count)
+    ptrMaxIdx = get_ptr(max_idx)
+
+    ptrRowidx = get_ptr(cooA.rowidx)
+    ptrColidx = get_ptr(cooA.colidx)
+    ptrValues = get_ptr(cooA.values)
+    ptrB = get_ptr(B)
+    ptrC = get_ptr(out)
+    ptrDequantStats = get_ptr(dequant_stats)
+    cnnz_rows = ct.c_int32(counts.numel())
+    cnnz = ct.c_int32(cooA.nnz)
+    crowsA = ct.c_int32(cooA.rows)
+    ccolsA = ct.c_int32(cooA.cols)
+    crowsB = ct.c_int32(B.shape[1])
+    ccolsB = ct.c_int32(B.shape[1])
+    cldb = ct.c_int32(ldb)
+    cldc = ct.c_int32(ldc)
+
+    is_on_gpu([cooA.rowidx, cooA.colidx, cooA.values, B, out, dequant_stats])
+    if B.dtype == torch.float16:
+        lib.cspmm_coo_very_sparse_naive_fp16(
+            ptrMaxCount,
+            ptrMaxIdx,
+            ptrOffset,
+            ptrRowidx,
+            ptrColidx,
+            ptrValues,
+            ptrB,
+            ptrC,
+            ptrDequantStats,
+            cnnz_rows,
+            cnnz,
+            crowsA,
+            crowsB,
+            ccolsB,
+        )
+    elif B.dtype == torch.int8:
+        lib.cspmm_coo_very_sparse_naive_int8(
+            ptrMaxCount,
+            ptrMaxIdx,
+            ptrOffset,
+            ptrRowidx,
+            ptrColidx,
+            ptrValues,
+            ptrB,
+            ptrC,
+            ptrDequantStats,
+            cnnz_rows,
+            cnnz,
+            crowsA,
+            crowsB,
+            ccolsB,
+        )
+    # else: assertion error
+    post_call(prev_device)
+
+    return out
+
+
+C = 127.0
+
+
+def vectorwise_quant(x, dim=1, quant_type="vector"):
+    if quant_type == "linear":
+        max1 = torch.abs(x).max().float()
+        xq = torch.round(x / max1 * 127).to(torch.int8)
+        return xq, max1
+    elif quant_type in ["vector", "row"]:
+        max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)
+        xq = torch.round(x * (C / max1)).to(torch.int8)
+        return xq, max1
+    elif quant_type == "zeropoint":
+        dtype = x.dtype
+        x = x.float()
+        dyna = x.max() - x.min()
+        if dyna == 0:
+            dyna = 1
+        qx = 255.0 / dyna
+        minx = x.min()
+        zpx = torch.round(minx * qx)
+        x = torch.round(qx * x - zpx) + zpx
+        return x, qx
+    elif quant_type in ["vector-zeropoint", "row-zeropoint"]:
+        dtype = x.dtype
+        x = x.float()
+        dyna = torch.amax(x, dim=dim, keepdim=True) - torch.amin(
+            x, dim=dim, keepdim=True
+        )
+        dyna[dyna == 0] = 1
+        qx = 
255.0 / dyna + minx = torch.amin(x, dim=dim, keepdim=True) + zpx = torch.round(minx * qx) + x = torch.round(qx * x - zpx) + zpx + return x, qx + elif quant_type == "truncated-vector": + with torch.no_grad(): + absx = torch.abs(x) + max1 = torch.amax(absx, dim=dim, keepdim=True) + max1 = max1 * 0.7 + idx = absx > max1.expand_as(absx) + sign = torch.sign(x[idx]) + x[idx] = max1.expand_as(absx)[idx] * sign + xq = torch.round(x / max1 * C).to(torch.int8) + return xq, max1 + else: + return None + + +def vectorwise_dequant(xq, max1, quant_type="vector"): + if quant_type == "vector": + x = (xq / C * max1).to(torch.float32) + return x + else: + return None + + +def vectorwise_mm_dequant(xq, S1, S2, dtype=torch.half, quant_type="vector"): + if quant_type == "linear": + norm = S1 * S2 / (C * C) + # double cast needed to prevent overflows + return (xq.float() * norm).to(dtype) + elif quant_type == "zeropoint": + norm = 1.0 / (S1 * S2) + return (xq.float() * norm).to(dtype) + elif quant_type == "row-zeropoint": + norm = 1.0 / (S1 * S2) + x = xq.float() + if len(S1.shape) == 3 and len(x.shape) == 2: + S1 = S1.squeeze(0) + if len(S2.shape) == 3 and len(x.shape) == 2: + S2 = S2.squeeze(0) + if len(S1.shape) == 2: + x *= norm + else: + x *= norm + return x.to(dtype) + elif quant_type == "vector-zeropoint": + x = xq.float() + if len(S1.shape) == 3 and len(x.shape) == 2: + S1 = S1.squeeze(0) + if len(S2.shape) == 3 and len(x.shape) == 2: + S2 = S2.squeeze(0) + if len(S1.shape) == 2: + x *= 1.0 / S1 + else: + x *= 1.0 / S1 + x *= 1.0 / S2.t() + return x.to(dtype) + elif quant_type == "row": + x = xq.float() + if len(S1.shape) == 3 and len(x.shape) == 2: + S1 = S1.squeeze(0) + if len(S2.shape) == 3 and len(x.shape) == 2: + S2 = S2.squeeze(0) + if len(S1.shape) == 2: + x *= S1 * S2 / (C * C) + else: + x *= S1 * S2 / (C * C) + return x.to(dtype) + elif quant_type in ["truncated-vector", "vector"]: + x = xq.float() + if len(S1.shape) == 3 and len(x.shape) == 2: + S1 = S1.squeeze(0) + if len(S2.shape) == 3 and len(x.shape) == 2: + S2 = S2.squeeze(0) + if len(S1.shape) == 2: + x *= S1 / C + else: + x *= S1 / C + x *= S2 / C + return x.to(dtype) + else: + return None + + +def dequant_min_max(xq, A, B, SA, SB, dtype=torch.half): + offset = B.float().t().sum(0) * (SA[0] + SA[1]) + x = xq.float() + if len(xq.shape) == 2 and len(SB.shape) == 3: + SB = SB.squeeze(0) + if len(SB.shape) == 2: + x *= SB.t() / 127 + else: + x *= SB / 127 + x *= SA[1] / 127 + x += offset + return x.to(dtype) + + +def extract_outliers(A, SA, idx): + shapeA = SA[0] + formatA = SA[1] + assert formatA in ["col_turing", "col_ampere"] + assert A.device.type == "cuda" + + out = torch.zeros( + (shapeA[0], idx.numel()), dtype=torch.int8, device=A.device + ) + + idx_size = ct.c_int32(idx.numel()) + rows = ct.c_int32(shapeA[0]) + cols = ct.c_int32(shapeA[1]) + ptrA = get_ptr(A) + ptrIdx = get_ptr(idx) + ptrOut = get_ptr(out) + + prev_device = pre_call(A.device) + if formatA == 'col_turing': + lib.cextractOutliers_turing(ptrA, ptrIdx, ptrOut, idx_size, rows, cols) + elif formatA == "col_ampere": + lib.cextractOutliers_ampere(ptrA, ptrIdx, ptrOut, idx_size, rows, cols) + post_call(prev_device) + + return out + +def pipeline_test(A, batch_size): + out = torch.zeros_like(A) + lib.cpipeline_test(get_ptr(A), get_ptr(out), ct.c_size_t(A.numel()), ct.c_size_t(batch_size)) + return out diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda110.so b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda110.so new file mode 
100644 index 0000000000000000000000000000000000000000..5c3f792a48afa01dd9725cd8ab1c8ec6b54c7954 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda110.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d4d33ffbdc477c2cf67635a876cd185d07032884e45f729225f925ad411290ac +size 5938904 diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda110_nocublaslt.so b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda110_nocublaslt.so new file mode 100644 index 0000000000000000000000000000000000000000..3959a0014bd6f013573502a1b57e30973a138042 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda110_nocublaslt.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:abfd599f616509de8b6976308e40eb13faead2516e3700e318126558cfcdb9f8 +size 11110784 diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda111.so b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda111.so new file mode 100644 index 0000000000000000000000000000000000000000..fd78462b7dde9b8c0d0a76b12a41b77bab86577e --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda111.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2412d9eb0056079c750dab05ab1728839f71c64b395c7ccb01d4051998c28836 +size 8974040 diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda111_nocublaslt.so b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda111_nocublaslt.so new file mode 100644 index 0000000000000000000000000000000000000000..f0309ef9ce808617cd4019e0aa62614b0bfe6091 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda111_nocublaslt.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6ab2775c01535c0b3f8b717cad598c7cd613c98227c880d0f0d4c4b1f356b2b +size 20244864 diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda114.so b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda114.so new file mode 100644 index 0000000000000000000000000000000000000000..8a4504ac8f7645131afa619fbfcec71c0ede8438 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda114.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:921d1d561cf912848872916846df6f07daed31261ac85ad3d6e4260c03ff9c22 +size 9313912 diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda115_nocublaslt.so b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda115_nocublaslt.so new file mode 100644 index 0000000000000000000000000000000000000000..355e0555ffd13fcf2cc8b2e1fdbe74cf30b9c511 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda115_nocublaslt.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6f07c6d5f84ea95d3299cce99fc455a2cf797b2ebb50b92021f67df7a18d613 +size 20925040 diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda117_nocublaslt.so b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda117_nocublaslt.so new file mode 100644 index 0000000000000000000000000000000000000000..a6bbb49ac365bf53362f07edc6c65a04cb628e3f --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda117_nocublaslt.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ea73f42ca2c101ce377b70d14d03ee901b97b51861b570b7c4a21edddb7cf10 
+size 20741032 diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda118_nocublaslt.so b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda118_nocublaslt.so new file mode 100644 index 0000000000000000000000000000000000000000..7ca11b96a2914aa4f965d68e0dbc5f1610e60899 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda118_nocublaslt.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19a62aa3c37b70d9316c0848f9d8b3cb205bb8ea9b11b3514749f287c2c8596e +size 26516696 diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda120.so b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda120.so new file mode 100644 index 0000000000000000000000000000000000000000..a859980aa19d38192e79be293014436b9c421116 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda120.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d689551ab03f16b9b767361a5310ca4727965db2654d35a194f8ced5ad2297fa +size 14504296 diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda120_nocublaslt.so b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda120_nocublaslt.so new file mode 100644 index 0000000000000000000000000000000000000000..b29a998ec779488eaddd8d0198e406e09c6e1bc5 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda120_nocublaslt.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5545f220765be15ebe48e18f54f5951f278a69f1bdc503cb11021e95387af4ea +size 25709592 diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121.so b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121.so new file mode 100644 index 0000000000000000000000000000000000000000..da3c55da0a799c67977f1b07f9f998dcfac2efd9 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d128372dfbdb34a68429d2c3454a076fac650c79dda546ba2be9a5415378463 +size 14512488 diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121_nocublaslt.so b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121_nocublaslt.so new file mode 100644 index 0000000000000000000000000000000000000000..15acdada207bf73a2f490becd86775d6c1dafe7f --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121_nocublaslt.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61e607dfa9b987637bb542d4a19f06b7e0807dbf2cccb0cf5b99bd38b0101401 +size 25721880 diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda122.so b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda122.so new file mode 100644 index 0000000000000000000000000000000000000000..c30e30e8986e2d1128f7c3181c41bc72df4e99b0 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda122.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16b8578667eb6836c6b7923a3b3508d62809e4e91429674a0c3ab97cf60c5349 +size 14561032 diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda122_nocublaslt.so b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda122_nocublaslt.so new file mode 100644 index 0000000000000000000000000000000000000000..772cadd95b72b6eed12cd5938ff0bb00fdc7d766 --- /dev/null +++ 
b/mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda122_nocublaslt.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58f4a2043fe8cee52c93f69176825de57553727a5a9e7984991eb2a24da66530 +size 25803272 diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/nn/__init__.py b/mgm/lib/python3.10/site-packages/bitsandbytes/nn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..49d7b5cedf9df3ed4bdd0a762edbb82db903ec36 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/nn/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +from .modules import Int8Params, Linear8bitLt, StableEmbedding, Linear4bit, LinearNF4, LinearFP4, Params4bit, OutlierAwareLinear, SwitchBackLinearBnb +from .triton_based_modules import SwitchBackLinear, SwitchBackLinearGlobal, SwitchBackLinearVectorwise, StandardLinear diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/__init__.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f78b2237b0207aa6ec95eb2d7629e2470f5a14e Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/__init__.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/modules.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31219de67e9d1934728191581d7bfdcbbb687049 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/modules.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/triton_based_modules.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/triton_based_modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96b3bf1d258a8f97d8edc24fd6d0ce533494d2f5 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/triton_based_modules.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/nn/modules.py b/mgm/lib/python3.10/site-packages/bitsandbytes/nn/modules.py new file mode 100644 index 0000000000000000000000000000000000000000..3d34bb45f7372ac16bbe212e101393323dbadd2e --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/nn/modules.py @@ -0,0 +1,518 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+from typing import Optional, TypeVar, Union, overload + +import warnings +import torch +import torch.nn.functional as F +from torch import Tensor, device, dtype, nn + +import bitsandbytes as bnb +import bitsandbytes.functional +from bitsandbytes.autograd._functions import undo_layout, get_tile_inds +from bitsandbytes.optim import GlobalOptimManager +from bitsandbytes.utils import OutlierTracer, find_outlier_dims + +T = TypeVar("T", bound="torch.nn.Module") + + +class StableEmbedding(torch.nn.Embedding): + def __init__( + self, + num_embeddings: int, + embedding_dim: int, + padding_idx: Optional[int] = None, + max_norm: Optional[float] = None, + norm_type: float = 2.0, + scale_grad_by_freq: bool = False, + sparse: bool = False, + _weight: Optional[Tensor] = None, + device=None, + dtype=None, + ) -> None: + super().__init__( + num_embeddings, + embedding_dim, + padding_idx, + max_norm, + norm_type, + scale_grad_by_freq, + sparse, + _weight, + device, + dtype, + ) + self.norm = torch.nn.LayerNorm(embedding_dim, device=device) + GlobalOptimManager.get_instance().register_module_override( + self, "weight", {"optim_bits": 32} + ) + + def reset_parameters(self) -> None: + torch.nn.init.xavier_uniform_(self.weight) + self._fill_padding_idx_with_zero() + + """ !!! This is a redefinition of _fill_padding_idx_with_zero in torch.nn.Embedding + to make the Layer compatible with Pytorch < 1.9. + This means that if this changes in future PyTorch releases this need to change too + which is cumbersome. However, with this we can ensure compatibility with previous + PyTorch releases. + """ + + def _fill_padding_idx_with_zero(self) -> None: + if self.padding_idx is not None: + with torch.no_grad(): + self.weight[self.padding_idx].fill_(0) + + def forward(self, input: Tensor) -> Tensor: + emb = F.embedding( + input, + self.weight, + self.padding_idx, + self.max_norm, + self.norm_type, + self.scale_grad_by_freq, + self.sparse, + ) + + # always apply layer norm in full precision + emb = emb.to(torch.get_default_dtype()) + + return self.norm(emb).to(self.weight.dtype) + + +class Embedding(torch.nn.Embedding): + def __init__( + self, + num_embeddings: int, + embedding_dim: int, + padding_idx: Optional[int] = None, + max_norm: Optional[float] = None, + norm_type: float = 2.0, + scale_grad_by_freq: bool = False, + sparse: bool = False, + _weight: Optional[Tensor] = None, + device: Optional[device] = None, + ) -> None: + super().__init__( + num_embeddings, + embedding_dim, + padding_idx, + max_norm, + norm_type, + scale_grad_by_freq, + sparse, + _weight, + device=device + ) + GlobalOptimManager.get_instance().register_module_override( + self, "weight", {"optim_bits": 32} + ) + + def reset_parameters(self) -> None: + torch.nn.init.xavier_uniform_(self.weight) + self._fill_padding_idx_with_zero() + + """ !!! This is a redefinition of _fill_padding_idx_with_zero in torch.nn.Embedding + to make the Layer compatible with Pytorch < 1.9. + This means that if this changes in future PyTorch releases this need to change too + which is cumbersome. However, with this we can ensure compatibility with previous + PyTorch releases. 
+ """ + + def _fill_padding_idx_with_zero(self) -> None: + if self.padding_idx is not None: + with torch.no_grad(): + self.weight[self.padding_idx].fill_(0) + + def forward(self, input: Tensor) -> Tensor: + emb = F.embedding( + input, + self.weight, + self.padding_idx, + self.max_norm, + self.norm_type, + self.scale_grad_by_freq, + self.sparse, + ) + + return emb + +class Params4bit(torch.nn.Parameter): + def __new__(cls, data=None, requires_grad=True, quant_state=None, blocksize=64, compress_statistics=True, quant_type='fp4'): + if data is None: + data = torch.empty(0) + + self = torch.Tensor._make_subclass(cls, data, requires_grad) + self.blocksize = blocksize + self.compress_statistics = compress_statistics + self.quant_type = quant_type + self.quant_state = quant_state + self.data = data + return self + + def cuda(self, device): + w = self.data.contiguous().half().cuda(device) + w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics, quant_type=self.quant_type) + self.data = w_4bit + self.quant_state = quant_state + + return self + + @overload + def to(self: T, device: Optional[Union[int, device]] = ..., dtype: Optional[Union[dtype, str]] = ..., non_blocking: bool = ...,) -> T: + ... + + @overload + def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T: + ... + + @overload + def to(self: T, tensor: Tensor, non_blocking: bool = ...) -> T: + ... + + def to(self, *args, **kwargs): + device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs) + + if (device is not None and device.type == "cuda" and self.data.device.type == "cpu"): + return self.cuda(device) + else: + s = self.quant_state + if s is not None: + # make sure the quantization state is on the right device + s[0] = s[0].to(device) + if self.compress_statistics: + # TODO: refactor this. 
This is a nightmare
+                    # for 4-bit:
+                    # state = [qabsmax, input_shape, A.dtype, blocksize, [offset, state2], quant_type]
+                    # state2 = [absmax, input_shape, A.dtype, blocksize, None, quant_type]
+                    #s[-2][0] = s[-2][0].to(device) # offset
+                    #s[-2][1][0] = s[-2][1][0].to(device) # nested absmax
+
+                    # for 8-bit
+                    s[-3][0] = s[-3][0].to(device) # offset
+                    s[-3][1][0] = s[-3][1][0].to(device) # nested quantization state statistics
+                    s[-3][1][1] = s[-3][1][1].to(device) # nested quantization codebook
+            new_param = Params4bit(super().to(device=device, dtype=dtype, non_blocking=non_blocking),
+                                   requires_grad=self.requires_grad, quant_state=self.quant_state,
+                                   blocksize=self.blocksize, compress_statistics=self.compress_statistics,
+                                   quant_type=self.quant_type)
+
+            return new_param
+
+class Linear4bit(nn.Linear):
+    def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4', device=None):
+        super().__init__(input_features, output_features, bias, device)
+        self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
+        self.compute_dtype = compute_dtype
+        self.compute_type_is_set = False
+
+    def set_compute_type(self, x):
+        if x.dtype in [torch.float32, torch.bfloat16]:
+            # the input is in a dtype that is safe to compute in, we switch
+            # to this type for speed and stability
+            self.compute_dtype = x.dtype
+        elif x.dtype == torch.float16:
+            # we take the compute dtype passed into the layer
+            if self.compute_dtype == torch.float32 and (x.numel() == x.shape[-1]):
+                # single batch inference with input torch.float16 and compute_dtype float32 -> slow inference when it could be fast
+                # warn the user about this
+                warnings.warn('Input type into Linear4bit is torch.float16, but bnb_4bit_compute_type=torch.float32 (default). This will lead to slow inference.')
+                warnings.filterwarnings('ignore', message='.*inference.')
+            if self.compute_dtype == torch.float32 and (x.numel() != x.shape[-1]):
+                warnings.warn('Input type into Linear4bit is torch.float16, but bnb_4bit_compute_type=torch.float32 (default). This will lead to slow inference or training speed.')
+                warnings.filterwarnings('ignore', message='.*inference or training')
+
+    def forward(self, x: torch.Tensor):
+        # weights are cast automatically as Int8Params, but the bias has to be cast manually
+        if self.bias is not None and self.bias.dtype != x.dtype:
+            self.bias.data = self.bias.data.to(x.dtype)
+
+        if getattr(self.weight, 'quant_state', None) is None:
+            print('FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first.')
+        if not self.compute_type_is_set:
+            self.set_compute_type(x)
+            self.compute_type_is_set = True
+
+        inp_dtype = x.dtype
+        if self.compute_dtype is not None:
+            x = x.to(self.compute_dtype)
+
+        bias = None if self.bias is None else self.bias.to(self.compute_dtype)
+        out = bnb.matmul_4bit(x, self.weight.t(), bias=bias, quant_state=self.weight.quant_state)
+
+        out = out.to(inp_dtype)
+
+        return out
+
+class LinearFP4(Linear4bit):
+    def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, device=None):
+        super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'fp4', device)
+
+class LinearNF4(Linear4bit):
+    ''' Implements the NF4 data type.
+
+    Constructs a quantization data type where each bin has equal area under a standard normal distribution N(0, 1) that
+    is normalized into the range [-1, 1].
+
+    For more information read the paper: QLoRA: Efficient Finetuning of Quantized LLMs (https://arxiv.org/abs/2305.14314)
+
+    Implementation of the NF4 data type in bitsandbytes can be found in the `create_normal_map` function in
+    the `functional.py` file: https://github.com/TimDettmers/bitsandbytes/blob/main/bitsandbytes/functional.py#L236.
+    '''
+    def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, device=None):
+        super().__init__(input_features, output_features, bias, compute_dtype, compress_statistics, 'nf4', device)
+
+
+class Int8Params(torch.nn.Parameter):
+    def __new__(
+        cls,
+        data=None,
+        requires_grad=True,
+        has_fp16_weights=False,
+        CB=None,
+        SCB=None,
+    ):
+        cls.has_fp16_weights = has_fp16_weights
+        cls.CB = None
+        cls.SCB = None
+        if data is None:
+            data = torch.empty(0)
+        return torch.Tensor._make_subclass(cls, data, requires_grad)
+
+    def cuda(self, device):
+        if self.has_fp16_weights:
+            return super().cuda(device)
+        else:
+            # we store the 8-bit row-major weight
+            # we convert this weight to the turing/ampere weight during the first inference pass
+            B = self.data.contiguous().half().cuda(device)
+            CB, CBt, SCB, SCBt, coo_tensorB = bnb.functional.double_quant(B)
+            del CBt
+            del SCBt
+            self.data = CB
+            setattr(self, "CB", CB)
+            setattr(self, "SCB", SCB)
+
+        return self
+
+    @overload
+    def to(
+        self: T,
+        device: Optional[Union[int, device]] = ...,
+        dtype: Optional[Union[dtype, str]] = ...,
+        non_blocking: bool = ...,
+    ) -> T:
+        ...
+
+    @overload
+    def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T:
+        ...
+
+    @overload
+    def to(self: T, tensor: Tensor, non_blocking: bool = ...) -> T:
+        ...
+ + def to(self, *args, **kwargs): + device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to( + *args, **kwargs + ) + + if ( + device is not None + and device.type == "cuda" + and self.data.device.type == "cpu" + ): + return self.cuda(device) + else: + new_param = Int8Params( + super().to( + device=device, dtype=dtype, non_blocking=non_blocking + ), + requires_grad=self.requires_grad, + has_fp16_weights=self.has_fp16_weights, + ) + new_param.CB = self.CB + new_param.SCB = self.SCB + + return new_param + + +def maybe_rearrange_weight(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): + weight = state_dict.get(f"{prefix}weight") + if weight is None: + # if the state dict has no weights for this layer (e.g., LoRA finetuning), do nothing + return + weight_format = state_dict.pop(f"{prefix}weight_format", "row") + + if weight_format != "row": + tile_indices = get_tile_inds(weight_format, weight.device) + state_dict[f"{prefix}weight"] = undo_layout(weight, tile_indices) + + +class Linear8bitLt(nn.Linear): + def __init__(self, input_features, output_features, bias=True, has_fp16_weights=True, + memory_efficient_backward=False, threshold=0.0, index=None, device=None): + super().__init__(input_features, output_features, bias, device) + assert not memory_efficient_backward, "memory_efficient_backward is no longer required and the argument is deprecated in 0.37.0 and will be removed in 0.39.0" + self.state = bnb.MatmulLtState() + self.index = index + + self.state.threshold = threshold + self.state.has_fp16_weights = has_fp16_weights + self.state.memory_efficient_backward = memory_efficient_backward + if threshold > 0.0 and not has_fp16_weights: + self.state.use_pool = True + + self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights) + self._register_load_state_dict_pre_hook(maybe_rearrange_weight) + + def _save_to_state_dict(self, destination, prefix, keep_vars): + super()._save_to_state_dict(destination, prefix, keep_vars) + + # we only need to save SCB as extra data, because CB for quantized weights is already stored in weight.data + scb_name = "SCB" + + # case 1: .cuda was called, SCB is in self.weight + param_from_weight = getattr(self.weight, scb_name) + # case 2: self.init_8bit_state was called, SCB is in self.state + param_from_state = getattr(self.state, scb_name) + # case 3: SCB is in self.state, weight layout reordered after first forward() + layout_reordered = self.state.CxB is not None + + key_name = prefix + f"{scb_name}" + format_name = prefix + "weight_format" + + if not self.state.has_fp16_weights: + if param_from_weight is not None: + destination[key_name] = param_from_weight if keep_vars else param_from_weight.detach() + destination[format_name] = "row" + elif param_from_state is not None and not layout_reordered: + destination[key_name] = param_from_state if keep_vars else param_from_state.detach() + destination[format_name] = "row" + elif param_from_state is not None: + destination[key_name] = param_from_state if keep_vars else param_from_state.detach() + destination[format_name] = self.state.formatB + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, + error_msgs) + unexpected_copy = list(unexpected_keys) + + for key in unexpected_copy: + input_name = key[len(prefix):] + if input_name == "SCB": + 
if self.weight.SCB is None:
+                    # buffers not yet initialized, can't access them directly without quantizing first
+                    raise RuntimeError("Loading a quantized checkpoint into non-quantized Linear8bitLt is "
+                                       "not supported. Please call module.cuda() before module.load_state_dict()")
+
+                input_param = state_dict[key]
+                self.weight.SCB.copy_(input_param)
+
+                if self.state.SCB is not None:
+                    self.state.SCB = self.weight.SCB
+
+                unexpected_keys.remove(key)
+
+    def init_8bit_state(self):
+        self.state.CB = self.weight.CB
+        self.state.SCB = self.weight.SCB
+        self.weight.CB = None
+        self.weight.SCB = None
+
+    def forward(self, x: torch.Tensor):
+        self.state.is_training = self.training
+        if self.weight.CB is not None:
+            self.init_8bit_state()
+
+        # weights are cast automatically as Int8Params, but the bias has to be cast manually
+        if self.bias is not None and self.bias.dtype != x.dtype:
+            self.bias.data = self.bias.data.to(x.dtype)
+
+        out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state)
+
+        if not self.state.has_fp16_weights:
+            if self.state.CB is not None and self.state.CxB is not None:
+                # we converted 8-bit row-major to Turing/Ampere format in the first inference pass
+                # we no longer need the row-major weight
+                del self.state.CB
+                self.weight.data = self.state.CxB
+        return out
+
+
+class OutlierAwareLinear(nn.Linear):
+    def __init__(self, input_features, output_features, bias=True, device=None):
+        super().__init__(input_features, output_features, bias, device)
+        self.outlier_dim = None
+        self.is_quantized = False
+
+    def forward_with_outliers(self, x, outlier_idx):
+        raise NotImplementedError('Please override the `forward_with_outliers(self, x, outlier_idx)` function')
+
+    def quantize_weight(self, w, outlier_idx):
+        raise NotImplementedError('Please override the `quantize_weight(self, w, outlier_idx)` function')
+
+    def forward(self, x):
+        if self.outlier_dim is None:
+            tracer = OutlierTracer.get_instance()
+            if not tracer.is_initialized():
+                print('Please use OutlierTracer.initialize(model) before using the OutlierAwareLinear layer')
+            outlier_idx = tracer.get_outliers(self.weight)
+            #print(outlier_idx, tracer.get_hvalue(self.weight))
+            self.outlier_dim = outlier_idx
+
+        if not self.is_quantized:
+            w = self.quantize_weight(self.weight, self.outlier_dim)
+            self.weight.data.copy_(w)
+            self.is_quantized = True
+
+        return self.forward_with_outliers(x, self.outlier_dim)
+
+class SwitchBackLinearBnb(nn.Linear):
+    def __init__(
+        self,
+        input_features,
+        output_features,
+        bias=True,
+        has_fp16_weights=True,
+        memory_efficient_backward=False,
+        threshold=0.0,
+        index=None,
+        device=None
+    ):
+        super().__init__(
+            input_features, output_features, bias, device
+        )
+        self.state = bnb.MatmulLtState()
+        self.index = index
+
+        self.state.threshold = threshold
+        self.state.has_fp16_weights = has_fp16_weights
+        self.state.memory_efficient_backward = memory_efficient_backward
+        if threshold > 0.0 and not has_fp16_weights:
+            self.state.use_pool = True
+
+        self.weight = Int8Params(
+            self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights
+        )
+
+    def init_8bit_state(self):
+        self.state.CB = self.weight.CB
+        self.state.SCB = self.weight.SCB
+        self.weight.CB = None
+        self.weight.SCB = None
+
+    def forward(self, x):
+        self.state.is_training = self.training
+
+        if self.weight.CB is not None:
+            self.init_8bit_state()
+
+        out = bnb.matmul_mixed(x.half(), self.weight.half(), bias=None, state=self.state) + self.bias
+        return out
diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/nn/triton_based_modules.py
b/mgm/lib/python3.10/site-packages/bitsandbytes/nn/triton_based_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..6fbf583b90e9f8213b06be9e84210ff03f8b271b --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/nn/triton_based_modules.py @@ -0,0 +1,258 @@ +import torch +import torch.nn as nn +import time +from functools import partial + +from bitsandbytes.triton.triton_utils import is_triton_available + +from bitsandbytes.triton.dequantize_rowwise import dequantize_rowwise +from bitsandbytes.triton.quantize_rowwise import quantize_rowwise +from bitsandbytes.triton.quantize_columnwise_and_transpose import quantize_columnwise_and_transpose +from bitsandbytes.triton.int8_matmul_rowwise_dequantize import int8_matmul_rowwise_dequantize +from bitsandbytes.triton.quantize_global import quantize_global, quantize_global_transpose +from bitsandbytes.triton.int8_matmul_mixed_dequanitze import int8_matmul_mixed_dequanitze + + +class _switchback_global(torch.autograd.Function): + + @staticmethod + def forward(ctx, X_3D, W, bias): + # reshape input to [N * L, D] + X = X_3D.view(-1, X_3D.size(-1)) + + # rowwise quantize for X, global quantize for W + X_int8, state_X = quantize_rowwise(X) + W_int8, state_W = quantize_global(W) + + # save for backward. + ctx.save_for_backward = X, W + + # matmult, fused dequant and add bias + # call "mixed" because we are mixing rowwise quantized and global quantized + return int8_matmul_mixed_dequanitze( + X_int8, W_int8.t(), state_X, state_W, bias + ).view(*X_3D.size()[:-1], -1) + + @staticmethod + def backward(ctx, G_3D): + # reshape input to [N_out * L, D] + G = G_3D.reshape(-1, G_3D.size(-1)) + + grad_X = grad_W = grad_bias = None + + X, W = ctx.save_for_backward + if ctx.needs_input_grad[0]: + # rowwise quantize for G, global quantize for W + # for W, we also fuse the transpose operation because only A @ B^T is supported + # so we transpose once then call .t() in the matmul + G_int8, state_G = quantize_rowwise(G) + W_int8, state_W = quantize_global_transpose(W) + grad_X = int8_matmul_mixed_dequanitze(G_int8, W_int8.t(), state_G, state_W, None).view( + *G_3D.size()[:-1], -1 + ) + if ctx.needs_input_grad[1]: + # backward pass uses standard weight grad + grad_W = torch.matmul(G.t(), X.to(G.dtype)) + if ctx.needs_input_grad[2]: + grad_bias = G.sum(dim=0) + + return grad_X, grad_W, grad_bias + +class _switchback_vectorrize(torch.autograd.Function): + + @staticmethod + def forward(ctx, X_3D, W, bias): + # reshape input to [N * L, D] + X = X_3D.view(-1, X_3D.size(-1)) + + ctx.save_for_backward = X, W + # rowwise quantize for X + # columnwise quantize for W (first rowwise, transpose later) + X_int8, state_X = quantize_rowwise(X) + W_int8, state_W = quantize_rowwise(W) + + # matmult, fused dequant and add bias + # call kernel which expects rowwise quantized X and W + return int8_matmul_rowwise_dequantize( + X_int8, W_int8.t(), state_X, state_W, bias + ).view(*X_3D.size()[:-1], -1) + + @staticmethod + def backward(ctx, G_3D): + X, W = ctx.save_for_backward + + G = G_3D.reshape(-1, G_3D.size(-1)) + + grad_X = grad_W = grad_bias = None + + if ctx.needs_input_grad[0]: + # rowwise quantize for G, columnwise quantize for W and fused transpose + # we call .t() for weight later because only A @ B^T is supported + G_int8, state_G = quantize_rowwise(G) + W_int8, state_W = quantize_columnwise_and_transpose(W) + grad_X = int8_matmul_rowwise_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view( + *G_3D.size()[:-1], -1 + ) + if 
ctx.needs_input_grad[1]: + # backward pass uses standard weight grad + grad_W = torch.matmul(G.t(), X.to(G.dtype)) + if ctx.needs_input_grad[2]: + grad_bias = G.sum(dim=0) + + return grad_X, grad_W, grad_bias + +class _switchback_global_mem_efficient(torch.autograd.Function): + + @staticmethod + def forward(ctx, X_3D, W, bias): + # reshape input to [N * L, D] + X = X_3D.view(-1, X_3D.size(-1)) + X_3D_sz = X_3D.size() + + # rowwise quantize for X, global quantize for W + X_int8, state_X = quantize_rowwise(X) + del X + W_int8, state_W = quantize_global(W) + + # save for backward. + ctx.save_for_backward = X_int8, state_X, W_int8, state_W + + # matmult, fused dequant and add bias + # call "mixed" because we are mixing rowwise quantized and global quantized + return int8_matmul_mixed_dequanitze( + X_int8, W_int8.t(), state_X, state_W, bias + ).view(*X_3D_sz[:-1], -1) + + @staticmethod + def backward(ctx, G_3D): + # reshape input to [N_out * L, D] + G = G_3D.reshape(-1, G_3D.size(-1)) + G_3D_sz = G_3D.size() + + grad_X = grad_W = grad_bias = None + + X_int8, state_X, W_int8, state_W = ctx.save_for_backward + if ctx.needs_input_grad[1]: + real_X = dequantize_rowwise(X_int8, state_X) + del X_int8 + grad_W = torch.matmul(G.t(), real_X.to(G.dtype)) + del real_X + if ctx.needs_input_grad[2]: + grad_bias = G.sum(dim=0) + if ctx.needs_input_grad[0]: + G_int8, state_G = quantize_rowwise(G) + del G + W_int8 = W_int8.t().contiguous() + grad_X = int8_matmul_mixed_dequanitze(G_int8, W_int8.t(), state_G, state_W, None).view( + *G_3D_sz[:-1], -1 + ) + + return grad_X, grad_W, grad_bias + +class SwitchBackLinear(nn.Linear): + def __init__( + self, + in_features: int, + out_features: int, + bias: bool = True, + device=None, + dtype=None, + vector_wise_quantization: bool = False, + mem_efficient : bool = False, + ): + super().__init__(in_features, out_features, bias, device, dtype) + + if not is_triton_available: + raise ImportError('''Could not import triton. Please install triton to use SwitchBackLinear. + Alternatively, you can use bnb.nn.SwitchBackLinearBnb, but it will be slower''') + + # By default, we use the global quantization. + self.vector_wise_quantization = vector_wise_quantization + if self.vector_wise_quantization: + self._fn = _switchback_vectorrize + if mem_efficient: + print('mem efficient is not supported for vector-wise quantization.') + exit(1) + else: + if mem_efficient: + self._fn = _switchback_global_mem_efficient + else: + self._fn = _switchback_global + + def prepare_for_eval(self): + # If we just want to do eval, we can pre-quantize the weights instead of doing it on the forward pass. + # Note this is experimental and not tested thoroughly. + # Note this needs to be explicitly called with something like + # def cond_prepare(m): + # if hasattr(m, "prepare_for_eval"): + # m.prepare_for_eval() + # model.apply(cond_prepare) + print('=> preparing for eval.') + if self.vector_wise_quantization: + W_int8, state_W = quantize_rowwise(self.weight) + else: + W_int8, state_W = quantize_global(self.weight) + + self.register_buffer("W_int8", W_int8) + self.register_buffer("state_W", state_W) + + del self.weight + + def forward(self, x): + if self.training: + return self._fn.apply(x, self.weight, self.bias) + else: + # If it hasn't been "prepared for eval", run the standard forward pass. + if not hasattr(self, "W_int8"): + return self._fn.apply(x, self.weight, self.bias) + + # Otherwise, use pre-computed weights. 
+ X = x.view(-1, x.size(-1)) + X_int8, state_X = quantize_rowwise(X) + + if self.vector_wise_quantization: + return int8_matmul_rowwise_dequantize( + X_int8, self.W_int8.t(), state_X, self.state_W, self.bias + ).view(*x.size()[:-1], -1) + else: + return int8_matmul_mixed_dequanitze( + X_int8, self.W_int8.t(), state_X, self.state_W, self.bias + ).view(*x.size()[:-1], -1) + +SwitchBackLinearGlobal = partial(SwitchBackLinear, vector_wise_quantization=False) +SwitchBackLinearGlobalMemEfficient = partial(SwitchBackLinear, vector_wise_quantization=False, mem_efficient=True) +SwitchBackLinearVectorwise = partial(SwitchBackLinear, vector_wise_quantization=True) + +# This is just the standard linear function. +class StandardLinearFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, input, weight, bias=None): + X = input.view(-1, input.size(-1)) + + ctx.save_for_backward(X, weight, bias) + output = input.matmul(weight.t()) + if bias is not None: + output += bias.unsqueeze(0).expand_as(output) + return output.view(*input.size()[:-1], -1) + + @staticmethod + def backward(ctx, grad_output_3D): + input, weight, bias = ctx.saved_tensors + + grad_output = grad_output_3D.reshape(-1, grad_output_3D.size(-1)) + + grad_input = grad_weight = grad_bias = None + + if ctx.needs_input_grad[0]: + grad_input = grad_output.matmul(weight.to(grad_output.dtype)).view(*grad_output_3D.size()[:-1], -1) + if ctx.needs_input_grad[1]: + grad_weight = grad_output.t().matmul(input.to(grad_output.dtype)) + if bias is not None and ctx.needs_input_grad[2]: + grad_bias = grad_output.sum(0) + + return grad_input, grad_weight, grad_bias + +class StandardLinear(nn.Linear): + + def forward(self, x): + return StandardLinearFunction.apply(x, self.weight, self.bias) diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__init__.py b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..83a57bd9ff2896d8369e7d8033e2b71f2fcf97f4 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
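The SwitchBackLinear layers above re-quantize the weight inside the autograd function on every training step, and only serve from the cached W_int8 / state_W buffers after prepare_for_eval() has been called. A minimal usage sketch of that train-then-eval flow, assuming a CUDA device, a working triton install, and that bitsandbytes.nn re-exports SwitchBackLinear (layer sizes and inputs are illustrative):

import torch
import bitsandbytes as bnb

layer = bnb.nn.SwitchBackLinear(1024, 4096).cuda().half()
x = torch.randn(8, 16, 1024, device="cuda", dtype=torch.float16)

layer.train()
y = layer(x)  # weight is quantized on every forward pass

# pre-quantize once for inference, following the pattern in prepare_for_eval's comment
layer.eval()
layer.apply(lambda m: m.prepare_for_eval() if hasattr(m, "prepare_for_eval") else None)
y = layer(x)  # now served from the cached W_int8 / state_W buffers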
+ +from bitsandbytes.cextension import COMPILED_WITH_CUDA + +from .adagrad import Adagrad, Adagrad8bit, Adagrad32bit +from .adam import Adam, Adam8bit, Adam32bit, PagedAdam, PagedAdam8bit, PagedAdam32bit +from .adamw import AdamW, AdamW8bit, AdamW32bit, PagedAdamW, PagedAdamW8bit, PagedAdamW32bit +from .lamb import LAMB, LAMB8bit, LAMB32bit +from .lars import LARS, LARS8bit, LARS32bit, PytorchLARS +from .optimizer import GlobalOptimManager +from .rmsprop import RMSprop, RMSprop8bit, RMSprop32bit +from .lion import Lion, Lion8bit, Lion32bit, PagedLion, PagedLion8bit, PagedLion32bit +from .sgd import SGD, SGD8bit, SGD32bit diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/__init__.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c00519ff828dc2d7f8b910239480763d33eade55 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/__init__.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/adagrad.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/adagrad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c84ecad0b083c2c96aa8655925f1132065f3f973 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/adagrad.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/adam.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36977319c306cb6a3308589a7bce24a52985630d Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/adam.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/adamw.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/adamw.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb6f06f79c23a0a9eea1c0419fcb1624499a57f7 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/adamw.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/lamb.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/lamb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f657875b87b47ecdc985ba615063e5685e31cffa Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/lamb.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/lars.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/lars.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6dd6abf3e87d5a1bf00b518617f30fe109bd72ad Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/lars.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/lion.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/lion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0267da8b631e30c6fd052745b76609742fb9619b Binary files /dev/null and 
b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/lion.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/optimizer.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47f85a36ac3c4c47c4ed3b1b4f81fcaa038832d5 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/optimizer.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/rmsprop.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/rmsprop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3953e8ea46d14ed9d545b71a25fc4eb5e1fa503d Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/rmsprop.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/sgd.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/sgd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f13f6fbd8dfc555baf852fefc441e9b2cdf5d03c Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/sgd.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/optim/adagrad.py b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/adagrad.py new file mode 100644 index 0000000000000000000000000000000000000000..7d8df58ac84ccc202b913ff488f3d8e7ae5b0ad5 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/adagrad.py @@ -0,0 +1,132 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
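The optim/__init__.py above exposes each optimizer in three flavors (default, 8-bit, 32-bit), plus paged variants for Adam, AdamW, and Lion. Switching a model from torch.optim to the 8-bit counterpart is then a one-line change; a hedged sketch (model and hyperparameters illustrative, CUDA assumed):

import torch
import bitsandbytes as bnb

model = torch.nn.Linear(4096, 4096).cuda()

# same constructor shape as torch.optim.Adam, but the optimizer state is kept
# in blockwise-quantized 8-bit buffers for tensors above min_8bit_size elements
opt = bnb.optim.Adam8bit(model.parameters(), lr=1e-3, betas=(0.9, 0.999))

model(torch.randn(2, 4096, device="cuda")).sum().backward()
opt.step()
opt.zero_grad()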
+from bitsandbytes.optim.optimizer import Optimizer1State + + +class Adagrad(Optimizer1State): + def __init__( + self, + params, + lr=1e-2, + lr_decay=0, + weight_decay=0, + initial_accumulator_value=0, + eps=1e-10, + optim_bits=32, + args=None, + min_8bit_size=4096, + percentile_clipping=100, + block_wise=True, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= weight_decay: + raise ValueError( + f"Invalid weight_decay value: {weight_decay}" + ) + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if initial_accumulator_value != 0.0: + raise ValueError("Initial accumulator value != 0.0 not supported!") + if lr_decay != 0.0: + raise ValueError("Lr Decay != 0.0 not supported!") + super().__init__( + "adagrad", + params, + lr, + (0.0, 0.0), + eps, + weight_decay, + optim_bits, + args, + min_8bit_size, + percentile_clipping, + block_wise, + ) + + +class Adagrad8bit(Optimizer1State): + def __init__( + self, + params, + lr=1e-2, + lr_decay=0, + weight_decay=0, + initial_accumulator_value=0, + eps=1e-10, + optim_bits=8, + args=None, + min_8bit_size=4096, + percentile_clipping=100, + block_wise=True, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= weight_decay: + raise ValueError( + f"Invalid weight_decay value: {weight_decay}" + ) + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if initial_accumulator_value != 0.0: + raise ValueError("Initial accumulator value != 0.0 not supported!") + if lr_decay != 0.0: + raise ValueError("Lr Decay != 0.0 not supported!") + assert block_wise + super().__init__( + "adagrad", + params, + lr, + (0.0, 0.0), + eps, + weight_decay, + 8, + args, + min_8bit_size, + percentile_clipping, + block_wise, + ) + + +class Adagrad32bit(Optimizer1State): + def __init__( + self, + params, + lr=1e-2, + lr_decay=0, + weight_decay=0, + initial_accumulator_value=0, + eps=1e-10, + optim_bits=32, + args=None, + min_8bit_size=4096, + percentile_clipping=100, + block_wise=True, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= weight_decay: + raise ValueError( + f"Invalid weight_decay value: {weight_decay}" + ) + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if initial_accumulator_value != 0.0: + raise ValueError("Initial accumulator value != 0.0 not supported!") + if lr_decay != 0.0: + raise ValueError("Lr Decay != 0.0 not supported!") + super().__init__( + "adagrad", + params, + lr, + (0.0, 0.0), + eps, + weight_decay, + 32, + args, + min_8bit_size, + percentile_clipping, + block_wise, + ) diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/optim/adam.py b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/adam.py new file mode 100644 index 0000000000000000000000000000000000000000..86981eb86871dd572ed35b0a41f4bd10791577b2 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/adam.py @@ -0,0 +1,273 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
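Note the guards shared by all three Adagrad variants above: lr_decay and initial_accumulator_value exist only for signature compatibility with torch.optim.Adagrad and must remain 0. A short sketch of what the constructor accepts and rejects (parameter shapes illustrative):

import torch
import bitsandbytes as bnb

params = [torch.nn.Parameter(torch.randn(8192, 128, device="cuda"))]

opt = bnb.optim.Adagrad8bit(params, lr=1e-2)  # defaults pass all guards

try:
    bnb.optim.Adagrad8bit(params, lr=1e-2, lr_decay=0.5)
except ValueError as err:
    print(err)  # "Lr Decay != 0.0 not supported!"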
+ +import math +import os + +import torch +import torch.distributed as dist + +import bitsandbytes.functional as F +from bitsandbytes.optim.optimizer import Optimizer2State + + +class Adam(Optimizer2State): + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32, + args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False): + super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged) + +class Adam8bit(Optimizer2State): + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32, + args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False): + super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged) + +class Adam32bit(Optimizer2State): + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32, + args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False): + super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged) + +class PagedAdam(Optimizer2State): + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32, + args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False): + super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True) + +class PagedAdam8bit(Optimizer2State): + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32, + args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False): + super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True) + +class PagedAdam32bit(Optimizer2State): + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, optim_bits=32, + args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False): + super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True) + +class AnalysisAdam(torch.optim.Optimizer): + """Adam that performs 8-bit vs 32-bit error analysis. + + This implementation is modified from torch.optim.Adam based on: + `Fixed Weight Decay Regularization in Adam` + (see https://arxiv.org/abs/1711.05101) + + It has been proposed in `Adam: A Method for Stochastic Optimization`_. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + + .. _Adam: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + .. 
_On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + """ + + def __init__( + self, + params, + lr=1e-3, + betas=(0.9, 0.999), + eps=1e-8, + weight_decay=0, + amsgrad=False, + bnb_analysis="dynamic-blockwise", + savedir=None, + ): + defaults = dict( + lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + amsgrad=amsgrad, + ) + super().__init__(params, defaults) + self.analysis = bnb_analysis + self.savedir = savedir + + @property + def supports_memory_efficient_fp16(self): + return True + + @property + def supports_flat_params(self): + return True + + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p_id, p in enumerate(group["params"]): + if p.grad is None: + continue + grad = p.grad.data + if grad.dtype in {torch.float16, torch.bfloat16}: + grad = grad.float() + if grad.is_sparse: + raise RuntimeError( + "Adam does not support sparse gradients, please consider SparseAdam instead" + ) + amsgrad = group.get("amsgrad", False) + assert not amsgrad + + p_data_fp32 = p.data + if p.data.dtype in {torch.float16, torch.bfloat16}: + p_data_fp32 = p_data_fp32.float() + + state = self.state[p] + + # State initialization + if len(state) == 0: + state["step"] = 0 + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like(p_data_fp32) + # Exponential moving average of squared gradient values + state["exp_avg_sq"] = torch.zeros_like(p_data_fp32) + state["abserrors"] = torch.zeros( + (256, 256), device=p_data_fp32.device + ) + state["relerrors"] = torch.zeros( + (256, 256), device=p_data_fp32.device + ) + state["counts"] = torch.zeros( + (256, 256), device=p_data_fp32.device + ) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. 
values + state["max_exp_avg_sq"] = torch.zeros_like(p_data_fp32) + else: + state["exp_avg"] = state["exp_avg"].to(p_data_fp32) + state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32) + if amsgrad: + state["max_exp_avg_sq"] = state["max_exp_avg_sq"].to( + p_data_fp32 + ) + + state["step"] += 1 + beta1, beta2 = group["betas"] + bias_correction1 = 1 - beta1 ** state["step"] + bias_correction2 = 1 - beta2 ** state["step"] + step_size = ( + group["lr"] * math.sqrt(bias_correction2) / bias_correction1 + ) + e = state["abserrors"] + rele = state["relerrors"] + counts = state["counts"] + + if group["weight_decay"] != 0: + p_data_fp32.add_( + p_data_fp32, alpha=-group["weight_decay"] * group["lr"] + ) + + exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] + if amsgrad: + max_exp_avg_sq = state["max_exp_avg_sq"] + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + + denom = exp_avg_sq.sqrt().add_(group["eps"]) + update_fp32 = exp_avg / denom + + if ( + p_data_fp32.numel() <= 8192 + or p_data_fp32.numel() > 50000 * 1000 + ): + # embedding layer or too small + p_data_fp32 += -step_size * update_fp32 + else: + if self.analysis == "dynamic-blockwise": + code1 = F.create_dynamic_map(signed=True).to(p.device) + code2 = F.create_dynamic_map(signed=False).to(p.device) + C1, S1 = F.quantize_blockwise(exp_avg, code=code1) + state1 = F.dequantize_blockwise(C1, S1) + C2, S2 = F.quantize_blockwise(exp_avg_sq, code=code2) + state2 = F.dequantize_blockwise(C2, S2) + elif self.analysis == "dynamic": + code1 = F.create_dynamic_map(signed=True).to(p.device) + code2 = F.create_dynamic_map(signed=False).to(p.device) + C1, S1 = F.quantize(exp_avg, code=code1) + state1 = F.dequantize(C1, S1) + C2, S2 = F.quantize(exp_avg_sq, code=code2) + state2 = F.dequantize(C2, S2) + elif self.analysis == "linear": + code1 = F.create_linear_map(signed=True).to(p.device) + code2 = F.create_linear_map(signed=False).to(p.device) + C1, S1 = F.quantize(exp_avg, code=code1) + state1 = F.dequantize(C1, S1) + C2, S2 = F.quantize(exp_avg_sq, code=code2) + state2 = F.dequantize(C2, S2) + elif self.analysis == "quantile": + code1 = F.estimate_quantiles(exp_avg) + code2 = F.estimate_quantiles(exp_avg_sq) + C1 = F.quantize_no_absmax(exp_avg, code=code1) + state1 = F.dequantize_no_absmax(C1, code1) + C2 = F.quantize_no_absmax(exp_avg_sq, code=code2) + state2 = F.dequantize_no_absmax(C2, code2) + elif self.analysis == "my-quantization-routine": + pass + # 1. get code + # 2. quantize + # 3. dequantize + # Error will be calculated automatically! + else: + raise ValueError( + f"Invalid analysis value: {self.analysis}!" 
+ ) + + denom = state2.sqrt().add_(group["eps"]) + update_8bit = state1 / denom + + abserr = torch.abs(update_8bit - update_fp32) + relerr = abserr / torch.abs(update_fp32 + 1e-6) + + C1, C2 = C1.int(), C2.int() + + F.histogram_scatter_add_2d(e, C1.int(), C2.int(), abserr) + F.histogram_scatter_add_2d(rele, C1.int(), C2.int(), relerr) + F.histogram_scatter_add_2d( + counts, C1.int(), C2.int(), torch.ones_like(abserr) + ) + + p_data_fp32 += -step_size * update_fp32 + + if not dist.is_initialized() or dist.get_rank() == 0: + if self.savedir != "" and state["step"] % 100 == 0: + if not os.path.exists(self.savedir): + os.makedirs(self.savedir) + shapestr = "_".join( + [str(dim) for dim in p_data_fp32.shape] + ) + pathe = os.path.join( + self.savedir, f"{p_id}_{shapestr}_abserr.pkl" + ) + pathrele = os.path.join( + self.savedir, f"{p_id}_{shapestr}_relerr.pkl" + ) + pathcounts = os.path.join( + self.savedir, f"{p_id}_{shapestr}_counts.pkl" + ) + torch.save(e, pathe) + torch.save(rele, pathrele) + torch.save(counts, pathcounts) + + if p.data.dtype in {torch.float16, torch.bfloat16}: + p.data.copy_(p_data_fp32) + + return loss diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/optim/adamw.py b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/adamw.py new file mode 100644 index 0000000000000000000000000000000000000000..21077f1a0a0394782a9de575d6bd9323eec9e4f5 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/adamw.py @@ -0,0 +1,39 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +from bitsandbytes.optim.optimizer import Optimizer2State + + + +class AdamW(Optimizer2State): + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32, + args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False): + super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged ) + +class AdamW8bit(Optimizer2State): + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32, + args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False): + super().__init__( "adam", params, lr, betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged ) + +class AdamW32bit(Optimizer2State): + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32, + args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False): + super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged) + + +class PagedAdamW(Optimizer2State): + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32, + args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True): + super().__init__( "adam", params, lr, betas, eps, weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True) + +class PagedAdamW8bit(Optimizer2State): + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32, + args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True): + super().__init__( "adam", params, lr, 
betas, eps, weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True) + +class PagedAdamW32bit(Optimizer2State): + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, optim_bits=32, + args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True): + super().__init__( "adam", params, lr, betas, eps, weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True) + diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/optim/lamb.py b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/lamb.py new file mode 100644 index 0000000000000000000000000000000000000000..1fbb6fadc0dd76ab8a16767aebacb200307247ff --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/lamb.py @@ -0,0 +1,105 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +from bitsandbytes.optim.optimizer import Optimizer2State + + +class LAMB(Optimizer2State): + def __init__( + self, + params, + lr=1e-3, + bias_correction=True, + betas=(0.9, 0.999), + eps=1e-8, + weight_decay=0, + amsgrad=False, + adam_w_mode=True, + optim_bits=32, + args=None, + min_8bit_size=4096, + percentile_clipping=100, + block_wise=False, + max_unorm=1.0, + ): + super().__init__( + "lamb", + params, + lr, + betas, + eps, + weight_decay, + optim_bits, + args, + min_8bit_size, + percentile_clipping, + block_wise, + max_unorm=1.0, + ) + + +class LAMB8bit(Optimizer2State): + def __init__( + self, + params, + lr=1e-3, + bias_correction=True, + betas=(0.9, 0.999), + eps=1e-8, + weight_decay=0, + amsgrad=False, + adam_w_mode=True, + args=None, + min_8bit_size=4096, + percentile_clipping=100, + block_wise=False, + max_unorm=1.0, + ): + super().__init__( + "lamb", + params, + lr, + betas, + eps, + weight_decay, + 8, + args, + min_8bit_size, + percentile_clipping, + block_wise, + max_unorm=1.0, + ) + + +class LAMB32bit(Optimizer2State): + def __init__( + self, + params, + lr=1e-3, + bias_correction=True, + betas=(0.9, 0.999), + eps=1e-8, + weight_decay=0, + amsgrad=False, + adam_w_mode=True, + args=None, + min_8bit_size=4096, + percentile_clipping=100, + block_wise=False, + max_unorm=1.0, + ): + super().__init__( + "lamb", + params, + lr, + betas, + eps, + weight_decay, + 32, + args, + min_8bit_size, + percentile_clipping, + block_wise, + max_unorm=1.0, + ) diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/optim/lars.py b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/lars.py new file mode 100644 index 0000000000000000000000000000000000000000..73554e3ccc3d9757c34e9f8dfb7fdbcb4709c3f4 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/lars.py @@ -0,0 +1,210 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +import torch +from torch.optim import Optimizer + +from bitsandbytes.optim.optimizer import Optimizer1State + + +class LARS(Optimizer1State): + def __init__( + self, + params, + lr, + momentum=0, + dampening=0, + weight_decay=0, + nesterov=False, + optim_bits=32, + args=None, + min_8bit_size=4096, + percentile_clipping=100, + max_unorm=0.02, + ): + if momentum == 0: + raise NotImplementedError( + "LARS without momentum is not supported!" 
+ ) + super().__init__( + "lars", + params, + lr, + (momentum, dampening), + 0.0, + weight_decay, + optim_bits, + args, + min_8bit_size, + percentile_clipping, + max_unorm=max_unorm, + block_wise=False, + ) + + +class LARS8bit(Optimizer1State): + def __init__( + self, + params, + lr, + momentum=0, + dampening=0, + weight_decay=0, + nesterov=False, + args=None, + min_8bit_size=4096, + percentile_clipping=100, + max_unorm=0.02, + ): + if momentum == 0: + raise NotImplementedError( + "LARS without momentum is not supported!" + ) + super().__init__( + "lars", + params, + lr, + (momentum, dampening), + 0.0, + weight_decay, + 8, + args, + min_8bit_size, + percentile_clipping, + max_unorm=max_unorm, + block_wise=False, + ) + + +class LARS32bit(Optimizer1State): + def __init__( + self, + params, + lr, + momentum=0, + dampening=0, + weight_decay=0, + nesterov=False, + args=None, + min_8bit_size=4096, + percentile_clipping=100, + max_unorm=0.02, + ): + if momentum == 0: + raise NotImplementedError( + "LARS without momentum is not supported!" + ) + super().__init__( + "lars", + params, + lr, + (momentum, dampening), + 0.0, + weight_decay, + 32, + args, + min_8bit_size, + percentile_clipping, + max_unorm=max_unorm, + block_wise=False, + ) + + +class PytorchLARS(Optimizer): + def __init__( + self, + params, + lr=0.01, + momentum=0, + dampening=0, + weight_decay=0, + nesterov=False, + max_unorm=0.02, + ): + if lr < 0.0: + raise ValueError(f"Invalid learning rate: {lr}") + if momentum < 0.0: + raise ValueError(f"Invalid momentum value: {momentum}") + if weight_decay < 0.0: + raise ValueError( + f"Invalid weight_decay value: {weight_decay}" + ) + + defaults = dict( + lr=lr, + momentum=momentum, + dampening=dampening, + weight_decay=weight_decay, + nesterov=nesterov, + max_unorm=max_unorm, + ) + if nesterov and (momentum <= 0 or dampening != 0): + raise ValueError( + "Nesterov momentum requires a momentum and zero dampening" + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("nesterov", False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Args: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + d_p_list = [] + momentum_buffer_list = [] + weight_decay = group["weight_decay"] + momentum = group["momentum"] + dampening = group["dampening"] + nesterov = group["nesterov"] + max_unorm = group["max_unorm"] + lr = group["lr"] + + for p in group["params"]: + if p.grad is None: + continue + + state = self.state[p] + d_p = p.grad + if weight_decay != 0: + d_p = d_p.add(p, alpha=weight_decay) + + if momentum != 0: + buf = state.get("momentum_buffer", None) + + if buf is None: + buf = torch.clone(d_p).detach() + state["momentum_buffer"] = buf + else: + buf.mul_(momentum).add_(d_p, alpha=1 - dampening) + + if nesterov: + update = d_p + buf * momentum + else: + update = buf + + update_scale = 1.0 + if max_unorm > 0.0: + assert p.dtype == torch.float32 + pnorm = torch.norm(p.detach()) + unorm = torch.norm(update) + if unorm > max_unorm * pnorm: + update_scale = max_unorm * pnorm / unorm + + p.add_(update, alpha=-lr * update_scale) + + return loss diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/optim/lion.py b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/lion.py new file mode 100644 index 0000000000000000000000000000000000000000..2bde1a447558a3d1d8d2026899b010f488494583 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/lion.py @@ -0,0 +1,30 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +from bitsandbytes.optim.optimizer import Optimizer1State + +class Lion(Optimizer1State): + def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False): + super().__init__("lion", params, lr, betas, 0., weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged) + +class Lion8bit(Optimizer1State): + def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False): + super().__init__("lion", params, lr, betas, 0., weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged) + +class Lion32bit(Optimizer1State): + def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True, is_paged=False): + super().__init__("lion", params, lr, betas, 0., weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=is_paged) + + +class PagedLion(Optimizer1State): + def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, optim_bits=32, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True): + super().__init__("lion", params, lr, betas, 0., weight_decay, optim_bits, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True) + +class PagedLion8bit(Optimizer1State): + def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, args=None, min_8bit_size=4096, percentile_clipping=100, block_wise=True): + super().__init__("lion", params, lr, betas, 0., weight_decay, 8, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True) + +class PagedLion32bit(Optimizer1State): + def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0, args=None, min_8bit_size=4096, 
percentile_clipping=100, block_wise=True):
+        super().__init__("lion", params, lr, betas, 0., weight_decay, 32, args, min_8bit_size, percentile_clipping, block_wise, is_paged=True)
diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/optim/optimizer.py b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/optimizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb83eddf0171906b8cf203307a9ac5cc45169603
--- /dev/null
+++ b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/optimizer.py
@@ -0,0 +1,724 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+from collections import abc as container_abcs
+from collections import defaultdict
+from copy import deepcopy
+from itertools import chain
+
+import torch
+
+import bitsandbytes.functional as F
+
+
+class MockArgs:
+    def __init__(self, initial_data):
+        for key in initial_data:
+            setattr(self, key, initial_data[key])
+
+
+class GlobalOptimManager:
+    _instance = None
+
+    def __init__(self):
+        raise RuntimeError("Call get_instance() instead")
+
+    def initialize(self):
+        self.pid2config = {}
+        self.index2config = {}
+        self.optimizer = None
+        self.uses_config_override = False
+        self.module_weight_config_triple = []
+
+    @classmethod
+    def get_instance(cls):
+        if cls._instance is None:
+            cls._instance = cls.__new__(cls)
+            cls._instance.initialize()
+        return cls._instance
+
+    def register_parameters(self, params):
+        param_groups = list(params)
+        if not isinstance(param_groups[0], dict):
+            param_groups = [{"params": param_groups}]
+
+        for group_index, group in enumerate(param_groups):
+            for p_index, p in enumerate(group["params"]):
+                if id(p) in self.pid2config:
+                    self.index2config[(group_index, p_index)] = self.pid2config[
+                        id(p)
+                    ]
+
+    def override_config(
+        self, parameters, key=None, value=None, key_value_dict=None
+    ):
+        """
+        Overrides the initial optimizer config for specific parameters.
+
+        The key-value pairs of the optimizer config for the input parameters are
+        overridden. These can be regular optimizer parameters such as "betas" or
+        "lr", or 8-bit specific parameters such as "optim_bits" or
+        "percentile_clipping".
+
+        Parameters
+        ----------
+        parameters : torch.Tensor or list(torch.Tensors)
+            The input parameters.
+        key : str
+            The hyperparameter to override.
+        value : object
+            The value for the hyperparameter.
+        key_value_dict : dict
+            A dictionary with multiple key-values to override.
+ """ + self.uses_config_override = True + if isinstance(parameters, torch.nn.Parameter): + parameters = [parameters] + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + if key is not None and value is not None: + assert key_value_dict is None + key_value_dict = {key: value} + + if key_value_dict is not None: + for p in parameters: + if id(p) in self.pid2config: + self.pid2config[id(p)].update(key_value_dict) + else: + self.pid2config[id(p)] = key_value_dict + + def register_module_override(self, module, param_name, config): + self.module_weight_config_triple.append((module, param_name, config)) + + +class Optimizer8bit(torch.optim.Optimizer): + def __init__(self, params, defaults, optim_bits=32, is_paged=False): + super().__init__(params, defaults) + self.initialized = False + self.name2qmap = {} + self.is_paged = is_paged + self.page_mng = F.GlobalPageManager.get_instance() + + self.mng = GlobalOptimManager.get_instance() + self.non_castable_tensor_keys = { + "qmap1", + "qmap2", + "max1", + "max2", + "new_max1", + "new_max2", + "state1", + "state2", + "gnorm_vec", + "absmax1", + "absmax2", + "unorm_vec", + } + + if optim_bits == 8: + self.fill_qmap() + + def fill_qmap(self): + self.name2qmap["dynamic"] = F.create_dynamic_map(signed=True) + self.name2qmap["udynamic"] = F.create_dynamic_map(signed=False) + + def __setstate__(self, state): + super().__setstate__(state) + + def load_state_dict(self, state_dict): + r"""Loads the optimizer state. + + Args: + state_dict (dict): optimizer state. Should be an object returned + from a call to :meth:`state_dict`. + """ + # deepcopy, to be consistent with module API + state_dict = deepcopy(state_dict) + # Validate the state_dict + groups = self.param_groups + saved_groups = state_dict["param_groups"] + + if len(groups) != len(saved_groups): + raise ValueError( + "loaded state dict has a different number of " + "parameter groups" + ) + param_lens = (len(g["params"]) for g in groups) + saved_lens = (len(g["params"]) for g in saved_groups) + if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)): + raise ValueError( + "loaded state dict contains a parameter group " + "that doesn't match the size of optimizer's group" + ) + + # Update the state + id_map = { + old_id: p + for old_id, p in zip( + chain.from_iterable(g["params"] for g in saved_groups), + chain.from_iterable(g["params"] for g in groups), + ) + } + + def cast(param, value): + r"""Make a deep copy of value, casting all tensors to device of param.""" + if isinstance(value, torch.Tensor): + # Floating-point types are a bit special here. They are the only ones + # that are assumed to always match the type of params. + if param.is_floating_point() and value.dtype != torch.uint8: + value = value.to(param.dtype) + return value + elif isinstance(value, dict): + for k, v in value.items(): + if k in self.non_castable_tensor_keys: + value[k] = v.to(param.device) + else: + value[k] = cast(param, v) + + return value + elif isinstance(value, container_abcs.Iterable): + return type(value)(cast(param, v) for v in value) + else: + return value + + # Copy state assigned to params (and cast tensors to appropriate types). + # State that is not assigned to params is copied as is (needed for + # backward compatibility). 
+ state = defaultdict(dict) + for k, v in state_dict["state"].items(): + if k in id_map: + param = id_map[k] + state[param] = cast(param, v) + else: + state[k] = v + + # Update parameter groups, setting their 'params' value + def update_group(group, new_group): + new_group["params"] = group["params"] + return new_group + + param_groups = [ + update_group(g, ng) for g, ng in zip(groups, saved_groups) + ] + self.__setstate__({"state": state, "param_groups": param_groups}) + + def to_gpu(self): + for gindex, group in enumerate(self.param_groups): + for pindex, p in enumerate(group["params"]): + if p in self.state: + values = self.state[p] + for k, v in values.items(): + if isinstance(v, torch.Tensor): + is_paged = getattr(v, 'is_paged', False) + if not is_paged: + self.state[p][k] = v.to(p.device) + + def check_overrides(self): + for module, attr, config in self.mng.module_weight_config_triple: + pmodule = getattr(module, attr) + assert pmodule is not None + assert isinstance(pmodule, torch.Tensor) or isinstance( + pmodule, torch.Parameter + ) + found = False + for gindex, group in enumerate(self.param_groups): + if found: + break + for pindex, p in enumerate(group["params"]): + if found: + break + if id(p) == id(pmodule): + # found the matching parameter + # init override + self.mng.pid2config[id(p)] = config + self.mng.index2config[ + (gindex, pindex) + ] = self.mng.pid2config[id(p)] + found = True + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + overflows = [] + + if not self.initialized: + self.check_overrides() + self.to_gpu() # needed for fairseq pure fp16 training + self.initialized = True + + #if self.is_paged: self.page_mng.prefetch_all() + for gindex, group in enumerate(self.param_groups): + for pindex, p in enumerate(group["params"]): + if p.grad is None: + continue + state = self.state[p] + if len(state) == 0: + self.init_state(group, p, gindex, pindex) + + self.prefetch_state(p) + self.update_step(group, p, gindex, pindex) + torch.cuda.synchronize() + if self.is_paged: + # all paged operation are asynchronous, we need + # to sync to make sure all tensors are in the right state + torch.cuda.synchronize() + + + return loss + + def get_config(self, gindex, pindex, group): + config = {} + config["betas"] = group["betas"] + config["eps"] = group["eps"] + config["weight_decay"] = group["weight_decay"] + config["lr"] = group["lr"] + config["optim_bits"] = self.args.optim_bits + config["min_8bit_size"] = self.args.min_8bit_size + config["percentile_clipping"] = self.args.percentile_clipping + config["block_wise"] = self.args.block_wise + config["max_unorm"] = self.args.max_unorm + config["skip_zeros"] = self.args.skip_zeros + + if (gindex, pindex) in self.mng.index2config: + config.update(self.mng.index2config[(gindex, pindex)]) + return config + + def init_state(self, group, p, gindex, pindex): + raise NotImplementedError("init_state method needs to be overridden") + + def update_step(self, group, p, gindex, pindex): + raise NotImplementedError( + "The update_step method needs to be overridden" + ) + + def get_state_buffer(self, p, dtype=torch.float32): + if not self.is_paged or p.numel() < 1e5: + return torch.zeros_like(p, dtype=dtype, device=p.device) + else: + # > 1 MB + buff = F.get_paged(*p.shape, dtype=dtype, device=p.device) + 
F.fill(buff, 0) + self.page_mng.paged_tensors.append(buff) + return buff + + def prefetch_state(self, p): + if self.is_paged: + state = self.state[p] + s1 = state['state1'] + is_paged = getattr(s1, 'is_paged', False) + if is_paged: + F.prefetch_tensor(state['state1']) + if 'state2' in state: + F.prefetch_tensor(state['state2']) + + +class Optimizer2State(Optimizer8bit): + def __init__( + self, + optimizer_name, + params, + lr=1e-3, + betas=(0.9, 0.999), + eps=1e-8, + weight_decay=0.0, + optim_bits=32, + args=None, + min_8bit_size=4096, + percentile_clipping=100, + block_wise=True, + max_unorm=0.0, + skip_zeros=False, + is_paged=False + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if isinstance(betas, str): + # format: '(beta1, beta2)' + betas = betas.replace("(", "").replace(")", "").strip().split(",") + betas = [float(b) for b in betas] + for i in range(len(betas)): + if not 0.0 <= betas[i] < 1.0: + raise ValueError( + f"Invalid beta parameter at index {i}: {betas[i]}" + ) + if not 0.0 <= weight_decay: + raise ValueError( + f"Invalid weight_decay value: {weight_decay}" + ) + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits, is_paged) + + if args is None: + args = {} + args["optim_bits"] = optim_bits + args["percentile_clipping"] = 100 + args["min_8bit_size"] = min_8bit_size + args["percentile_clipping"] = percentile_clipping + args["block_wise"] = block_wise + args["max_unorm"] = max_unorm + args["skip_zeros"] = skip_zeros + + self.args = MockArgs(args) + else: + self.args = args + + self.optimizer_name = optimizer_name + + @torch.no_grad() + def init_state(self, group, p, gindex, pindex): + config = self.get_config(gindex, pindex, group) + + if config["optim_bits"] == 32: + dtype = torch.float32 + elif config["optim_bits"] == 8: + dtype = torch.uint8 + else: + raise NotImplementedError( + f'Amount of optimizer bits not supported: {config["optim_bits"]}' + ) + + if p.numel() < config["min_8bit_size"]: + dtype = torch.float32 + + state = self.state[p] + state["step"] = 0 + + if dtype == torch.float32 or ( + dtype == torch.uint8 and p.numel() < 4096 + ): + state["state1"] = self.get_state_buffer(p, dtype=torch.float32) + state["state2"] = self.get_state_buffer(p, dtype=torch.float32) + elif dtype == torch.uint8: + if state["step"] == 0: + if "dynamic" not in self.name2qmap: + self.fill_qmap() + self.name2qmap["dynamic"] = self.name2qmap["dynamic"].to( + p.device + ) + self.name2qmap["udynamic"] = self.name2qmap["udynamic"].to( + p.device + ) + + state["state1"] = self.get_state_buffer(p, dtype=torch.uint8) + state["qmap1"] = self.name2qmap["dynamic"] + + state["state2"] = self.get_state_buffer(p, dtype=torch.uint8) + state["qmap2"] = self.name2qmap["udynamic"] + + if config["block_wise"]: + n = p.numel() + blocks = n // 2048 + blocks += 1 if n % 2048 > 0 else 0 + + state["absmax1"] = torch.zeros( + (blocks,), dtype=torch.float32, device=p.device + ) + state["absmax2"] = torch.zeros( + (blocks,), dtype=torch.float32, device=p.device + ) + else: + state["max1"] = torch.zeros( + (1,), dtype=torch.float32, device=p.device + ) + state["new_max1"] = torch.zeros( + (1,), dtype=torch.float32, device=p.device + ) + state["max2"] = torch.zeros( + (1,), dtype=torch.float32, device=p.device + ) + state["new_max2"] = torch.zeros( + (1,), dtype=torch.float32, device=p.device + ) + + if config["percentile_clipping"] < 100: + 
state["gnorm_vec"] = torch.zeros((100,), device=p.device) + + if config["max_unorm"] > 0.0: + state["unorm_vec"] = torch.zeros((1,), device=p.device) + + @torch.no_grad() + def update_step(self, group, p, gindex, pindex): + state = self.state[p] + grad = p.grad + + config = self.get_config(gindex, pindex, group) + + state["step"] += 1 + step = state["step"] + + if config["percentile_clipping"] < 100: + current_gnorm, clip_value, gnorm_scale = F.percentile_clipping( + grad, state["gnorm_vec"], step, config["percentile_clipping"] + ) + else: + gnorm_scale = 1.0 + + if state["state1"].dtype == torch.float: + F.optimizer_update_32bit( + self.optimizer_name, + grad, + p, + state["state1"], + config["betas"][0], + config["eps"], + step, + config["lr"], + state["state2"], + config["betas"][1], + config["weight_decay"], + gnorm_scale, + state["unorm_vec"] if config["max_unorm"] > 0.0 else None, + max_unorm=config["max_unorm"], + skip_zeros=config["skip_zeros"], + ) + + elif state["state1"].dtype == torch.uint8 and not config["block_wise"]: + F.optimizer_update_8bit( + self.optimizer_name, + grad, + p, + state["state1"], + state["state2"], + config["betas"][0], + config["betas"][1], + config["eps"], + step, + config["lr"], + state["qmap1"], + state["qmap2"], + state["max1"], + state["max2"], + state["new_max1"], + state["new_max2"], + config["weight_decay"], + gnorm_scale=gnorm_scale, + unorm_vec=state["unorm_vec"] + if config["max_unorm"] > 0.0 + else None, + max_unorm=config["max_unorm"], + ) + + # swap maxes + state["max1"], state["new_max1"] = state["new_max1"], state["max1"] + state["max2"], state["new_max2"] = state["new_max2"], state["max2"] + elif state["state1"].dtype == torch.uint8 and config["block_wise"]: + F.optimizer_update_8bit_blockwise( + self.optimizer_name, + grad, + p, + state["state1"], + state["state2"], + config["betas"][0], + config["betas"][1], + config["eps"], + step, + config["lr"], + state["qmap1"], + state["qmap2"], + state["absmax1"], + state["absmax2"], + config["weight_decay"], + gnorm_scale=gnorm_scale, + skip_zeros=config["skip_zeros"], + ) + + +class Optimizer1State(Optimizer8bit): + def __init__( + self, + optimizer_name, + params, + lr=1e-3, + betas=(0.9, 0.0), + eps=1e-8, + weight_decay=0.0, + optim_bits=32, + args=None, + min_8bit_size=4096, + percentile_clipping=100, + block_wise=True, + max_unorm=0.0, + skip_zeros=False, + is_paged=False + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + for i in range(len(betas)): + if not 0.0 <= betas[i] < 1.0: + raise ValueError( + f"Invalid beta parameter at index {i}: {betas[i]}" + ) + if not 0.0 <= weight_decay: + raise ValueError( + f"Invalid weight_decay value: {weight_decay}" + ) + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) + super().__init__(params, defaults, optim_bits, is_paged) + + if args is None: + args = {} + args["optim_bits"] = optim_bits + args["percentile_clipping"] = 100 + args["min_8bit_size"] = min_8bit_size + args["percentile_clipping"] = percentile_clipping + args["block_wise"] = block_wise + args["max_unorm"] = max_unorm + args["skip_zeros"] = skip_zeros + + self.args = MockArgs(args) + else: + self.args = args + + self.optimizer_name = optimizer_name + + @torch.no_grad() + def init_state(self, group, p, gindex, pindex): + config = self.get_config(gindex, pindex, group) + + if config["optim_bits"] == 32: + dtype = torch.float32 + elif config["optim_bits"] == 8: + dtype = 
torch.uint8 + else: + raise NotImplementedError( + f'Amount of optimizer bits not supported: {config["optim_bits"]}' + ) + + if p.numel() < config["min_8bit_size"]: + dtype = torch.float32 + + state = self.state[p] + state["step"] = 0 + + if dtype == torch.float32 or ( + dtype == torch.uint8 and p.numel() < 4096 + ): + state["state1"] = self.get_state_buffer(p, dtype=torch.float32) + elif dtype == torch.uint8: + if state["step"] == 0: + if "dynamic" not in self.name2qmap: + self.fill_qmap() + self.name2qmap["dynamic"] = self.name2qmap["dynamic"].to( + p.device + ) + + state["state1"] = self.get_state_buffer(p, dtype=torch.uint8) + state["qmap1"] = self.name2qmap["dynamic"] + + if config["block_wise"]: + n = p.numel() + blocks = n // 2048 + blocks += 1 if n % 2048 > 0 else 0 + + state["absmax1"] = torch.zeros( + (blocks,), dtype=torch.float32, device=p.device + ) + else: + state["max1"] = torch.zeros( + (1,), dtype=torch.float32, device=p.device + ) + state["new_max1"] = torch.zeros( + (1,), dtype=torch.float32, device=p.device + ) + + if config["percentile_clipping"] < 100: + state["gnorm_vec"] = torch.zeros((100,), device=p.device) + + if config["max_unorm"] > 0.0: + state["unorm_vec"] = torch.zeros((1,), device=p.device) + + @torch.no_grad() + def update_step(self, group, p, gindex, pindex): + state = self.state[p] + grad = p.grad + + config = self.get_config(gindex, pindex, group) + + state["step"] += 1 + step = state["step"] + + if config["percentile_clipping"] < 100: + current_gnorm, clip_value, gnorm_scale = F.percentile_clipping( + grad, state["gnorm_vec"], step, config["percentile_clipping"] + ) + else: + gnorm_scale = 1.0 + + if state["state1"].dtype == torch.float: + F.optimizer_update_32bit( + self.optimizer_name, + grad, + p, + state["state1"], + config["betas"][0], + config["eps"], + step, + config["lr"], + None, + config['betas'][1], + config["weight_decay"], + gnorm_scale, + state["unorm_vec"] if config["max_unorm"] > 0.0 else None, + max_unorm=config["max_unorm"], + skip_zeros=config["skip_zeros"], + ) + + elif state["state1"].dtype == torch.uint8 and not config["block_wise"]: + F.optimizer_update_8bit( + self.optimizer_name, + grad, + p, + state["state1"], + None, + config["betas"][0], + config["betas"][1], + config["eps"], + step, + config["lr"], + state["qmap1"], + None, + state["max1"], + None, + state["new_max1"], + None, + config["weight_decay"], + gnorm_scale, + state["unorm_vec"] if config["max_unorm"] > 0.0 else None, + max_unorm=config["max_unorm"], + ) + + state["max1"], state["new_max1"] = state["new_max1"], state["max1"] + elif state["state1"].dtype == torch.uint8 and config["block_wise"]: + F.optimizer_update_8bit_blockwise( + self.optimizer_name, + grad, + p, + state["state1"], + None, + config["betas"][0], + config["betas"][1], + config["eps"], + step, + config["lr"], + state["qmap1"], + None, + state["absmax1"], + None, + config["weight_decay"], + gnorm_scale=gnorm_scale, + skip_zeros=config["skip_zeros"], + ) diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/optim/rmsprop.py b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/rmsprop.py new file mode 100644 index 0000000000000000000000000000000000000000..2853ca723400b76c38c9e2fb51a0cc11a8b16dde --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/rmsprop.py @@ -0,0 +1,115 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
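(Editor's aside between files: the `init_state` logic above allocates, per 8-bit moment buffer, one uint8 entry per parameter element plus one fp32 absmax scale per 2048-element block, and falls back to fp32 state below `min_8bit_size`. A rough back-of-envelope sketch of the footprint this implies; the parameter count is hypothetical and the helper is not part of the diff.)

def state_bytes(n, n_states=2, blocksize=2048, bits=8):
    # mirrors init_state above: uint8 state + one fp32 absmax per block,
    # or plain fp32 buffers when bits == 32
    if bits == 32:
        return n_states * n * 4
    blocks = n // blocksize + (1 if n % blocksize else 0)
    return n_states * (n + 4 * blocks)

n = 7_000_000                       # hypothetical 7M-element weight
print(state_bytes(n, bits=8))       # 14,027,344 bytes, ~14 MB for two Adam states
print(state_bytes(n, bits=32))      # 56,000,000 bytes, 56 MB in fp32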
+from bitsandbytes.optim.optimizer import Optimizer1State + + +class RMSprop(Optimizer1State): + def __init__( + self, + params, + lr=1e-2, + alpha=0.99, + eps=1e-8, + weight_decay=0, + momentum=0, + centered=False, + optim_bits=32, + args=None, + min_8bit_size=4096, + percentile_clipping=100, + block_wise=True, + ): + if alpha == 0: + raise NotImplementedError( + "RMSprop with alpha==0.0 is not supported!" + ) + if centered: + raise NotImplementedError("Centered RMSprop is not supported!") + super().__init__( + "rmsprop", + params, + lr, + (alpha, momentum), + eps, + weight_decay, + optim_bits, + args, + min_8bit_size, + percentile_clipping, + block_wise, + ) + + +class RMSprop8bit(Optimizer1State): + def __init__( + self, + params, + lr=1e-2, + alpha=0.99, + eps=1e-8, + weight_decay=0, + momentum=0, + centered=False, + args=None, + min_8bit_size=4096, + percentile_clipping=100, + block_wise=True, + ): + if alpha == 0: + raise NotImplementedError( + "RMSprop with alpha==0.0 is not supported!" + ) + if centered: + raise NotImplementedError("Centered RMSprop is not supported!") + super().__init__( + "rmsprop", + params, + lr, + (alpha, momentum), + eps, + weight_decay, + 8, + args, + min_8bit_size, + percentile_clipping, + block_wise, + ) + + +class RMSprop32bit(Optimizer1State): + def __init__( + self, + params, + lr=1e-2, + alpha=0.99, + eps=1e-8, + weight_decay=0, + momentum=0, + centered=False, + args=None, + min_8bit_size=4096, + percentile_clipping=100, + block_wise=True, + ): + + if alpha == 0: + raise NotImplementedError( + "RMSprop with alpha==0.0 is not supported!" + ) + if centered: + raise NotImplementedError("Centered RMSprop is not supported!") + super().__init__( + "rmsprop", + params, + lr, + (alpha, momentum), + eps, + weight_decay, + 32, + args, + min_8bit_size, + percentile_clipping, + block_wise, + ) diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/optim/sgd.py b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/sgd.py new file mode 100644 index 0000000000000000000000000000000000000000..3c0fc2b9f25affb364567098695ac2bf1641f7be --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/optim/sgd.py @@ -0,0 +1,99 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
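(Editor's aside: the three RMSprop classes above differ only in the `optim_bits` they forward; `(alpha, momentum)` is packed into the `betas` slot of `Optimizer1State`. A hedged usage sketch, assuming a CUDA-enabled bitsandbytes install and a hypothetical module; not part of the diff.)

import torch
import bitsandbytes as bnb

model = torch.nn.Linear(1024, 1024).cuda()   # hypothetical module
opt = bnb.optim.RMSprop8bit(model.parameters(), lr=1e-2, alpha=0.99, momentum=0.9)
model(torch.randn(8, 1024, device="cuda")).sum().backward()
opt.step()   # alpha == 0 or centered=True would have raised in __init__ above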
+from bitsandbytes.optim.optimizer import Optimizer1State + + +class SGD(Optimizer1State): + def __init__( + self, + params, + lr, + momentum=0, + dampening=0, + weight_decay=0, + nesterov=False, + optim_bits=32, + args=None, + min_8bit_size=4096, + percentile_clipping=100, + block_wise=True, + ): + if momentum == 0: + raise NotImplementedError("SGD without momentum is not supported!") + super().__init__( + "momentum", + params, + lr, + (momentum, dampening), + 0.0, + weight_decay, + optim_bits, + args, + min_8bit_size, + percentile_clipping, + block_wise, + ) + + +class SGD8bit(Optimizer1State): + def __init__( + self, + params, + lr, + momentum=0, + dampening=0, + weight_decay=0, + nesterov=False, + args=None, + min_8bit_size=4096, + percentile_clipping=100, + block_wise=True, + ): + if momentum == 0: + raise NotImplementedError("SGD without momentum is not supported!") + super().__init__( + "momentum", + params, + lr, + (momentum, dampening), + 0.0, + weight_decay, + 8, + args, + min_8bit_size, + percentile_clipping, + block_wise, + ) + + +class SGD32bit(Optimizer1State): + def __init__( + self, + params, + lr, + momentum=0, + dampening=0, + weight_decay=0, + nesterov=False, + args=None, + min_8bit_size=4096, + percentile_clipping=100, + block_wise=True, + ): + if momentum == 0: + raise NotImplementedError("SGD without momentum is not supported!") + super().__init__( + "momentum", + params, + lr, + (momentum, dampening), + 0.0, + weight_decay, + 32, + args, + min_8bit_size, + percentile_clipping, + block_wise, + ) diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/research/__init__.py b/mgm/lib/python3.10/site-packages/bitsandbytes/research/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..47b720d78ee04c494b10b3ec2245687df4b673cf --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/research/__init__.py @@ -0,0 +1,6 @@ +from . 
import nn +from .autograd._functions import ( + switchback_bnb, + matmul_fp8_global, + matmul_fp8_mixed, +) diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/research/__pycache__/__init__.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/research/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbf50b06d58b411ac767f620cc0135ffb5e46da1 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/research/__pycache__/__init__.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/research/autograd/__init__.py b/mgm/lib/python3.10/site-packages/bitsandbytes/research/autograd/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/research/autograd/__pycache__/__init__.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/research/autograd/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfc354db87ae496bdbe5b233c19740035115ff8f Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/research/autograd/__pycache__/__init__.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/research/autograd/__pycache__/_functions.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/research/autograd/__pycache__/_functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0403256067ca66c93bc1a37fd8d4e831162758e0 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/research/autograd/__pycache__/_functions.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/research/autograd/_functions.py b/mgm/lib/python3.10/site-packages/bitsandbytes/research/autograd/_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..0dff351e00540bb79a827cd7baf0ccf5a8dc64ca --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/research/autograd/_functions.py @@ -0,0 +1,411 @@ +import operator +import warnings +from dataclasses import dataclass +from functools import reduce # Required in Python 3 + +import torch + +import bitsandbytes.functional as F + +from bitsandbytes.autograd._functions import MatmulLtState, GlobalOutlierPooler + + +# math.prod not compatible with python < 3.8 +def prod(iterable): + return reduce(operator.mul, iterable, 1) + +tensor = torch.Tensor + +class MatMulFP8Mixed(torch.autograd.Function): + # forward is the same, but we added the fallback for pre-turing GPUs + # backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None") + + @staticmethod + def forward(ctx, A, B, out=None, fw_code=None, bw_code=None, bsz=1024, bsz2=1024): + # default of pytorch behavior if inputs are empty + ctx.is_empty = False + if prod(A.shape) == 0: + ctx.is_empty = True + ctx.A = A + ctx.B = B + + B_shape = B.shape + if A.shape[-1] == B_shape[0]: + return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device) + else: + return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device) + + # 1. Dequantize + # 2. 
MatmulnN + cA, state = F.quantize_blockwise(A, code=fw_code, blocksize=bsz) + fp8A = F.dequantize_blockwise(cA, state, blocksize=bsz).to(A.dtype) + + cB, state = F.quantize(B.float(), code=fw_code) + fp8B = F.dequantize(cB, state).to(B.dtype) + + output = torch.matmul(fp8A, fp8B) + + # output is half + + # 3. Save state + ctx.fw_code = fw_code + ctx.bw_code = bw_code + ctx.bsz = bsz + ctx.bsz2 = bsz2 + ctx.dtype_A, ctx.dtype_B = A.dtype, B.dtype + + if any(ctx.needs_input_grad[:2]): + # NOTE: we send back A, and re-quant. + ctx.tensors = (A, fp8B) + else: + ctx.tensors = (None, None) + + return output + + @staticmethod + def backward(ctx, grad_output): + if ctx.is_empty: + return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, None, None, None, None + + req_gradA, req_gradB, _, _, _, _, _ = ctx.needs_input_grad + A, B = ctx.tensors + + grad_A, grad_B = None, None + + # TODO: Fix blocksize to be output_dim + cgrad_out, state = F.quantize_blockwise(grad_output, code=ctx.bw_code, blocksize=ctx.bsz2) + fp8out = F.dequantize_blockwise(cgrad_out, state, blocksize=ctx.bsz2).to(grad_output.dtype) + + # cgrad_output_2, state_2 = F.quantize(grad_output.float(), code=ctx.bw_code) + # fp8out_2 = F.dequantize(cgrad_output_2, state_2).to(grad_output.dtype) + + # grad_output_reshape = grad_output.reshape(-1, grad_output.shape[-1]).contiguous() + # fp8grad_transpose, stategrad_transpose = F.vectorwise_quant(grad_output_reshape, dim=0, quant_type='vector') + # fp8out_transpose = (fp8grad_transpose / 7) * stategrad_transpose + # fp8out_transpose = fp8out_transpose.view(grad_output.shape[0], grad_output.shape[1], grad_output.shape[2]) + + # not supported by PyTorch. TODO: create work-around + if req_gradA: + grad_A = torch.matmul(fp8out, B.t().to(fp8out.dtype)).to(A.dtype) + + if req_gradB: + if len(A.shape) == 3: + At = A.transpose(2, 1).contiguous() + else: + At = A.transpose(1, 0).contiguous() + # cA, state = F.quantize(At.float(), code=ctx.fw_code) + # fp8At = F.dequantize(cA, state).to(A.dtype) + grad_B = torch.matmul(At.to(grad_output.dtype), grad_output).to(B.dtype) + + return grad_A, grad_B, None, None, None, None, None + + +class MatMulFP8Global(torch.autograd.Function): + # forward is the same, but we added the fallback for pre-turing GPUs + # backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None") + + @staticmethod + def forward(ctx, A, B, out=None, fw_code=None, bw_code=None, bsz=1024, bsz2=1024): + # default of pytorch behavior if inputs are empty + ctx.is_empty = False + if prod(A.shape) == 0: + ctx.is_empty = True + ctx.A = A + ctx.B = B + + B_shape = B.shape + if A.shape[-1] == B_shape[0]: + return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device) + else: + return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device) + + # 1. Dequantize + # 2. MatmulnN + cA, state = F.quantize(A.float(), code=fw_code) + fp8A = F.dequantize(cA, state).to(A.dtype) + + cB, state = F.quantize(B.float(), code=fw_code) + fp8B = F.dequantize(cB, state).to(B.dtype) + + output = torch.matmul(fp8A, fp8B) + + # output is half + + # 3. Save state + ctx.fw_code = fw_code + ctx.bw_code = bw_code + ctx.bsz = bsz + ctx.bsz2 = bsz2 + ctx.dtype_A, ctx.dtype_B = A.dtype, B.dtype + + if any(ctx.needs_input_grad[:2]): + # NOTE: we send back A, and re-quant. 
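+        #       (Editor's note: in this Global variant the backward pass really
+        #       does re-quantize A with fw_code before forming grad_B; in
+        #       MatMulFP8Mixed above that re-quantization is commented out and
+        #       the full-precision A is used directly.)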
+ ctx.tensors = (A, fp8B) + else: + ctx.tensors = (None, None) + + return output + + @staticmethod + def backward(ctx, grad_output): + if ctx.is_empty: + return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, None, None, None, None + + req_gradA, req_gradB, _, _, _, _, _ = ctx.needs_input_grad + A, B = ctx.tensors + + grad_A, grad_B = None, None + + # TODO: Fix blocksize to be output_dim + cgrad_out, state = F.quantize(grad_output.float(), code=ctx.bw_code) + fp8out = F.dequantize(cgrad_out, state).to(grad_output.dtype) + + # cgrad_output_2, state_2 = F.quantize(grad_output.float(), code=ctx.bw_code) + # fp8out_2 = F.dequantize(cgrad_output_2, state_2).to(grad_output.dtype) + + # grad_output_reshape = grad_output.reshape(-1, grad_output.shape[-1]).contiguous() + # fp8grad_transpose, stategrad_transpose = F.vectorwise_quant(grad_output_reshape, dim=0, quant_type='vector') + # fp8out_transpose = (fp8grad_transpose / 7) * stategrad_transpose + # fp8out_transpose = fp8out_transpose.view(grad_output.shape[0], grad_output.shape[1], grad_output.shape[2]) + + # not supported by PyTorch. TODO: create work-around + if req_gradA: + grad_A = torch.matmul(fp8out, B.t().to(fp8out.dtype)).to(A.dtype) + + if req_gradB: + if len(A.shape) == 3: + At = A.transpose(2, 1).contiguous() + else: + At = A.transpose(1, 0).contiguous() + cA, state = F.quantize(At.float(), code=ctx.fw_code) + fp8At = F.dequantize(cA, state).to(A.dtype) + grad_B = torch.matmul(fp8At.to(fp8out.dtype), fp8out).to(B.dtype) + + return grad_A, grad_B, None, None, None, None, None + + +class SwitchBackBnb(torch.autograd.Function): + @staticmethod + def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState()): + # default to pytorch behavior if inputs are empty + ctx.is_empty = False + if prod(A.shape) == 0: + ctx.is_empty = True + ctx.A = A + ctx.B = B + ctx.bias = bias + if A.shape[-1] == B.shape[0]: + return torch.empty(A.shape[:-1]+B.shape[1:], dtype=A.dtype, device=A.device) + else: + return torch.empty(A.shape[:-1]+B.shape[:1], dtype=A.dtype, device=A.device) + + # 1. Quantize A + # 2. Quantize B + # 3. Matmul + # 4. Mixed-precision decomposition matmul + # 5. Save state + formatB = state.formatB + input_shape = A.shape + if state.outlier_pool is None: + state.outlier_pool = GlobalOutlierPooler.get_instance() + + # Cast A to fp16 + if A.dtype != torch.float16: + warnings.warn(f"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization") + + # 1. Quantize A + if len(A.shape) == 3: + A = A.view(-1, A.shape[-1]).contiguous() + CA, CAt, SCA, SCAt, coo_tensorA = F.double_quant( + A.to(torch.float16), threshold=state.threshold + ) + + if state.threshold > 0.0 and coo_tensorA is not None: + if state.has_fp16_weights: + idx = torch.unique(coo_tensorA.colidx).long() + CA[:, idx] = 0 + CAt[:, idx] = 0 + subA = A[:, idx] + state.subB = B[:, idx].t().contiguous() + state.idx = idx + else: + if state.CxB is None: + # B in in 8-bit row-major, we can transform it back to 16-bit to extract outlier dimensions + # we also need to convert it to the turing/ampere format + state.CxB, state.SB = F.transform(state.CB, to_order=formatB) + else: + #print('A shape', A.shape) + if not state.has_fp16_weights and state.CxB is None: + state.CxB, state.SB = F.transform(state.CB, to_order=formatB) + subA = None + + # 2. 
Quantize B + if state.has_fp16_weights: + #print('B shape', B.shape) + has_grad = True if (getattr(B, "grad", None) is not None) else False + is_transposed = not B.is_contiguous() and B.shape[0] == B.stride(1) + if is_transposed: + B = B.contiguous() + + if (state.is_training and not has_grad) or state.CxB is None: + state.reset_grads() + ( + CB, + state.CBt, + state.SCB, + state.SCBt, + coo_tensorB, + ) = F.double_quant(B.to(torch.float16)) + state.CxB, state.SB = F.transform(CB, to_order=formatB) + else: + has_grad = False + + if coo_tensorA is not None and not state.has_fp16_weights: + # extract outliers + + outlier_idx = torch.unique(coo_tensorA.colidx) + state.idx = outlier_idx + # state.outlier_pool.add_outliers(outlier_idx, A.shape[-1]) + # if state.use_pool and state.outlier_pool.model_dim == A.shape[-1]: + # # do not use pool for 2nd FFN layer + # state.idx = state.outlier_pool.get_current_outlier_idx().to(A.device) + # else: + # state.idx = outlier_idx + outliers = F.extract_outliers(state.CxB, state.SB, state.idx.int()) + state.subB = ( + (outliers * state.SCB.view(-1, 1) / 127.0) + .t() + .contiguous() + .to(A.dtype) + ) + CA[:, state.idx.long()] = 0 + CAt[:, state.idx.long()] = 0 + subA = A[:, state.idx.long()] + + shapeB = state.SB[0] + + if len(input_shape) == 3: + output_shape = (input_shape[0], input_shape[1], shapeB[0]) + else: + output_shape = (input_shape[0], shapeB[0]) + + # 3. Matmul + C32A, SA = F.transform(CA, "col32") + out32, Sout32 = F.igemmlt(C32A, state.CxB, SA, state.SB) + # we apply the fused bias here + + if bias is None or bias.dtype == torch.float16: + output = F.mm_dequant(out32, Sout32, SCA, state.SCB, bias=bias) + output = output.to(A.dtype) + else: # apply bias separately + output = F.mm_dequant(out32, Sout32, SCA, state.SCB, bias=None) + output = output.to(A.dtype).add_(bias) + + # 4. Mixed-precision decomposition matmul + if coo_tensorA is not None and subA is not None: + output += torch.matmul(subA, state.subB) + + # 5. 
Save state + ctx.state = state + + ctx.formatB = formatB + ctx.grad_shape = input_shape + ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype + + if any(ctx.needs_input_grad[:2]): + ctx.tensors = (CAt, subA, A) + ctx.tensor_states = (SCAt, state.idx) + else: + ctx.tensors = [None, None, None] + ctx.tensor_states = (None, None) + ctx.save_for_backward(None, None) + + + clone_func = torch.clone if len(output_shape) == 3 else lambda x : x + return clone_func(output.view(output_shape)) + + @staticmethod + def backward(ctx, grad_output): + if ctx.is_empty: + bias_grad = (None if ctx.bias is None else torch.zeros_like(ctx.bias)) + return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None + req_gradA, req_gradB, _, req_gradBias, _ = ctx.needs_input_grad + CAt, subA, A = ctx.tensors + SCAt, idx = ctx.tensor_states + formatB = ctx.formatB + state = ctx.state + grad_A = grad_B = grad_bias = None + + if req_gradBias: + # compute grad_bias first before changing grad_output dtype + grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias) + + # Cast grad_output to fp16 + if len(grad_output.shape) == 3: + grad_output = grad_output.reshape( + -1, grad_output.shape[-1] + ).contiguous() + + Cgrad, Cgradt, SCgrad, SCgradt, coo_tensor = F.double_quant(grad_output.to(torch.float16)) + + if req_gradB: + # print('back A shape', A.shape) + # print('grad output t shape', grad_output.t().shape) + grad_B = torch.matmul(grad_output.t(), A) + + if req_gradA: + if state.CBt is not None: + C32grad, Sgrad = F.transform(Cgrad, "col32") + if state.CxBt is None: + state.CxBt, state.SBt = F.transform( + state.CBt, to_order=formatB, transpose=True + ) + # print('back B shape', state.CxBt.shape) + # print('back grad shape', C32grad.shape) + gradA32, SgradA32 = F.igemmlt(C32grad, state.CxBt, Sgrad, state.SBt) + grad_A = F.mm_dequant(gradA32, SgradA32, SCgrad, state.SCBt).view(ctx.grad_shape).to(ctx.dtype_A) + + elif state.CB is not None: + CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1. 
/ 127.0)) + grad_A = torch.matmul(grad_output, CB).view(ctx.grad_shape).to(ctx.dtype_A) + else: + raise Exception('State must contain either CBt or CB matrix for backward') + + return grad_A, grad_B, None, grad_bias, None + +def get_block_sizes(input_matrix, weight_matrix): + input_features = input_matrix.shape[-1] + output_features = (weight_matrix.shape[0] if weight_matrix.shape[1] == input_features else weight_matrix.shape[1]) + array = [4096, 2048, 1024, 512, 256, 128, 64, 0] + bsz, bsz2 = 1024, 1024 + for i, k in enumerate(array): + if input_features > array[i + 1]: + bsz = k + break + for i, k in enumerate(array): + if output_features > array[i + 1]: + bsz2 = k + break + + return bsz, bsz2 + +def matmul_fp8_global(A: tensor, B: tensor, fw_code: tensor, bw_code: tensor, out: tensor = None, bsz : int = -1, bsz2 : int = -1): + if bsz == -1 or bsz2 == -1: bsz, bsz2 = get_block_sizes(A, B) + return MatMulFP8Global.apply(A, B, out, fw_code, bw_code, bsz, bsz2) + +def matmul_fp8_mixed(A: tensor, B: tensor, fw_code: tensor, bw_code: tensor, out: tensor = None, bsz : int = -1, bsz2 : int = -1): + if bsz == -1 or bsz2 == -1: bsz, bsz2 = get_block_sizes(A, B) + return MatMulFP8Mixed.apply(A, B, out, fw_code, bw_code, bsz, bsz2) + +def switchback_bnb( + A: tensor, + B: tensor, + out: tensor = None, + state: MatmulLtState = None, + threshold=0.0, + bias=None +): + state = state or MatmulLtState() + if threshold > 0.0: + state.threshold = threshold + return SwitchBackBnb.apply(A, B, out, bias, state) diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/research/nn/__init__.py b/mgm/lib/python3.10/site-packages/bitsandbytes/research/nn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8faec10bb17333e145779fd488aad94310c67f28 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/research/nn/__init__.py @@ -0,0 +1 @@ +from .modules import LinearFP8Mixed, LinearFP8Global diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/research/nn/__pycache__/__init__.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/research/nn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d86764503501ed7faed2ee7c935617e898d37298 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/research/nn/__pycache__/__init__.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/research/nn/__pycache__/modules.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/research/nn/__pycache__/modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..539f952b743dcfda1f9e06d6d639b28e82c440a5 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/research/nn/__pycache__/modules.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/research/nn/modules.py b/mgm/lib/python3.10/site-packages/bitsandbytes/research/nn/modules.py new file mode 100644 index 0000000000000000000000000000000000000000..2a46b40c36c03c364119cd3bac08e51d543e3ccb --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/research/nn/modules.py @@ -0,0 +1,64 @@ +from typing import Optional, TypeVar, Union, overload + +import torch +import torch.nn.functional as F +from torch import Tensor, device, dtype, nn + +import bitsandbytes as bnb +from bitsandbytes.optim import GlobalOptimManager +from bitsandbytes.utils import OutlierTracer, find_outlier_dims + +T = TypeVar("T", bound="torch.nn.Module") + + +class 
LinearFP8Mixed(nn.Linear): + def __init__(self, input_features, output_features, bias=True): + super().__init__(input_features, output_features, bias) + self.bw_code = None + self.fw_code = None + array = [4096, 2048, 1024, 512, 256, 128, 64, 0] + for i, k in enumerate(array): + if input_features > array[i + 1]: + self.bsz = k + break + for i, k in enumerate(array): + if output_features > array[i + 1]: + self.bsz2 = k + break + + def forward(self, x: torch.Tensor): + if self.fw_code is None: + self.bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(x.device) + self.fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(x.device) + + out = bnb.research.matmul_fp8_mixed(x, self.weight.t(), fw_code=self.fw_code, bw_code=self.bw_code, bsz=self.bsz, bsz2=self.bsz2) + if self.bias is not None: + out += self.bias + + return out + +class LinearFP8Global(nn.Linear): + def __init__(self, input_features, output_features, bias=True): + super().__init__(input_features, output_features, bias) + self.bw_code = None + self.fw_code = None + array = [4096, 2048, 1024, 512, 256, 128, 64, 0] + for i, k in enumerate(array): + if input_features > array[i + 1]: + self.bsz = k + break + for i, k in enumerate(array): + if output_features > array[i + 1]: + self.bsz2 = k + break + + def forward(self, x: torch.Tensor): + if self.fw_code is None: + self.bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(x.device) + self.fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(x.device) + + out = bnb.matmul_fp8_global(x, self.weight.t(), fw_code=self.fw_code, bw_code=self.bw_code, bsz=self.bsz, bsz2=self.bsz2) + if self.bias is not None: + out += self.bias + + return out diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__init__.py b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/__init__.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe0be99bf84704d671f26a8f5593d8bd3dc08293 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/__init__.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/dequantize_rowwise.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/dequantize_rowwise.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..150f54a36055cd03ed509206d72925d072186f7d Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/dequantize_rowwise.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/int8_matmul_mixed_dequanitze.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/int8_matmul_mixed_dequanitze.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..525e29994dafcf6c4b7ca06988f6c09221019c43 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/int8_matmul_mixed_dequanitze.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/int8_matmul_rowwise_dequantize.cpython-310.pyc 
b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/int8_matmul_rowwise_dequantize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a38ec850ee4f0fc260065f185816e908c9246f5 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/int8_matmul_rowwise_dequantize.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_columnwise_and_transpose.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_columnwise_and_transpose.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d419e7c1ce605f36be4d069ab53317a831233e7 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_columnwise_and_transpose.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_global.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_global.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8befb83ebbf7cefba52ceaa155ccd5440a6aa9b Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_global.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_rowwise.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_rowwise.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cd44a6160f5fe3276dc68fbbc99874687f07750 Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_rowwise.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/triton_utils.cpython-310.pyc b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/triton_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61a7c97407aa3c5bff1a8dfa77d8cfb7801fda3f Binary files /dev/null and b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/triton_utils.cpython-310.pyc differ diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/triton/dequantize_rowwise.py b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/dequantize_rowwise.py new file mode 100644 index 0000000000000000000000000000000000000000..e092680b845cb2301ce86ca9e1c30953dbf007ec --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/dequantize_rowwise.py @@ -0,0 +1,64 @@ +import math +import torch +import time +from bitsandbytes.triton.triton_utils import is_triton_available + +if not is_triton_available(): + def dequantize_rowwise(x: torch.Tensor, state_x: torch.Tensor): return None +else: + + import triton + import triton.language as tl + from triton.ops.matmul_perf_model import early_config_prune, estimate_matmul_time + + # rowwise quantize + + # TODO: autotune this better. 
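+    # Editor's note (illustrative, not upstream code): the kernel below inverts
+    # rowwise int8 quantization.  With the per-row scale s = max(|row|) held in
+    # state_x, each int8 value q maps back to s * q / 127; e.g. for s = 2.0:
+    #   q = 127 -> 2.0,   q = -64 -> -1.0079,   q = 0 -> 0.0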
+    @triton.autotune(
+        configs=[
+            triton.Config({}, num_stages=1, num_warps=8),
+            triton.Config({}, num_stages=2, num_warps=8),
+            triton.Config({}, num_stages=4, num_warps=8),
+            triton.Config({}, num_stages=8, num_warps=8),
+            triton.Config({}, num_stages=1),
+            triton.Config({}, num_stages=2),
+            triton.Config({}, num_stages=4),
+            triton.Config({}, num_stages=8),
+            triton.Config({}, num_warps=1),
+            triton.Config({}, num_warps=2),
+            triton.Config({}, num_warps=4),
+            triton.Config({}, num_warps=8),
+        ],
+        key=['n_elements']
+    )
+    @triton.jit
+    def _dequantize_rowwise(
+        x_ptr,
+        state_x,
+        output_ptr,
+        inv_127,
+        n_elements,
+        BLOCK_SIZE: tl.constexpr,
+        P2: tl.constexpr,
+    ):
+        pid = tl.program_id(axis=0)
+        block_start = pid * BLOCK_SIZE
+        arange = tl.arange(0, P2)
+        offsets = block_start + arange
+        row_mask = arange < BLOCK_SIZE
+        x = tl.load(x_ptr + offsets, mask=row_mask)
+        max_val = tl.load(state_x + pid)
+        output = max_val * x * inv_127
+        tl.store(output_ptr + offsets, output, mask=row_mask)
+
+
+    def dequantize_rowwise(x: torch.Tensor, state_x: torch.Tensor):
+        output = torch.empty(*x.shape, device=x.device, dtype=torch.float16)
+
+        P2 = int(2 ** (math.ceil(math.log2(x.shape[1]))))
+
+        assert x.is_cuda and output.is_cuda
+        n_elements = output.numel()
+        grid = lambda meta: (x.shape[0],)
+        _dequantize_rowwise[grid](x, state_x, output, 1./127, n_elements, BLOCK_SIZE=x.shape[1], P2=P2)
+        return output
diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/triton/int8_matmul_mixed_dequanitze.py b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/int8_matmul_mixed_dequanitze.py
new file mode 100644
index 0000000000000000000000000000000000000000..60a56e698ad1678549cd0220e7cc42b4e613b585
--- /dev/null
+++ b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/int8_matmul_mixed_dequanitze.py
@@ -0,0 +1,163 @@
+import torch
+from bitsandbytes.triton.triton_utils import is_triton_available
+
+if not is_triton_available():
+    def int8_matmul_mixed_dequanitze(a, b, state_x, state_w, bias): return None
+else:
+
+    import triton
+    import triton.language as tl
+    from triton.ops.matmul_perf_model import early_config_prune, estimate_matmul_time
+
+
+    # This is a matmul kernel based on triton.ops.matmul
+    # It is modified to support rowwise quantized input and global quantized weight
+    # Its purpose is a fused matmul followed by dequantization
+    # It does support bias.
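(Editor's aside: to make the fusion concrete, this is a sketch of the epilogue the kernel computes, written as plain PyTorch with hypothetical shapes; the real work happens inside the Triton kernel below and this snippet is not part of the diff.)

import torch

acc  = torch.randint(-2**20, 2**20, (4, 8), dtype=torch.int32)  # int8 matmul accumulator
s_x  = torch.rand(4, 1)    # per-row absmax scales of the activation (state_x)
s_w  = torch.rand(())      # one global absmax scale for the weight (state_w)
bias = torch.zeros(8, dtype=torch.float16)

out = (s_w * (s_x * (acc.float() / (127.0 * 127.0)))).to(torch.float16) + bias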
+ + def init_to_zero(name): + return lambda nargs: nargs[name].zero_() + + def get_configs_io_bound(): + configs = [] + for num_stages in [2, 3, 4, 5, 6]: + for block_m in [16, 32]: + for block_k in [32, 64]: + for block_n in [32, 64, 128, 256]: + num_warps = 2 if block_n <= 64 else 4 + configs.append( + triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': 1}, + num_stages=num_stages, num_warps=num_warps)) + # split_k + for split_k in [2, 4, 8, 16]: + configs.append(triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': split_k}, + num_stages=num_stages, num_warps=num_warps, pre_hook=init_to_zero('C'))) + return configs + + + @triton.autotune( + configs=[ + # basic configs for compute-bound matmuls + triton.Config({'BLOCK_M': 128, 'BLOCK_N': 256, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=3, num_warps=8), + triton.Config({'BLOCK_M': 256, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=3, num_warps=8), + triton.Config({'BLOCK_M': 256, 'BLOCK_N': 64, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 64, 'BLOCK_N': 256, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 64, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 128, 'BLOCK_N': 32, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 64, 'BLOCK_N': 32, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=5, num_warps=2), + # good for int8 + triton.Config({'BLOCK_M': 128, 'BLOCK_N': 256, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=3, num_warps=8), + triton.Config({'BLOCK_M': 256, 'BLOCK_N': 128, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=3, num_warps=8), + triton.Config({'BLOCK_M': 256, 'BLOCK_N': 64, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 64, 'BLOCK_N': 256, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 64, 'BLOCK_N': 128, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 128, 'BLOCK_N': 32, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 64, 'BLOCK_N': 32, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=5, num_warps=2), + ] + get_configs_io_bound(), + key=['M', 'N', 'K'], + prune_configs_by={ + 'early_config_prune': early_config_prune, + 'perf_model': estimate_matmul_time, + 'top_k': 10 + }, + ) + @triton.heuristics({ + 'EVEN_K': lambda args: args['K'] % (args['BLOCK_K'] * args['SPLIT_K']) == 0, + }) + @triton.jit + def _int8_matmul_mixed_dequantize(A, B, C, bias, state_x_ptr, state_w_ptr, M, N, K, divfactor: tl.constexpr, has_bias : tl.constexpr, + stride_am, stride_ak, + stride_bk, stride_bn, + stride_cm, stride_cn, + BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr, + GROUP_M: tl.constexpr, SPLIT_K: tl.constexpr, EVEN_K: tl.constexpr, + ACC_TYPE: tl.constexpr + ): + # matrix multiplication + pid = tl.program_id(0) + pid_z = tl.program_id(1) + grid_m = tl.cdiv(M, BLOCK_M) + grid_n = tl.cdiv(N, BLOCK_N) + # re-order program ID for 
better L2 performance + width = GROUP_M * grid_n + group_id = pid // width + group_size = min(grid_m - group_id * GROUP_M, GROUP_M) + pid_m = group_id * GROUP_M + (pid % group_size) + pid_n = (pid % width) // (group_size) + # do matrix multiplication + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) + ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M) + rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N) + rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K) + # pointers + A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak) + B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn) + + # rematerialize rm and rn to save registers + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) + + w_factor = tl.load(state_w_ptr) + x_factor = tl.load(state_x_ptr + ram)[:, None] + + # acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE) + acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.int32) + for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)): + if EVEN_K: + a = tl.load(A) + b = tl.load(B) + else: + k_remaining = K - k * (BLOCK_K * SPLIT_K) + a = tl.load(A, mask=rk[None, :] < k_remaining, other=0.) + b = tl.load(B, mask=rk[:, None] < k_remaining, other=0.) + acc += tl.dot(a, b) + A += BLOCK_K * SPLIT_K * stride_ak + B += BLOCK_K * SPLIT_K * stride_bk + + acc = (w_factor * (x_factor * (acc * divfactor))) + acc = acc.to(C.dtype.element_ty) + + # conditionally add bias + if has_bias: + bias = tl.load(bias + rn).to(C.dtype.element_ty) + acc = acc + bias[None, :] + + C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn) + mask = (rm < M)[:, None] & (rn < N)[None, :] + # handles write-back with reduction-splitting + if SPLIT_K == 1: + tl.store(C, acc, mask=mask) + else: + tl.atomic_add(C, acc, mask=mask) + + + def int8_matmul_mixed_dequanitze(a, b, state_x, state_w, bias): + device = a.device + divfactor = 1. / (127. * 127.) 
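+        # (Editor's note) both operands were scaled up by 127 during int8
+        # quantization, so 1/127**2 undoes the two scales; the kernel folds
+        # this into the accumulator together with the absmax factors, e.g.
+        # with unit absmax: (64 * 32) / 127**2 = 0.1270, close to the exact
+        # 0.5 * 0.25 = 0.125.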
+        has_bias = 0 if bias is None else 1
+        # handle non-contiguous inputs if necessary
+        if a.stride(0) > 1 and a.stride(1) > 1:
+            a = a.contiguous()
+        if b.stride(0) > 1 and b.stride(1) > 1:
+            b = b.contiguous()
+        # checks constraints
+        assert a.shape[1] == b.shape[0], "incompatible dimensions"
+        M, K = a.shape
+        _, N = b.shape
+        # allocates output
+        c = torch.empty((M, N), device=device, dtype=torch.float16)
+        # accumulator types
+        ACC_TYPE = tl.float32 #if a.dtype in [torch.float16, torch.bfloat16, torch.float32] else tl.int32
+        # launch int8_matmul_mixed_dequantize kernel
+        grid = lambda META: (triton.cdiv(M, META['BLOCK_M']) * triton.cdiv(N, META['BLOCK_N']), META['SPLIT_K'])
+        _int8_matmul_mixed_dequantize[grid](a, b, c, bias, state_x, state_w, M, N, K, divfactor, has_bias,
+                                            a.stride(0), a.stride(1),
+                                            b.stride(0), b.stride(1),
+                                            c.stride(0), c.stride(1),
+                                            GROUP_M=8, ACC_TYPE=ACC_TYPE)
+        return c
diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/triton/int8_matmul_rowwise_dequantize.py b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/int8_matmul_rowwise_dequantize.py
new file mode 100644
index 0000000000000000000000000000000000000000..33f4d13f227dcf627523a24d9beb48b542f39c4b
--- /dev/null
+++ b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/int8_matmul_rowwise_dequantize.py
@@ -0,0 +1,164 @@
+import torch
+
+from bitsandbytes.triton.triton_utils import is_triton_available
+
+if not is_triton_available():
+    def int8_matmul_rowwise_dequantize(a, b, state_x, state_w, bias): return None
+else:
+    import triton
+    import triton.language as tl
+    from triton.ops.matmul_perf_model import early_config_prune, estimate_matmul_time
+
+    # This is a matmul kernel based on triton.ops.matmul
+    # It is modified to support rowwise quantized input and columnwise quantized weight
+    # Its purpose is a fused matmul followed by dequantization
+    # It does support bias.
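(Editor's aside: the only functional difference from the mixed variant above is scale granularity, `state_w` here holds one absmax per output column instead of a single global scalar. A sketch of the epilogue with hypothetical shapes, not part of the diff.)

import torch

acc = torch.randint(-2**20, 2**20, (4, 8), dtype=torch.int32)
s_x = torch.rand(4, 1)   # per-row activation scales (state_x)
s_w = torch.rand(1, 8)   # per-column weight scales (state_w); a scalar in the mixed kernel
out = (s_w * (s_x * (acc.float() / (127.0 * 127.0)))).to(torch.float16)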
+ + def init_to_zero(name): + return lambda nargs: nargs[name].zero_() + + + def get_configs_io_bound(): + configs = [] + for num_stages in [2, 3, 4, 5, 6]: + for block_m in [16, 32]: + for block_k in [32, 64]: + for block_n in [32, 64, 128, 256]: + num_warps = 2 if block_n <= 64 else 4 + configs.append( + triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': 1}, + num_stages=num_stages, num_warps=num_warps)) + # split_k + for split_k in [2, 4, 8, 16]: + configs.append(triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': split_k}, + num_stages=num_stages, num_warps=num_warps, pre_hook=init_to_zero('C'))) + return configs + + + @triton.autotune( + configs=[ + # basic configs for compute-bound matmuls + triton.Config({'BLOCK_M': 128, 'BLOCK_N': 256, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=3, num_warps=8), + triton.Config({'BLOCK_M': 256, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=3, num_warps=8), + triton.Config({'BLOCK_M': 256, 'BLOCK_N': 64, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 64, 'BLOCK_N': 256, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 64, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 128, 'BLOCK_N': 32, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 64, 'BLOCK_N': 32, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=5, num_warps=2), + # good for int8 + triton.Config({'BLOCK_M': 128, 'BLOCK_N': 256, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=3, num_warps=8), + triton.Config({'BLOCK_M': 256, 'BLOCK_N': 128, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=3, num_warps=8), + triton.Config({'BLOCK_M': 256, 'BLOCK_N': 64, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 64, 'BLOCK_N': 256, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 64, 'BLOCK_N': 128, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 128, 'BLOCK_N': 32, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=4, num_warps=4), + triton.Config({'BLOCK_M': 64, 'BLOCK_N': 32, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=5, num_warps=2), + ] + get_configs_io_bound(), + key=['M', 'N', 'K'], + prune_configs_by={ + 'early_config_prune': early_config_prune, + 'perf_model': estimate_matmul_time, + 'top_k': 10 + }, + ) + @triton.heuristics({ + 'EVEN_K': lambda args: args['K'] % (args['BLOCK_K'] * args['SPLIT_K']) == 0, + }) + @triton.jit + def _int8_matmul_rowwise_dequantize(A, B, C, bias, state_x_ptr, state_w_ptr, M, N, K, divfactor, has_bias : tl.constexpr, + stride_am, stride_ak, + stride_bk, stride_bn, + stride_cm, stride_cn, + BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr, + GROUP_M: tl.constexpr, SPLIT_K: tl.constexpr, EVEN_K: tl.constexpr, + ACC_TYPE: tl.constexpr + ): + # matrix multiplication + pid = tl.program_id(0) + pid_z = tl.program_id(1) + grid_m = tl.cdiv(M, BLOCK_M) + grid_n = tl.cdiv(N, BLOCK_N) + # re-order program ID for better L2 
performance + width = GROUP_M * grid_n + group_id = pid // width + group_size = min(grid_m - group_id * GROUP_M, GROUP_M) + pid_m = group_id * GROUP_M + (pid % group_size) + pid_n = (pid % width) // (group_size) + # do matrix multiplication + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) + ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M) + rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N) + rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K) + # pointers + A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak) + B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn) + + # rematerialize rm and rn to save registers + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) + + w_factor = tl.load(state_w_ptr + rbn)[None, :] + x_factor = tl.load(state_x_ptr + ram)[:, None] + + # acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE) + acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.int32) + for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)): + if EVEN_K: + a = tl.load(A) + b = tl.load(B) + else: + k_remaining = K - k * (BLOCK_K * SPLIT_K) + a = tl.load(A, mask=rk[None, :] < k_remaining, other=0.) + b = tl.load(B, mask=rk[:, None] < k_remaining, other=0.) + acc += tl.dot(a, b) + A += BLOCK_K * SPLIT_K * stride_ak + B += BLOCK_K * SPLIT_K * stride_bk + + acc = (w_factor * (x_factor * (acc * divfactor))) + acc = acc.to(C.dtype.element_ty) + + if has_bias: + bias = tl.load(bias + rn).to(C.dtype.element_ty) + acc = acc + bias[None, :] + + C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn) + mask = (rm < M)[:, None] & (rn < N)[None, :] + # handles write-back with reduction-splitting + if SPLIT_K == 1: + tl.store(C, acc, mask=mask) + else: + tl.atomic_add(C, acc, mask=mask) + + + def int8_matmul_rowwise_dequantize(a, b, state_x, state_w, bias): + divfactor = 1. / (127. * 127.) 
+ + has_bias = 0 if bias is None else 1 + + device = a.device + # handle non-contiguous inputs if necessary + if a.stride(0) > 1 and a.stride(1) > 1: + a = a.contiguous() + if b.stride(0) > 1 and b.stride(1) > 1: + b = b.contiguous() + # checks constraints + assert a.shape[1] == b.shape[0], "incompatible dimensions" + M, K = a.shape + _, N = b.shape + # allocates output + c = torch.empty((M, N), device=device, dtype=torch.float16) + # accumulator types + ACC_TYPE = tl.float32 #if a.dtype in [torch.float16, torch.bfloat16, torch.float32] else tl.int32 + # launch int8_matmul_rowwise_dequantize kernel + grid = lambda META: (triton.cdiv(M, META['BLOCK_M']) * triton.cdiv(N, META['BLOCK_N']), META['SPLIT_K']) + _int8_matmul_rowwise_dequantize[grid](a, b, c, bias, state_x, state_w, M, N, K, divfactor, has_bias, + a.stride(0), a.stride(1), + b.stride(0), b.stride(1), + c.stride(0), c.stride(1), + GROUP_M=8, ACC_TYPE=ACC_TYPE) + return c diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/triton/quantize_columnwise_and_transpose.py b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/quantize_columnwise_and_transpose.py new file mode 100644 index 0000000000000000000000000000000000000000..54220d95acc748885ee5fbc43d4d8534f46476c7 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/quantize_columnwise_and_transpose.py @@ -0,0 +1,74 @@ +import math +import torch +import time +from bitsandbytes.triton.triton_utils import is_triton_available + +if not is_triton_available(): + def quantize_columnwise_and_transpose(x: torch.Tensor): return None +else: + + import triton + import triton.language as tl + from triton.ops.matmul_perf_model import early_config_prune, estimate_matmul_time + + # This kernel does fused columnwise quantization and transpose. + + # TODO: autotune this better. + @triton.autotune( + configs=[ + triton.Config({}, num_stages=1), + triton.Config({}, num_stages=2), + triton.Config({}, num_stages=4), + triton.Config({}, num_stages=8), + triton.Config({}, num_stages=16), + triton.Config({}, num_stages=1, num_warps=8), + triton.Config({}, num_stages=2, num_warps=8), + triton.Config({}, num_stages=4, num_warps=8), + triton.Config({}, num_stages=8, num_warps=8), + triton.Config({}, num_stages=16, num_warps=8), + triton.Config({}, num_warps=1), + triton.Config({}, num_warps=2), + triton.Config({}, num_warps=4), + triton.Config({}, num_warps=8), + ], + key=['n_elements'] + ) + @triton.jit + def _quantize_columnwise_and_transpose( + x_ptr, + output_ptr, + output_maxs, + n_elements, + M : tl.constexpr, N : tl.constexpr, + BLOCK_SIZE: tl.constexpr, + P2: tl.constexpr, + ): + pid = tl.program_id(axis=0) + block_start = pid + p2_arange = tl.arange(0, P2) + p2_arange_mask = p2_arange < M + arange = p2_arange * N + offsets = block_start + arange + x = tl.load(x_ptr + offsets, mask=p2_arange_mask) + abs_x = tl.abs(x) + max_val = tl.max(tl.where(p2_arange_mask, abs_x, 0), axis=0) + output = tl.libdevice.llrint(127. 
* (x / max_val)) + + new_start = pid * M + new_offsets = new_start + p2_arange + tl.store(output_ptr + new_offsets, output, mask=p2_arange_mask) + tl.store(output_maxs + pid, max_val) + + def quantize_columnwise_and_transpose(x: torch.Tensor): + M, N = x.shape + output = torch.empty(N, M, device=x.device, dtype=torch.int8) + output_maxs = torch.empty(x.shape[1], device=x.device, dtype=torch.float16) + + P2 = int(2 ** (math.ceil(math.log2(M)))) + + assert x.is_cuda and output.is_cuda + n_elements = output.numel() + grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) + _quantize_columnwise_and_transpose[grid](x, output, output_maxs, n_elements, M, N, BLOCK_SIZE=M, P2=P2) + return output, output_maxs + diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/triton/quantize_global.py b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/quantize_global.py new file mode 100644 index 0000000000000000000000000000000000000000..845db6ecd52bb10a64e144c72bc214e2d5d3817b --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/quantize_global.py @@ -0,0 +1,107 @@ +import math +import torch +import time +from bitsandbytes.triton.triton_utils import is_triton_available + +if not is_triton_available(): + def quantize_global_transpose(input): return None + def quantize_global(x: torch.Tensor): return None +else: + + import triton + import triton.language as tl + from triton.ops.matmul_perf_model import early_config_prune, estimate_matmul_time + + # global quantize + @triton.autotune( + configs=[ + triton.Config({'BLOCK_SIZE': 1024,}, num_warps=4), + triton.Config({'BLOCK_SIZE': 2048,}, num_stages=1), + + ], + key=['n_elements'] + ) + @triton.jit + def _quantize_global( + x_ptr, + absmax_inv_ptr, + output_ptr, + n_elements, + BLOCK_SIZE: tl.constexpr, + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + offsets = block_start + tl.arange(0, BLOCK_SIZE) + mask = offsets < n_elements + x = tl.load(x_ptr + offsets, mask=mask) + absmax_inv = tl.load(absmax_inv_ptr) + output = tl.libdevice.llrint(127. * (x * absmax_inv)) + tl.store(output_ptr + offsets, output, mask=mask) + + def quantize_global(x: torch.Tensor): + absmax = x.abs().max().unsqueeze(0) + absmax_inv = 1./ absmax + output = torch.empty(*x.shape, device='cuda', dtype=torch.int8) + assert x.is_cuda and output.is_cuda + n_elements = output.numel() + grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) + _quantize_global[grid](x, absmax_inv, output, n_elements) + return output, absmax + + + # global quantize and transpose + @triton.autotune( + configs=[ + triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'GROUP_M': 8}, num_warps=4), + triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'GROUP_M': 8}, num_warps=4), + + # ... 
+ ], + key=['M', 'N'] + ) + @triton.jit + def _quantize_global_transpose(A, absmax_inv_ptr, B, stride_am, stride_an, stride_bn, stride_bm, M, N, + BLOCK_M : tl.constexpr, + BLOCK_N : tl.constexpr, + GROUP_M : tl.constexpr): + pid = tl.program_id(0) + grid_m = (M + BLOCK_M - 1) // BLOCK_M + grid_n = (N + BLOCK_N - 1) // BLOCK_N + + width = GROUP_M * grid_n + group_id = pid // width + group_size = min(grid_m - group_id * GROUP_M, GROUP_M) + pid_m = group_id * GROUP_M + (pid % group_size) + pid_n = (pid % width) // group_size + + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) + A = A + (rm[:, None] * stride_am + rn[None, :] * stride_an) + mask = (rm < M)[:, None] & (rn < N)[None, :] + a = tl.load(A, mask=mask) + absmax_inv = tl.load(absmax_inv_ptr) + + # rematerialize to save registers + rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) + rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) + B = B + (rm[:, None] * stride_bm + rn[None, :] * stride_bn) + mask = (rm < M)[:, None] & (rn < N)[None, :] + + output = tl.libdevice.llrint(127. * (a * absmax_inv)) + + tl.store(B, output, mask=mask) + + def quantize_global_transpose(input): + absmax = input.abs().max().unsqueeze(0) + absmax_inv = 1./ absmax + M, N = input.shape + out = torch.empty(N, M, device='cuda', dtype=torch.int8) + + assert out.size(0) == N and out.size(1) == M + assert input.stride(0) == 1 or input.stride(1) == 1 + assert out.stride(0) == 1 or out.stride(1) == 1 + + grid = lambda META: (triton.cdiv(M, META['BLOCK_M']) * triton.cdiv(N, META['BLOCK_N']),) + _quantize_global_transpose[grid](input, absmax_inv, out, input.stride(0), input.stride(1), out.stride(0), out.stride(1), M, N) + return out, absmax + diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/triton/quantize_rowwise.py b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/quantize_rowwise.py new file mode 100644 index 0000000000000000000000000000000000000000..26d218321032f9780cdee8b861eda817e985e9b4 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/quantize_rowwise.py @@ -0,0 +1,68 @@ +import math +import torch +import time + +from bitsandbytes.triton.triton_utils import is_triton_available + +if not is_triton_available(): + def quantize_rowwise(x: torch.Tensor): return None +else: + + import triton + import triton.language as tl + from triton.ops.matmul_perf_model import early_config_prune, estimate_matmul_time + + # rowwise quantize + + # TODO: autotune this better. + @triton.autotune( + configs=[ + triton.Config({}, num_stages=1, num_warps=8), + triton.Config({}, num_stages=2, num_warps=8), + triton.Config({}, num_stages=4, num_warps=8), + triton.Config({}, num_stages=8, num_warps=8), + triton.Config({}, num_stages=1), + triton.Config({}, num_stages=2), + triton.Config({}, num_stages=4), + triton.Config({}, num_stages=8), + triton.Config({}, num_warps=1), + triton.Config({}, num_warps=2), + triton.Config({}, num_warps=4), + triton.Config({}, num_warps=8), + ], + key=['n_elements'] + ) + @triton.jit + def _quantize_rowwise( + x_ptr, + output_ptr, + output_maxs, + n_elements, + BLOCK_SIZE: tl.constexpr, + P2: tl.constexpr, + ): + pid = tl.program_id(axis=0) + block_start = pid * BLOCK_SIZE + arange = tl.arange(0, P2) + offsets = block_start + arange + row_mask = arange < BLOCK_SIZE + x = tl.load(x_ptr + offsets, mask=row_mask) + + abs_x = tl.abs(x) + max_val = tl.max(tl.where(row_mask, abs_x, 0), axis=0) + output = tl.libdevice.llrint(127. 
* (x / max_val)) + tl.store(output_ptr + offsets, output, mask=row_mask) + tl.store(output_maxs + pid, max_val) + + def quantize_rowwise(x: torch.Tensor): + output = torch.empty(*x.shape, device=x.device, dtype=torch.int8) + output_maxs = torch.empty(x.shape[0], device=x.device, dtype=torch.float16) + + P2 = int(2 ** (math.ceil(math.log2(x.shape[1])))) + + assert x.is_cuda and output.is_cuda + n_elements = output.numel() + grid = lambda meta: (x.shape[0],) + _quantize_rowwise[grid](x, output, output_maxs, n_elements, BLOCK_SIZE=x.shape[1], P2=P2) + return output, output_maxs + diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/triton/triton_utils.py b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/triton_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c74c2396236436ec2d97ceca9334b5014cdb2585 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/triton/triton_utils.py @@ -0,0 +1,4 @@ +import importlib + +def is_triton_available(): + return importlib.util.find_spec("triton") is not None diff --git a/mgm/lib/python3.10/site-packages/bitsandbytes/utils.py b/mgm/lib/python3.10/site-packages/bitsandbytes/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6729f7cd4167b18e294387100b7c47adb24e3793 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/bitsandbytes/utils.py @@ -0,0 +1,199 @@ +import shlex +import subprocess +import torch +from typing import Tuple + +def outlier_hook(module, input): + assert isinstance(module, torch.nn.Linear) + tracer = OutlierTracer.get_instance() + hvalue = tracer.get_hvalue(module.weight) + if hvalue not in tracer.hvalue2outlier_idx: + outlier_idx = find_outlier_dims(module.weight) + tracer.outliers.append(outlier_idx) + tracer.hvalues.append(hvalue) + if len(tracer.outliers) > 1: + # assign the current layer the outlier idx found from the weight + # of the previous linear layer + if tracer.outliers[-1].numel() > 0: + assert tracer.outliers[-1].max() < module.weight.shape[1] + tracer.hvalue2outlier_idx[hvalue] = tracer.outliers[-1] + + else: + # first layer, we cannot use the weight for outlier detection + # we follow a mixed approach: + # (1) zscore test of std of hidden dimension + # (2) magnitude > 6 test + merged = input[0].view(-1, input[0].shape[-1]) + # (1) zscore test of std of hidden dimension + outlier_idx = find_outlier_dims(merged, reduction_dim=1, zscore=3) + # (2) magnitude > 6 test + dims = (torch.abs(input[0])> 6).sum(dim=list(range(len(input[0].shape)-1))) + outlier_idx2 = torch.where(dims > 0)[0] + outlier_idx = torch.cat([outlier_idx, outlier_idx2]).unique() + tracer.hvalue2outlier_idx[hvalue] = outlier_idx + else: + for hook in tracer.hooks: + hook.remove() + + +class OutlierTracer(object): + _instance = None + + def __init__(self): + raise RuntimeError("Call get_instance() instead") + + def initialize(self, model): + self.last_w = None + self.current_outlier_dims = None + self.hvalues = [] + self.outliers = [] + self.hvalue2outlier_idx = {} + self.initialized = True + self.hooks = [] + + for n, m in model.named_modules(): + if isinstance(m, torch.nn.Linear): + self.hooks.append(m.register_forward_pre_hook(outlier_hook)) + + def is_initialized(self): + return getattr(self, 'initialized', False) + + def get_hvalue(self, weight): + return weight.data.storage().data_ptr() + + def get_outliers(self, weight): + if not self.is_initialized(): + print('Outlier tracer is not initialized...') + return None + hvalue = self.get_hvalue(weight) + if hvalue in 
self.hvalue2outlier_idx: + return self.hvalue2outlier_idx[hvalue] + else: + return None + + @classmethod + def get_instance(cls): + if cls._instance is None: + cls._instance = cls.__new__(cls) + return cls._instance + +def find_outlier_dims(weight, reduction_dim=0, zscore=4.0, topk=None, rdm=False): + if rdm: + return torch.randint(0, weight.shape[1], size=(topk,), device=weight.device).long() + + m = weight.mean(reduction_dim) + mm = m.mean() + mstd = m.std() + zm = (m-mm)/mstd + + std = weight.std(reduction_dim) + stdm = std.mean() + stdstd = std.std() + + zstd = (std-stdm)/stdstd + + if topk is not None: + val, idx = torch.topk(std.abs(), k=topk, dim=0) + else: + idx = torch.where(zstd > zscore)[0] + + return idx + +def replace_linear(model, linear_replacement, skip_modules=["lm_head"], copy_weights=False, post_processing_function=None): + """ + Replace linear modules with a new Linear module. + + Parameters: + model (`torch.nn.Module`): + Input model or `torch.nn.Module` as the function is run recursively. + linear_replacement (`torch.nn.Module`): + The linear module that replaces the old one. Only expects standard arguments. + If other arguments need to be passed, use a lambda. + skip_modules (`List[str]`, *optional*, defaults to `lm_head`): + List of module names not to convert. Defaults to `lm_head`. + copy_weights (`bool`): + Copy the weights from the old linear module to the new one. + post_processing_function (`str`): + A function name of the replacement linear class that is called + after processing. + """ + for name, module in model.named_children(): + if len(list(module.children())) > 0: + replace_linear(module, linear_replacement, skip_modules, copy_weights, post_processing_function) + + if isinstance(module, torch.nn.Linear) and name not in skip_modules: + old_module = model._modules[name] + model._modules[name] = linear_replacement( + module.in_features, + module.out_features, + module.bias is not None, + ) + if copy_weights: + model._modules[name].weight = old_module.weight + model._modules[name].bias = old_module.bias + + if post_processing_function is not None: + func = getattr(module, post_processing_function, None) + if func is not None: func(module) + return model + + + +def execute_and_return(command_string: str) -> Tuple[str, str]: + def _decode(subprocess_err_out_tuple): + return tuple( + to_decode.decode("UTF-8").strip() + for to_decode in subprocess_err_out_tuple + ) + + def execute_and_return_decoded_std_streams(command_string): + return _decode( + subprocess.Popen( + shlex.split(command_string), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ).communicate() + ) + + std_out, std_err = execute_and_return_decoded_std_streams(command_string) + return std_out, std_err
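A brief usage note on the `replace_linear` helper added to bitsandbytes/utils.py above: it recurses through `model.named_children()`, swaps every `torch.nn.Linear` whose attribute name is not in `skip_modules` for the given replacement class (constructed as `linear_replacement(in_features, out_features, bias)`), and can carry the old weights over. A minimal sketch, assuming `bitsandbytes` is importable and using a hypothetical toy model:

import torch
import bitsandbytes as bnb
from bitsandbytes.utils import replace_linear

# Hypothetical toy model: an MLP plus an lm_head that should keep full precision.
model = torch.nn.ModuleDict({
    "mlp": torch.nn.Sequential(torch.nn.Linear(64, 64), torch.nn.Linear(64, 64)),
    "lm_head": torch.nn.Linear(64, 32),
})

# Swap every nn.Linear except "lm_head" for an 8-bit linear module,
# copying the existing weights into the replacements.
model = replace_linear(model, bnb.nn.Linear8bitLt,
                       skip_modules=["lm_head"], copy_weights=True)

Any class (or lambda) with the `(in_features, out_features, bias)` constructor signature can stand in for `bnb.nn.Linear8bitLt`.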
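Likewise, a sketch of how the row-wise quantizer from quantize_rowwise.py earlier in this diff is driven (shapes are illustrative; a CUDA device and a Triton install are assumed, since the function is stubbed out to return None when Triton is unavailable):

import torch
from bitsandbytes.triton.quantize_rowwise import quantize_rowwise

# Row-wise int8 quantization: each row is scaled by its own absolute maximum.
x = torch.randn(8, 128, device="cuda", dtype=torch.float16)
q, row_maxs = quantize_rowwise(x)  # q: int8, shape (8, 128); row_maxs: fp16, shape (8,)

# Round trip: x is approximately q / 127 * row_maxs, row by row.
x_hat = q.float() / 127.0 * row_maxs.float().unsqueeze(1)
print((x.float() - x_hat).abs().max())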
diff --git a/mgm/lib/python3.10/site-packages/nvidia_cuda_runtime_cu11-11.7.99.dist-info/INSTALLER b/mgm/lib/python3.10/site-packages/nvidia_cuda_runtime_cu11-11.7.99.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/nvidia_cuda_runtime_cu11-11.7.99.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/mgm/lib/python3.10/site-packages/nvidia_cuda_runtime_cu11-11.7.99.dist-info/License.txt b/mgm/lib/python3.10/site-packages/nvidia_cuda_runtime_cu11-11.7.99.dist-info/License.txt new file mode 100644 index 0000000000000000000000000000000000000000..b491c70e0aef319022ded661e111ddbd45b8a17f --- /dev/null +++ b/mgm/lib/python3.10/site-packages/nvidia_cuda_runtime_cu11-11.7.99.dist-info/License.txt @@ -0,0 +1,1568 @@ +End User License Agreement +-------------------------- + + +Preface +------- + +The Software License Agreement in Chapter 1 and the Supplement +in Chapter 2 contain license terms and conditions that govern +the use of NVIDIA software. By accepting this agreement, you +agree to comply with all the terms and conditions applicable +to the product(s) included herein. + + +NVIDIA Driver + + +Description + +This package contains the operating system driver and +fundamental system software components for NVIDIA GPUs. + + +NVIDIA CUDA Toolkit + + +Description + +The NVIDIA CUDA Toolkit provides command-line and graphical +tools for building, debugging and optimizing the performance +of applications accelerated by NVIDIA GPUs, runtime and math +libraries, and documentation including programming guides, +user manuals, and API references. + + +Default Install Location of CUDA Toolkit + +Windows platform: + +%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.# + +Linux platform: + +/usr/local/cuda-#.# + +Mac platform: + +/Developer/NVIDIA/CUDA-#.# + + +NVIDIA CUDA Samples + + +Description + +This package includes over 100+ CUDA examples that demonstrate +various CUDA programming principles, and efficient CUDA +implementation of algorithms in specific application domains.
+ + +Default Install Location of CUDA Samples + +Windows platform: + +%ProgramData%\NVIDIA Corporation\CUDA Samples\v#.# + +Linux platform: + +/usr/local/cuda-#.#/samples + +and + +$HOME/NVIDIA_CUDA-#.#_Samples + +Mac platform: + +/Developer/NVIDIA/CUDA-#.#/samples + + +NVIDIA Nsight Visual Studio Edition (Windows only) + + +Description + +NVIDIA Nsight Development Platform, Visual Studio Edition is a +development environment integrated into Microsoft Visual +Studio that provides tools for debugging, profiling, analyzing +and optimizing your GPU computing and graphics applications. + + +Default Install Location of Nsight Visual Studio Edition + +Windows platform: + +%ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.# + + +1. License Agreement for NVIDIA Software Development Kits +--------------------------------------------------------- + + +Release Date: July 26, 2018 +--------------------------- + + +Important Notice -- Read before downloading, installing, +copying or using the licensed software: +------------------------------------------------------- + +This license agreement, including exhibits attached +("Agreement") is a legal agreement between you and NVIDIA +Corporation ("NVIDIA") and governs your use of a NVIDIA +software development kit (“SDK”). + +Each SDK has its own set of software and materials, but here +is a description of the types of items that may be included in +a SDK: source code, header files, APIs, data sets and assets +(examples include images, textures, models, scenes, videos, +native API input/output files), binary software, sample code, +libraries, utility programs, programming code and +documentation. + +This Agreement can be accepted only by an adult of legal age +of majority in the country in which the SDK is used. + +If you are entering into this Agreement on behalf of a company +or other legal entity, you represent that you have the legal +authority to bind the entity to this Agreement, in which case +“you” will mean the entity you represent. + +If you don’t have the required age or authority to accept +this Agreement, or if you don’t accept all the terms and +conditions of this Agreement, do not download, install or use +the SDK. + +You agree to use the SDK only for purposes that are permitted +by (a) this Agreement, and (b) any applicable law, regulation +or generally accepted practices or guidelines in the relevant +jurisdictions. + + +1.1. License + + +1.1.1. License Grant + +Subject to the terms of this Agreement, NVIDIA hereby grants +you a non-exclusive, non-transferable license, without the +right to sublicense (except as expressly provided in this +Agreement) to: + + 1. Install and use the SDK, + + 2. Modify and create derivative works of sample source code + delivered in the SDK, and + + 3. Distribute those portions of the SDK that are identified + in this Agreement as distributable, as incorporated in + object code format into a software application that meets + the distribution requirements indicated in this Agreement. + + +1.1.2. Distribution Requirements + +These are the distribution requirements for you to exercise +the distribution grant: + + 1. Your application must have material additional + functionality, beyond the included portions of the SDK. + + 2. The distributable portions of the SDK shall only be + accessed by your application. + + 3.
The following notice shall be included in modifications + and derivative works of sample source code distributed: + “This software contains source code provided by NVIDIA + Corporation.” + + 4. Unless a developer tool is identified in this Agreement + as distributable, it is delivered for your internal use + only. + + 5. The terms under which you distribute your application + must be consistent with the terms of this Agreement, + including (without limitation) terms relating to the + license grant and license restrictions and protection of + NVIDIA’s intellectual property rights. Additionally, you + agree that you will protect the privacy, security and + legal rights of your application users. + + 6. You agree to notify NVIDIA in writing of any known or + suspected distribution or use of the SDK not in compliance + with the requirements of this Agreement, and to enforce + the terms of your agreements with respect to distributed + SDK. + + +1.1.3. Authorized Users + +You may allow employees and contractors of your entity or of +your subsidiary(ies) to access and use the SDK from your +secure network to perform work on your behalf. + +If you are an academic institution you may allow users +enrolled or employed by the academic institution to access and +use the SDK from your secure network. + +You are responsible for the compliance with the terms of this +Agreement by your authorized users. If you become aware that +your authorized users didn’t follow the terms of this +Agreement, you agree to take reasonable steps to resolve the +non-compliance and prevent new occurrences. + + +1.1.4. Pre-Release SDK + +The SDK versions identified as alpha, beta, preview or +otherwise as pre-release, may not be fully functional, may +contain errors or design flaws, and may have reduced or +different security, privacy, accessibility, availability, and +reliability standards relative to commercial versions of +NVIDIA software and materials. Use of a pre-release SDK may +result in unexpected results, loss of data, project delays or +other unpredictable damage or loss. + +You may use a pre-release SDK at your own risk, understanding +that pre-release SDKs are not intended for use in production +or business-critical systems. + +NVIDIA may choose not to make available a commercial version +of any pre-release SDK. NVIDIA may also choose to abandon +development and terminate the availability of a pre-release +SDK at any time without liability. + + +1.1.5. Updates + +NVIDIA may, at its option, make available patches, workarounds +or other updates to this SDK. Unless the updates are provided +with their separate governing terms, they are deemed part of +the SDK licensed to you as provided in this Agreement. You +agree that the form and content of the SDK that NVIDIA +provides may change without prior notice to you. While NVIDIA +generally maintains compatibility between versions, NVIDIA may +in some cases make changes that introduce incompatibilities in +future versions of the SDK. + + +1.1.6. Third Party Licenses + +The SDK may come bundled with, or otherwise include or be +distributed with, third party software licensed by a NVIDIA +supplier and/or open source software provided under an open +source license. Use of third party software is subject to the +third-party license terms, or in the absence of third party +terms, the terms of this Agreement. Copyright to third party +software is held by the copyright holders indicated in the +third-party software or license. + + +1.1.7. 
Reservation of Rights + +NVIDIA reserves all rights, title, and interest in and to the +SDK, not expressly granted to you under this Agreement. + + +1.2. Limitations + +The following license limitations apply to your use of the +SDK: + + 1. You may not reverse engineer, decompile or disassemble, + or remove copyright or other proprietary notices from any + portion of the SDK or copies of the SDK. + + 2. Except as expressly provided in this Agreement, you may + not copy, sell, rent, sublicense, transfer, distribute, + modify, or create derivative works of any portion of the + SDK. For clarity, you may not distribute or sublicense the + SDK as a stand-alone product. + + 3. Unless you have an agreement with NVIDIA for this + purpose, you may not indicate that an application created + with the SDK is sponsored or endorsed by NVIDIA. + + 4. You may not bypass, disable, or circumvent any + encryption, security, digital rights management or + authentication mechanism in the SDK. + + 5. You may not use the SDK in any manner that would cause it + to become subject to an open source software license. As + examples, licenses that require as a condition of use, + modification, and/or distribution that the SDK be: + + a. Disclosed or distributed in source code form; + + b. Licensed for the purpose of making derivative works; + or + + c. Redistributable at no charge. + + 6. Unless you have an agreement with NVIDIA for this + purpose, you may not use the SDK with any system or + application where the use or failure of the system or + application can reasonably be expected to threaten or + result in personal injury, death, or catastrophic loss. + Examples include use in avionics, navigation, military, + medical, life support or other life critical applications. + NVIDIA does not design, test or manufacture the SDK for + these critical uses and NVIDIA shall not be liable to you + or any third party, in whole or in part, for any claims or + damages arising from such uses. + + 7. You agree to defend, indemnify and hold harmless NVIDIA + and its affiliates, and their respective employees, + contractors, agents, officers and directors, from and + against any and all claims, damages, obligations, losses, + liabilities, costs or debt, fines, restitutions and + expenses (including but not limited to attorney’s fees + and costs incident to establishing the right of + indemnification) arising out of or related to your use of + the SDK outside of the scope of this Agreement, or not in + compliance with its terms. + + +1.3. Ownership + + 1. NVIDIA or its licensors hold all rights, title and + interest in and to the SDK and its modifications and + derivative works, including their respective intellectual + property rights, subject to your rights described in this + section. This SDK may include software and materials from + NVIDIA’s licensors, and these licensors are intended + third party beneficiaries that may enforce this Agreement + with respect to their intellectual property rights. + + 2. You hold all rights, title and interest in and to your + applications and your derivative works of the sample + source code delivered in the SDK, including their + respective intellectual property rights, subject to + NVIDIA’s rights described in this section. + + 3. You may, but don’t have to, provide to NVIDIA + suggestions, feature requests or other feedback regarding + the SDK, including possible enhancements or modifications + to the SDK. 
For any feedback that you voluntarily provide, + you hereby grant NVIDIA and its affiliates a perpetual, + non-exclusive, worldwide, irrevocable license to use, + reproduce, modify, license, sublicense (through multiple + tiers of sublicensees), and distribute (through multiple + tiers of distributors) it without the payment of any + royalties or fees to you. NVIDIA will use feedback at its + choice. NVIDIA is constantly looking for ways to improve + its products, so you may send feedback to NVIDIA through + the developer portal at https://developer.nvidia.com. + + +1.4. No Warranties + +THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL +FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND +ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND +OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, +BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE +ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO +WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF +DEALING OR COURSE OF TRADE. + + +1.5. Limitation of Liability + +TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS +AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, +PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS +OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF +PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION +WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK, +WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH +OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), +PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF +LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES +TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS +AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE +NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS +LIMIT. + +These exclusions and limitations of liability shall apply +regardless if NVIDIA or its affiliates have been advised of +the possibility of such damages, and regardless of whether a +remedy fails its essential purpose. These exclusions and +limitations of liability form an essential basis of the +bargain between the parties, and, absent any of these +exclusions or limitations of liability, the provisions of this +Agreement, including, without limitation, the economic terms, +would be substantially different. + + +1.6. Termination + + 1. This Agreement will continue to apply until terminated by + either you or NVIDIA as described below. + + 2. If you want to terminate this Agreement, you may do so by + stopping to use the SDK. + + 3. NVIDIA may, at any time, terminate this Agreement if: + + a. (i) you fail to comply with any term of this + Agreement and the non-compliance is not fixed within + thirty (30) days following notice from NVIDIA (or + immediately if you violate NVIDIA’s intellectual + property rights); + + b. (ii) you commence or participate in any legal + proceeding against NVIDIA with respect to the SDK; or + + c. (iii) NVIDIA decides to no longer provide the SDK in + a country or, in NVIDIA’s sole discretion, the + continued use of it is no longer commercially viable. + + 4. Upon any termination of this Agreement, you agree to + promptly discontinue use of the SDK and destroy all copies + in your possession or control. Your prior distributions in + accordance with this Agreement are not affected by the + termination of this Agreement. 
Upon written request, you + will certify in writing that you have complied with your + commitments under this section. Upon any termination of + this Agreement all provisions survive except for the + license grant provisions. + + +1.7. General + +If you wish to assign this Agreement or your rights and +obligations, including by merger, consolidation, dissolution +or operation of law, contact NVIDIA to ask for permission. Any +attempted assignment not approved by NVIDIA in writing shall +be void and of no effect. NVIDIA may assign, delegate or +transfer this Agreement and its rights and obligations, and if +to a non-affiliate you will be notified. + +You agree to cooperate with NVIDIA and provide reasonably +requested information to verify your compliance with this +Agreement. + +This Agreement will be governed in all respects by the laws of +the United States and of the State of Delaware as those laws +are applied to contracts entered into and performed entirely +within Delaware by Delaware residents, without regard to the +conflicts of laws principles. The United Nations Convention on +Contracts for the International Sale of Goods is specifically +disclaimed. You agree to all terms of this Agreement in the +English language. + +The state or federal courts residing in Santa Clara County, +California shall have exclusive jurisdiction over any dispute +or claim arising out of this Agreement. Notwithstanding this, +you agree that NVIDIA shall still be allowed to apply for +injunctive remedies or an equivalent type of urgent legal +relief in any jurisdiction. + +If any court of competent jurisdiction determines that any +provision of this Agreement is illegal, invalid or +unenforceable, such provision will be construed as limited to +the extent necessary to be consistent with and fully +enforceable under the law and the remaining provisions will +remain in full force and effect. Unless otherwise specified, +remedies are cumulative. + +Each party acknowledges and agrees that the other is an +independent contractor in the performance of this Agreement. + +The SDK has been developed entirely at private expense and is +“commercial items” consisting of “commercial computer +software” and “commercial computer software +documentation” provided with RESTRICTED RIGHTS. Use, +duplication or disclosure by the U.S. Government or a U.S. +Government subcontractor is subject to the restrictions in +this Agreement pursuant to DFARS 227.7202-3(a) or as set forth +in subparagraphs (c)(1) and (2) of the Commercial Computer +Software - Restricted Rights clause at FAR 52.227-19, as +applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas +Expressway, Santa Clara, CA 95051. + +The SDK is subject to United States export laws and +regulations. You agree that you will not ship, transfer or +export the SDK into any country, or use the SDK in any manner, +prohibited by the United States Bureau of Industry and +Security or economic sanctions regulations administered by the +U.S. Department of Treasury’s Office of Foreign Assets +Control (OFAC), or any applicable export laws, restrictions or +regulations. These laws include restrictions on destinations, +end users and end use. By accepting this Agreement, you +confirm that you are not a resident or citizen of any country +currently embargoed by the U.S. and that you are not otherwise +prohibited from receiving the SDK. + +Any notice delivered by NVIDIA to you under this Agreement +will be delivered via mail, email or fax. 
You agree that any +notices that NVIDIA sends you electronically will satisfy any +legal communication requirements. Please direct your legal +notices or other correspondence to NVIDIA Corporation, 2788 +San Tomas Expressway, Santa Clara, California 95051, United +States of America, Attention: Legal Department. + +This Agreement and any exhibits incorporated into this +Agreement constitute the entire agreement of the parties with +respect to the subject matter of this Agreement and supersede +all prior negotiations or documentation exchanged between the +parties relating to this SDK license. Any additional and/or +conflicting terms on documents issued by you are null, void, +and invalid. Any amendment or waiver under this Agreement +shall be in writing and signed by representatives of both +parties. + + +2. CUDA Toolkit Supplement to Software License Agreement for +NVIDIA Software Development Kits +------------------------------------------------------------ + + +Release date: August 16, 2018 +----------------------------- + +The terms in this supplement govern your use of the NVIDIA +CUDA Toolkit SDK under the terms of your license agreement +(“Agreement”) as modified by this supplement. Capitalized +terms used but not defined below have the meaning assigned to +them in the Agreement. + +This supplement is an exhibit to the Agreement and is +incorporated as an integral part of the Agreement. In the +event of conflict between the terms in this supplement and the +terms in the Agreement, the terms in this supplement govern. + + +2.1. License Scope + +The SDK is licensed for you to develop applications only for +use in systems with NVIDIA GPUs. + + +2.2. Distribution + +The portions of the SDK that are distributable under the +Agreement are listed in Attachment A. + + +2.3. Operating Systems + +Those portions of the SDK designed exclusively for use on the +Linux or FreeBSD operating systems, or other operating systems +derived from the source code to these operating systems, may +be copied and redistributed for use in accordance with this +Agreement, provided that the object code files are not +modified in any way (except for unzipping of compressed +files). + + +2.4. Audio and Video Encoders and Decoders + +You acknowledge and agree that it is your sole responsibility +to obtain any additional third-party licenses required to +make, have made, use, have used, sell, import, and offer for +sale your products or services that include or incorporate any +third-party software and content relating to audio and/or +video encoders and decoders from, including but not limited +to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A., +MPEG-LA, and Coding Technologies. NVIDIA does not grant to you +under this Agreement any necessary patent or other rights with +respect to any audio and/or video encoders and decoders. + + +2.5. Licensing + +If the distribution terms in this Agreement are not suitable +for your organization, or for any questions regarding this +Agreement, please contact NVIDIA at +nvidia-compute-license-questions@nvidia.com. + + +2.6. 
Attachment A + +The following portions of the SDK are distributable under the +Agreement: + +Component + +CUDA Runtime + +Windows + +cudart.dll, cudart_static.lib, cudadevrt.lib + +Mac OSX + +libcudart.dylib, libcudart_static.a, libcudadevrt.a + +Linux + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Android + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Component + +CUDA FFT Library + +Windows + +cufft.dll, cufftw.dll, cufft.lib, cufftw.lib + +Mac OSX + +libcufft.dylib, libcufft_static.a, libcufftw.dylib, +libcufftw_static.a + +Linux + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Android + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Component + +CUDA BLAS Library + +Windows + +cublas.dll, cublasLt.dll + +Mac OSX + +libcublas.dylib, libcublasLt.dylib, libcublas_static.a, +libcublasLt_static.a + +Linux + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Android + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Component + +NVIDIA "Drop-in" BLAS Library + +Windows + +nvblas.dll + +Mac OSX + +libnvblas.dylib + +Linux + +libnvblas.so + +Component + +CUDA Sparse Matrix Library + +Windows + +cusparse.dll, cusparse.lib + +Mac OSX + +libcusparse.dylib, libcusparse_static.a + +Linux + +libcusparse.so, libcusparse_static.a + +Android + +libcusparse.so, libcusparse_static.a + +Component + +CUDA Linear Solver Library + +Windows + +cusolver.dll, cusolver.lib + +Mac OSX + +libcusolver.dylib, libcusolver_static.a + +Linux + +libcusolver.so, libcusolver_static.a + +Android + +libcusolver.so, libcusolver_static.a + +Component + +CUDA Random Number Generation Library + +Windows + +curand.dll, curand.lib + +Mac OSX + +libcurand.dylib, libcurand_static.a + +Linux + +libcurand.so, libcurand_static.a + +Android + +libcurand.so, libcurand_static.a + +Component + +CUDA Accelerated Graph Library + +Component + +NVIDIA Performance Primitives Library + +Windows + +nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll, +nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll, +nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib, +nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll, +nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib + +Mac OSX + +libnppc.dylib, libnppc_static.a, libnppial.dylib, +libnppial_static.a, libnppicc.dylib, libnppicc_static.a, +libnppicom.dylib, libnppicom_static.a, libnppidei.dylib, +libnppidei_static.a, libnppif.dylib, libnppif_static.a, +libnppig.dylib, libnppig_static.a, libnppim.dylib, +libnppisu_static.a, libnppitc.dylib, libnppitc_static.a, +libnpps.dylib, libnpps_static.a + +Linux + +libnppc.so, libnppc_static.a, libnppial.so, +libnppial_static.a, libnppicc.so, libnppicc_static.a, +libnppicom.so, libnppicom_static.a, libnppidei.so, +libnppidei_static.a, libnppif.so, libnppif_static.a +libnppig.so, libnppig_static.a, libnppim.so, +libnppim_static.a, libnppist.so, libnppist_static.a, +libnppisu.so, libnppisu_static.a, libnppitc.so +libnppitc_static.a, libnpps.so, libnpps_static.a + +Android + +libnppc.so, libnppc_static.a, libnppial.so, +libnppial_static.a, libnppicc.so, libnppicc_static.a, +libnppicom.so, libnppicom_static.a, libnppidei.so, +libnppidei_static.a, libnppif.so, libnppif_static.a +libnppig.so, libnppig_static.a, libnppim.so, +libnppim_static.a, libnppist.so, libnppist_static.a, +libnppisu.so, libnppisu_static.a, libnppitc.so +libnppitc_static.a, libnpps.so, libnpps_static.a + +Component + +NVIDIA JPEG Library + +Linux + +libnvjpeg.so, 
libnvjpeg_static.a + +Component + +Internal common library required for statically linking to +cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP + +Mac OSX + +libculibos.a + +Linux + +libculibos.a + +Component + +NVIDIA Runtime Compilation Library and Header + +All + +nvrtc.h + +Windows + +nvrtc.dll, nvrtc-builtins.dll + +Mac OSX + +libnvrtc.dylib, libnvrtc-builtins.dylib + +Linux + +libnvrtc.so, libnvrtc-builtins.so + +Component + +NVIDIA Optimizing Compiler Library + +Windows + +nvvm.dll + +Mac OSX + +libnvvm.dylib + +Linux + +libnvvm.so + +Component + +NVIDIA Common Device Math Functions Library + +Windows + +libdevice.10.bc + +Mac OSX + +libdevice.10.bc + +Linux + +libdevice.10.bc + +Component + +CUDA Occupancy Calculation Header Library + +All + +cuda_occupancy.h + +Component + +CUDA Half Precision Headers + +All + +cuda_fp16.h, cuda_fp16.hpp + +Component + +CUDA Profiling Tools Interface (CUPTI) Library + +Windows + +cupti.dll + +Mac OSX + +libcupti.dylib + +Linux + +libcupti.so + +Component + +NVIDIA Tools Extension Library + +Windows + +nvToolsExt.dll, nvToolsExt.lib + +Mac OSX + +libnvToolsExt.dylib + +Linux + +libnvToolsExt.so + +Component + +NVIDIA CUDA Driver Libraries + +Linux + +libcuda.so, libnvidia-fatbinaryloader.so, +libnvidia-ptxjitcompiler.so + +The NVIDIA CUDA Driver Libraries are only distributable in +applications that meet this criteria: + + 1. The application was developed starting from a NVIDIA CUDA + container obtained from Docker Hub or the NVIDIA GPU + Cloud, and + + 2. The resulting application is packaged as a Docker + container and distributed to users on Docker Hub or the + NVIDIA GPU Cloud only. + + +2.7. Attachment B + + +Additional Licensing Obligations + +The following third party components included in the SOFTWARE +are licensed to Licensee pursuant to the following terms and +conditions: + + 1. Licensee's use of the GDB third party component is + subject to the terms and conditions of GNU GPL v3: + + This product includes copyrighted third-party software licensed + under the terms of the GNU General Public License v3 ("GPL v3"). + All third-party software packages are copyright by their respective + authors. GPL v3 terms and conditions are hereby incorporated into + the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt + + Consistent with these licensing requirements, the software + listed below is provided under the terms of the specified + open source software licenses. To obtain source code for + software provided under licenses that require + redistribution of source code, including the GNU General + Public License (GPL) and GNU Lesser General Public License + (LGPL), contact oss-requests@nvidia.com. This offer is + valid for a period of three (3) years from the date of the + distribution of this product by NVIDIA CORPORATION. + + Component License + CUDA-GDB GPL v3 + + 2. Licensee represents and warrants that any and all third + party licensing and/or royalty payment obligations in + connection with Licensee's use of the H.264 video codecs + are solely the responsibility of Licensee. + + 3. Licensee's use of the Thrust library is subject to the + terms and conditions of the Apache License Version 2.0. + All third-party software packages are copyright by their + respective authors. Apache License Version 2.0 terms and + conditions are hereby incorporated into the Agreement by + this reference. 
+ http://www.apache.org/licenses/LICENSE-2.0.html + + In addition, Licensee acknowledges the following notice: + Thrust includes source code from the Boost Iterator, + Tuple, System, and Random Number libraries. + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 4. Licensee's use of the LLVM third party component is + subject to the following terms and conditions: + + ====================================================== + LLVM Release License + ====================================================== + University of Illinois/NCSA + Open Source License + + Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign. + All rights reserved. + + Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal with the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at Urbana- + Champaign, nor the names of its contributors may be used to endorse or + promote products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS WITH THE SOFTWARE. + + 5. Licensee's use (e.g. nvprof) of the PCRE third party + component is subject to the following terms and + conditions: + + ------------ + PCRE LICENCE + ------------ + PCRE is a library of functions to support regular expressions whose syntax + and semantics are as close as possible to those of the Perl 5 language. + Release 8 of PCRE is distributed under the terms of the "BSD" licence, as + specified below. The documentation for PCRE, supplied in the "doc" + directory, is distributed under the same terms as the software itself. The + basic library functions are written in C and are freestanding. Also + included in the distribution is a set of C++ wrapper functions, and a just- + in-time compiler that can be used to optimize pattern matching. These are + both optional features that can be omitted when the library is built. + + THE BASIC LIBRARY FUNCTIONS + --------------------------- + Written by: Philip Hazel + Email local part: ph10 + Email domain: cam.ac.uk + University of Cambridge Computing Service, + Cambridge, England. + Copyright (c) 1997-2012 University of Cambridge + All rights reserved. + + PCRE JUST-IN-TIME COMPILATION SUPPORT + ------------------------------------- + Written by: Zoltan Herczeg + Email local part: hzmester + Emain domain: freemail.hu + Copyright(c) 2010-2012 Zoltan Herczeg + All rights reserved. + + STACK-LESS JUST-IN-TIME COMPILER + -------------------------------- + Written by: Zoltan Herczeg + Email local part: hzmester + Emain domain: freemail.hu + Copyright(c) 2009-2012 Zoltan Herczeg + All rights reserved. + + THE C++ WRAPPER FUNCTIONS + ------------------------- + Contributed by: Google Inc. + Copyright (c) 2007-2012, Google Inc. + All rights reserved. + + THE "BSD" LICENCE + ----------------- + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + * Neither the name of the University of Cambridge nor the name of Google + Inc. nor the names of their contributors may be used to endorse or + promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 6. 
Some of the cuBLAS library routines were written by or + derived from code written by Vasily Volkov and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2007-2009, Regents of the University of California + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the University of California, Berkeley nor + the names of its contributors may be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 7. Some of the cuBLAS library routines were written by or + derived from code written by Davide Barbieri and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * The name of the author may not be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 8. 
Some of the cuBLAS library routines were derived from + code developed by the University of Tennessee and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2010 The University of Tennessee. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer listed in this license in the documentation and/or + other materials provided with the distribution. + * Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 9. Some of the cuBLAS library routines were written by or + derived from code written by Jonathan Hogg and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2012, The Science and Technology Facilities Council (STFC). + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the STFC nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 10. 
Some of the cuBLAS library routines were written by or + derived from code written by Ahmad M. Abdelfattah, David + Keyes, and Hatem Ltaief, and are subject to the Apache + License, Version 2.0, as follows: + + -- (C) Copyright 2013 King Abdullah University of Science and Technology + Authors: + Ahmad Abdelfattah (ahmad.ahmad@kaust.edu.sa) + David Keyes (david.keyes@kaust.edu.sa) + Hatem Ltaief (hatem.ltaief@kaust.edu.sa) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the King Abdullah University of Science and + Technology nor the names of its contributors may be used to endorse + or promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE + + 11. Some of the cuSPARSE library routines were written by or + derived from code written by Li-Wen Chang and are subject + to the NCSA Open Source License as follows: + + Copyright (c) 2012, University of Illinois. + + All rights reserved. + + Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal with the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimers in the documentation and/or other materials provided + with the distribution. + * Neither the names of IMPACT Group, University of Illinois, nor + the names of its contributors may be used to endorse or promote + products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. 
IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE + SOFTWARE. + + 12. Some of the cuRAND library routines were written by or + derived from code written by Mutsuo Saito and Makoto + Matsumoto and are subject to the following license: + + Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima + University. All rights reserved. + + Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima + University and University of Tokyo. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the Hiroshima University nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 13. Some of the cuRAND library routines were derived from + code developed by D. E. Shaw Research and are subject to + the following license: + + Copyright 2010-2011, D. E. Shaw Research. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions, and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions, and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of D. E. Shaw Research nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 14. Some of the Math library routines were written by or + derived from code developed by Norbert Juffa and are + subject to the following license: + + Copyright (c) 2015-2017, Norbert Juffa + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 15. Licensee's use of the lz4 third party component is + subject to the following terms and conditions: + + Copyright (C) 2011-2013, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 16. 
The NPP library uses code from the Boost Math Toolkit, + and is subject to the following license: + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 17. Portions of the Nsight Eclipse Edition is subject to the + following license: + + The Eclipse Foundation makes available all content in this plug-in + ("Content"). Unless otherwise indicated below, the Content is provided + to you under the terms and conditions of the Eclipse Public License + Version 1.0 ("EPL"). A copy of the EPL is available at http:// + www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program" + will mean the Content. + + If you did not receive this Content directly from the Eclipse + Foundation, the Content is being redistributed by another party + ("Redistributor") and different terms and conditions may apply to your + use of any object code in the Content. Check the Redistributor's + license that was provided with the Content. If no such license exists, + contact the Redistributor. Unless otherwise indicated below, the terms + and conditions of the EPL still apply to any source code in the + Content and such source code may be obtained at http://www.eclipse.org. + + 18. Some of the cuBLAS library routines uses code from + OpenAI, which is subject to the following license: + + License URL + https://github.com/openai/openai-gemm/blob/master/LICENSE + + License Text + The MIT License + + Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + 19. Licensee's use of the Visual Studio Setup Configuration + Samples is subject to the following license: + + The MIT License (MIT) + Copyright (C) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without restriction, + including without limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of the Software, + and to permit persons to whom the Software is furnished to do so, + subject to the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + 20. Licensee's use of linmath.h header for CPU functions for + GL vector/matrix operations from lunarG is subject to the + Apache License Version 2.0. + + 21. The DX12-CUDA sample uses the d3dx12.h header, which is + subject to the MIT license . 
+ +----------------- diff --git a/mgm/lib/python3.10/site-packages/nvidia_cuda_runtime_cu11-11.7.99.dist-info/METADATA b/mgm/lib/python3.10/site-packages/nvidia_cuda_runtime_cu11-11.7.99.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..3415436f6fecfa2bee1ae3d504dbfd613d1e26ac --- /dev/null +++ b/mgm/lib/python3.10/site-packages/nvidia_cuda_runtime_cu11-11.7.99.dist-info/METADATA @@ -0,0 +1,37 @@ +Metadata-Version: 2.1 +Name: nvidia-cuda-runtime-cu11 +Version: 11.7.99 +Summary: CUDA Runtime native Libraries +Home-page: https://developer.nvidia.com/cuda-zone +Author: Nvidia CUDA Installer Team +Author-email: cuda_installer@nvidia.com +License: NVIDIA Proprietary Software +Keywords: cuda,nvidia,runtime,machine learning,deep learning +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: Other/Proprietary License +Classifier: Natural Language :: English +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Software Development +Classifier: Topic :: Software Development :: Libraries +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: Microsoft :: Windows +Requires-Python: >=3 +License-File: License.txt +Requires-Dist: setuptools +Requires-Dist: wheel + +CUDA Runtime native Libraries diff --git a/mgm/lib/python3.10/site-packages/nvidia_cuda_runtime_cu11-11.7.99.dist-info/REQUESTED b/mgm/lib/python3.10/site-packages/nvidia_cuda_runtime_cu11-11.7.99.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mgm/lib/python3.10/site-packages/nvidia_cuda_runtime_cu11-11.7.99.dist-info/WHEEL b/mgm/lib/python3.10/site-packages/nvidia_cuda_runtime_cu11-11.7.99.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..06e355fe0e3ed7077903f119ae6928a17da8eb6f --- /dev/null +++ b/mgm/lib/python3.10/site-packages/nvidia_cuda_runtime_cu11-11.7.99.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-manylinux1_x86_64 + diff --git a/mgm/lib/python3.10/site-packages/nvidia_cuda_runtime_cu11-11.7.99.dist-info/top_level.txt b/mgm/lib/python3.10/site-packages/nvidia_cuda_runtime_cu11-11.7.99.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..4f6488f10c181d110f0e62cee86d3de2823a6678 --- /dev/null +++ b/mgm/lib/python3.10/site-packages/nvidia_cuda_runtime_cu11-11.7.99.dist-info/top_level.txt @@ -0,0 +1 @@ +nvidia/cuda_runtime diff --git a/mgm/lib/python3.10/site-packages/pyparsing/common.py b/mgm/lib/python3.10/site-packages/pyparsing/common.py new file mode 100644 index 0000000000000000000000000000000000000000..e46511086dadb79357441680531d286124da63c7 --- /dev/null +++ 
b/mgm/lib/python3.10/site-packages/pyparsing/common.py @@ -0,0 +1,434 @@ +# common.py +from .core import * +from .helpers import DelimitedList, any_open_tag, any_close_tag +from datetime import datetime + + +# some other useful expressions - using lower-case class name since we are really using this as a namespace +class pyparsing_common: + """Here are some common low-level expressions that may be useful in + jump-starting parser development: + + - numeric forms (:class:`integers <integer>`, :class:`reals <real>`, + :class:`scientific notation <sci_real>`) + - common :class:`programming identifiers <identifier>` + - network addresses (:class:`MAC <mac_address>`, + :class:`IPv4 <ipv4_address>`, :class:`IPv6 <ipv6_address>`) + - ISO8601 :class:`dates <iso8601_date>` and + :class:`datetime <iso8601_datetime>` + - :class:`UUID <uuid>` + - :class:`comma-separated list <comma_separated_list>` + - :class:`url` + + Parse actions: + + - :class:`convert_to_integer` + - :class:`convert_to_float` + - :class:`convert_to_date` + - :class:`convert_to_datetime` + - :class:`strip_html_tags` + - :class:`upcase_tokens` + - :class:`downcase_tokens` + + Example:: + + pyparsing_common.number.run_tests(''' + # any int or real number, returned as the appropriate type + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.fnumber.run_tests(''' + # any int or real number, returned as float + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.hex_integer.run_tests(''' + # hex numbers + 100 + FF + ''') + + pyparsing_common.fraction.run_tests(''' + # fractions + 1/2 + -3/4 + ''') + + pyparsing_common.mixed_integer.run_tests(''' + # mixed fractions + 1 + 1/2 + -3/4 + 1-3/4 + ''') + + import uuid + pyparsing_common.uuid.set_parse_action(token_map(uuid.UUID)) + pyparsing_common.uuid.run_tests(''' + # uuid + 12345678-1234-5678-1234-567812345678 + ''') + + prints:: + + # any int or real number, returned as the appropriate type + 100 + [100] + + -100 + [-100] + + +100 + [100] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # any int or real number, returned as float + 100 + [100.0] + + -100 + [-100.0] + + +100 + [100.0] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # hex numbers + 100 + [256] + + FF + [255] + + # fractions + 1/2 + [0.5] + + -3/4 + [-0.75] + + # mixed fractions + 1 + [1] + + 1/2 + [0.5] + + -3/4 + [-0.75] + + 1-3/4 + [1.75] + + # uuid + 12345678-1234-5678-1234-567812345678 + [UUID('12345678-1234-5678-1234-567812345678')] + """ + + convert_to_integer = token_map(int) + """ + Parse action for converting parsed integers to Python int + """ + + convert_to_float = token_map(float) + """ + Parse action for converting parsed numbers to Python float + """ + + integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer) + """expression that parses an unsigned integer, returns an int""" + + hex_integer = ( + Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16)) + ) + """expression that parses a hexadecimal integer, returns an int""" + + signed_integer = ( + Regex(r"[+-]?\d+") + .set_name("signed integer") + .set_parse_action(convert_to_integer) + ) + """expression that parses an integer with optional leading sign, returns an int""" + + fraction = ( + signed_integer().set_parse_action(convert_to_float) + + "/" + + signed_integer().set_parse_action(convert_to_float) + ).set_name("fraction") + """fractional expression of an integer divided by an integer, returns a float""" + fraction.add_parse_action(lambda tt: tt[0] / tt[-1]) + + mixed_integer = ( + fraction | signed_integer + Opt(Opt("-").suppress() + fraction) + ).set_name("fraction
or mixed integer-fraction") + """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" + mixed_integer.add_parse_action(sum) + + real = ( + Regex(r"[+-]?(?:\d+\.\d*|\.\d+)") + .set_name("real number") + .set_parse_action(convert_to_float) + ) + """expression that parses a floating point number and returns a float""" + + sci_real = ( + Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)") + .set_name("real number with scientific notation") + .set_parse_action(convert_to_float) + ) + """expression that parses a floating point number with optional + scientific notation and returns a float""" + + # streamlining this expression makes the docs nicer-looking + number = (sci_real | real | signed_integer).set_name("number").streamline() + """any numeric expression, returns the corresponding Python type""" + + fnumber = ( + Regex(r"[+-]?\d+\.?\d*(?:[eE][+-]?\d+)?") + .set_name("fnumber") + .set_parse_action(convert_to_float) + ) + """any int or real number, returned as float""" + + ieee_float = ( + Regex(r"(?i:[+-]?(?:(?:\d+\.?\d*(?:e[+-]?\d+)?)|nan|inf(?:inity)?))") + .set_name("ieee_float") + .set_parse_action(convert_to_float) + ) + """any floating-point literal (int, real number, infinity, or NaN), returned as float""" + + identifier = Word(identchars, identbodychars).set_name("identifier") + """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" + + ipv4_address = Regex( + r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}" + ).set_name("IPv4 address") + "IPv4 address (``0.0.0.0 - 255.255.255.255``)" + + _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer") + _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name( + "full IPv6 address" + ) + _short_ipv6_address = ( + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) + + "::" + + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) + ).set_name("short IPv6 address") + _short_ipv6_address.add_condition( + lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8 + ) + _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address") + ipv6_address = Combine( + (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name( + "IPv6 address" + ) + ).set_name("IPv6 address") + "IPv6 address (long, short, or mixed form)" + + mac_address = Regex( + r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}" + ).set_name("MAC address") + "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" + + @staticmethod + def convert_to_date(fmt: str = "%Y-%m-%d"): + """ + Helper to create a parse action for converting parsed date string to Python datetime.date + + Params - + - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``) + + Example:: + + date_expr = pyparsing_common.iso8601_date.copy() + date_expr.set_parse_action(pyparsing_common.convert_to_date()) + print(date_expr.parse_string("1999-12-31")) + + prints:: + + [datetime.date(1999, 12, 31)] + """ + + def cvt_fn(ss, ll, tt): + try: + return datetime.strptime(tt[0], fmt).date() + except ValueError as ve: + raise ParseException(ss, ll, str(ve)) + + return cvt_fn + + @staticmethod + def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"): + """Helper to create a parse action for converting parsed + datetime string to Python datetime.datetime + + Params - + - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) + + Example:: + + dt_expr = pyparsing_common.iso8601_datetime.copy() + dt_expr.set_parse_action(pyparsing_common.convert_to_datetime()) + print(dt_expr.parse_string("1999-12-31T23:59:59.999")) + + prints:: + + [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] + """ + + def cvt_fn(s, l, t): + try: + return datetime.strptime(t[0], fmt) + except ValueError as ve: + raise ParseException(s, l, str(ve)) + + return cvt_fn + + iso8601_date = Regex( + r"(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?" + ).set_name("ISO8601 date") + "ISO8601 date (``yyyy-mm-dd``)" + + iso8601_datetime = Regex( + r"(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?" + ).set_name("ISO8601 datetime") + "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``" + + uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID") + "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)" + + _html_stripper = any_open_tag.suppress() | any_close_tag.suppress() + + @staticmethod + def strip_html_tags(s: str, l: int, tokens: ParseResults): + """Parse action to remove HTML tags from web page HTML source + + Example:: + + # strip HTML links from normal text + text = 'More info at the pyparsing wiki page' + td, td_end = make_html_tags("TD") + table_text = td + SkipTo(td_end).set_parse_action(pyparsing_common.strip_html_tags)("body") + td_end + print(table_text.parse_string(text).body) + + Prints:: + + More info at the pyparsing wiki page + """ + return pyparsing_common._html_stripper.transform_string(tokens[0]) + + _commasepitem = ( + Combine( + OneOrMore( + ~Literal(",") + + ~LineEnd() + + Word(printables, exclude_chars=",") + + Opt(White(" \t") + ~FollowedBy(LineEnd() | ",")) + ) + ) + .streamline() + .set_name("commaItem") + ) + comma_separated_list = DelimitedList( + Opt(quoted_string.copy() | _commasepitem, default="") + ).set_name("comma separated list") + """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" + + upcase_tokens = staticmethod(token_map(lambda t: t.upper())) + """Parse action to convert tokens to upper case.""" + + downcase_tokens = staticmethod(token_map(lambda t: t.lower())) + """Parse action to convert tokens to lower case.""" + + # fmt: off + url = Regex( + # https://mathiasbynens.be/demo/url-regex + # https://gist.github.com/dperini/729294 + r"(?P" + + # protocol identifier (optional) + # short syntax // still required + r"(?:(?:(?Phttps?|ftp):)?\/\/)" + + # user:pass BasicAuth (optional) + 
r"(?:(?P\S+(?::\S*)?)@)?" + + r"(?P" + + # IP address exclusion + # private & local networks + r"(?!(?:10|127)(?:\.\d{1,3}){3})" + + r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" + + r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" + + # IP address dotted notation octets + # excludes loopback network 0.0.0.0 + # excludes reserved space >= 224.0.0.0 + # excludes network & broadcast addresses + # (first & last IP address of each class) + r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" + + r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" + + r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" + + r"|" + + # host & domain names, may end with dot + # can be replaced by a shortest alternative + # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+ + r"(?:" + + r"(?:" + + r"[a-z0-9\u00a1-\uffff]" + + r"[a-z0-9\u00a1-\uffff_-]{0,62}" + + r")?" + + r"[a-z0-9\u00a1-\uffff]\." + + r")+" + + # TLD identifier name, may end with dot + r"(?:[a-z\u00a1-\uffff]{2,}\.?)" + + r")" + + # port number (optional) + r"(:(?P\d{2,5}))?" + + # resource path (optional) + r"(?P\/[^?# ]*)?" + + # query string (optional) + r"(\?(?P[^#]*))?" + + # fragment (optional) + r"(#(?P\S*))?" + + r")" + ).set_name("url") + """URL (http/https/ftp scheme)""" + # fmt: on + + # pre-PEP8 compatibility names + # fmt: off + convertToInteger = staticmethod(replaced_by_pep8("convertToInteger", convert_to_integer)) + convertToFloat = staticmethod(replaced_by_pep8("convertToFloat", convert_to_float)) + convertToDate = staticmethod(replaced_by_pep8("convertToDate", convert_to_date)) + convertToDatetime = staticmethod(replaced_by_pep8("convertToDatetime", convert_to_datetime)) + stripHTMLTags = staticmethod(replaced_by_pep8("stripHTMLTags", strip_html_tags)) + upcaseTokens = staticmethod(replaced_by_pep8("upcaseTokens", upcase_tokens)) + downcaseTokens = staticmethod(replaced_by_pep8("downcaseTokens", downcase_tokens)) + # fmt: on + + +_builtin_exprs = [ + v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement) +]