ZTWHHH commited on
Commit
47d3316
·
verified ·
1 Parent(s): c676baa

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +14 -0
  2. mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/__init__.cpython-310.pyc +0 -0
  3. mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/__main__.cpython-310.pyc +0 -0
  4. mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/cextension.cpython-310.pyc +0 -0
  5. mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/functional.cpython-310.pyc +0 -0
  6. mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/utils.cpython-310.pyc +0 -0
  7. mgm/lib/python3.10/site-packages/bitsandbytes/autograd/__pycache__/__init__.cpython-310.pyc +0 -0
  8. mgm/lib/python3.10/site-packages/bitsandbytes/autograd/__pycache__/_functions.cpython-310.pyc +0 -0
  9. mgm/lib/python3.10/site-packages/bitsandbytes/cextension.py +42 -0
  10. mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/__init__.py +0 -0
  11. mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/__pycache__/__init__.cpython-310.pyc +0 -0
  12. mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/__pycache__/env_vars.cpython-310.pyc +0 -0
  13. mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/__pycache__/main.cpython-310.pyc +0 -0
  14. mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/env_vars.py +52 -0
  15. mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/main.py +364 -0
  16. mgm/lib/python3.10/site-packages/bitsandbytes/functional.py +2404 -0
  17. mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda110.so +3 -0
  18. mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda110_nocublaslt.so +3 -0
  19. mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda111.so +3 -0
  20. mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda111_nocublaslt.so +3 -0
  21. mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda114.so +3 -0
  22. mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda115_nocublaslt.so +3 -0
  23. mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda117_nocublaslt.so +3 -0
  24. mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda118_nocublaslt.so +3 -0
  25. mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda120.so +3 -0
  26. mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda120_nocublaslt.so +3 -0
  27. mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121.so +3 -0
  28. mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121_nocublaslt.so +3 -0
  29. mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda122.so +3 -0
  30. mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda122_nocublaslt.so +3 -0
  31. mgm/lib/python3.10/site-packages/bitsandbytes/nn/__init__.py +6 -0
  32. mgm/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/__init__.cpython-310.pyc +0 -0
  33. mgm/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/modules.cpython-310.pyc +0 -0
  34. mgm/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/triton_based_modules.cpython-310.pyc +0 -0
  35. mgm/lib/python3.10/site-packages/bitsandbytes/nn/modules.py +518 -0
  36. mgm/lib/python3.10/site-packages/bitsandbytes/nn/triton_based_modules.py +258 -0
  37. mgm/lib/python3.10/site-packages/bitsandbytes/optim/__init__.py +16 -0
  38. mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/__init__.cpython-310.pyc +0 -0
  39. mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/adagrad.cpython-310.pyc +0 -0
  40. mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/adam.cpython-310.pyc +0 -0
  41. mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/adamw.cpython-310.pyc +0 -0
  42. mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/lamb.cpython-310.pyc +0 -0
  43. mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/lars.cpython-310.pyc +0 -0
  44. mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/lion.cpython-310.pyc +0 -0
  45. mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/optimizer.cpython-310.pyc +0 -0
  46. mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/rmsprop.cpython-310.pyc +0 -0
  47. mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/sgd.cpython-310.pyc +0 -0
  48. mgm/lib/python3.10/site-packages/bitsandbytes/optim/adagrad.py +132 -0
  49. mgm/lib/python3.10/site-packages/bitsandbytes/optim/adam.py +273 -0
  50. mgm/lib/python3.10/site-packages/bitsandbytes/optim/adamw.py +39 -0
.gitattributes CHANGED
@@ -1110,3 +1110,17 @@ mgm/lib/python3.10/site-packages/pandas/_libs/writers.cpython-310-x86_64-linux-g
1110
  mgm/lib/python3.10/site-packages/pandas/_libs/groupby.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1111
  mgm/lib/python3.10/site-packages/pandas/_libs/hashing.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1112
  mgm/lib/python3.10/site-packages/pandas/_libs/index.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1110
  mgm/lib/python3.10/site-packages/pandas/_libs/groupby.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1111
  mgm/lib/python3.10/site-packages/pandas/_libs/hashing.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1112
  mgm/lib/python3.10/site-packages/pandas/_libs/index.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1113
+ mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121.so filter=lfs diff=lfs merge=lfs -text
1114
+ mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda111_nocublaslt.so filter=lfs diff=lfs merge=lfs -text
1115
+ mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda118_nocublaslt.so filter=lfs diff=lfs merge=lfs -text
1116
+ mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda120.so filter=lfs diff=lfs merge=lfs -text
1117
+ mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda111.so filter=lfs diff=lfs merge=lfs -text
1118
+ mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda120_nocublaslt.so filter=lfs diff=lfs merge=lfs -text
1119
+ mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda114.so filter=lfs diff=lfs merge=lfs -text
1120
+ mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121_nocublaslt.so filter=lfs diff=lfs merge=lfs -text
1121
+ mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda110_nocublaslt.so filter=lfs diff=lfs merge=lfs -text
1122
+ mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda115_nocublaslt.so filter=lfs diff=lfs merge=lfs -text
1123
+ mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda117_nocublaslt.so filter=lfs diff=lfs merge=lfs -text
1124
+ mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda122.so filter=lfs diff=lfs merge=lfs -text
1125
+ mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda122_nocublaslt.so filter=lfs diff=lfs merge=lfs -text
1126
+ mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda110.so filter=lfs diff=lfs merge=lfs -text
mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (697 Bytes). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (4.22 kB). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/cextension.cpython-310.pyc ADDED
Binary file (1.45 kB). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/functional.cpython-310.pyc ADDED
Binary file (55.3 kB). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/__pycache__/utils.cpython-310.pyc ADDED
Binary file (6.21 kB). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/autograd/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (256 Bytes). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/autograd/__pycache__/_functions.cpython-310.pyc ADDED
Binary file (14.8 kB). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/cextension.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ctypes as ct
2
+ import os
3
+ import torch
4
+
5
+ from pathlib import Path
6
+ from warnings import warn
7
+
8
+ from bitsandbytes.cuda_setup.main import CUDASetup
9
+
10
+
11
+ setup = CUDASetup.get_instance()
12
+ if setup.initialized != True:
13
+ setup.run_cuda_setup()
14
+
15
+ lib = setup.lib
16
+ try:
17
+ if lib is None and torch.cuda.is_available():
18
+ CUDASetup.get_instance().generate_instructions()
19
+ CUDASetup.get_instance().print_log_stack()
20
+ raise RuntimeError('''
21
+ CUDA Setup failed despite GPU being available. Please run the following command to get more information:
22
+
23
+ python -m bitsandbytes
24
+
25
+ Inspect the output of the command and see if you can locate CUDA libraries. You might need to add them
26
+ to your LD_LIBRARY_PATH. If you suspect a bug, please take the information from python -m bitsandbytes
27
+ and open an issue at: https://github.com/TimDettmers/bitsandbytes/issues''')
28
+ lib.cadam32bit_grad_fp32 # runs on an error if the library could not be found -> COMPILED_WITH_CUDA=False
29
+ lib.get_context.restype = ct.c_void_p
30
+ lib.get_cusparse.restype = ct.c_void_p
31
+ lib.cget_managed_ptr.restype = ct.c_void_p
32
+ COMPILED_WITH_CUDA = True
33
+ except AttributeError as ex:
34
+ warn("The installed version of bitsandbytes was compiled without GPU support. "
35
+ "8-bit optimizers, 8-bit multiplication, and GPU quantization are unavailable.")
36
+ COMPILED_WITH_CUDA = False
37
+ print(str(ex))
38
+
39
+
40
+ # print the setup details after checking for errors so we do not print twice
41
+ #if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
42
+ #setup.print_log_stack()
mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/__init__.py ADDED
File without changes
mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (172 Bytes). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/__pycache__/env_vars.cpython-310.pyc ADDED
Binary file (1.61 kB). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/__pycache__/main.cpython-310.pyc ADDED
Binary file (14.7 kB). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/env_vars.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Dict
3
+
4
+
5
+ def to_be_ignored(env_var: str, value: str) -> bool:
6
+ ignorable = {
7
+ "PWD", # PWD: this is how the shell keeps track of the current working dir
8
+ "OLDPWD",
9
+ "SSH_AUTH_SOCK", # SSH stuff, therefore unrelated
10
+ "SSH_TTY",
11
+ "HOME", # Linux shell default
12
+ "TMUX", # Terminal Multiplexer
13
+ "XDG_DATA_DIRS", # XDG: Desktop environment stuff
14
+ "XDG_GREETER_DATA_DIR", # XDG: Desktop environment stuff
15
+ "XDG_RUNTIME_DIR",
16
+ "MAIL", # something related to emails
17
+ "SHELL", # binary for currently invoked shell
18
+ "DBUS_SESSION_BUS_ADDRESS", # hardware related
19
+ "PATH", # this is for finding binaries, not libraries
20
+ "LESSOPEN", # related to the `less` command
21
+ "LESSCLOSE",
22
+ "_", # current Python interpreter
23
+ }
24
+ return env_var in ignorable
25
+
26
+
27
+ def might_contain_a_path(candidate: str) -> bool:
28
+ return "/" in candidate
29
+
30
+
31
+ def is_active_conda_env(env_var: str) -> bool:
32
+ return "CONDA_PREFIX" == env_var
33
+
34
+
35
+ def is_other_conda_env_var(env_var: str) -> bool:
36
+ return "CONDA" in env_var
37
+
38
+
39
+ def is_relevant_candidate_env_var(env_var: str, value: str) -> bool:
40
+ return is_active_conda_env(env_var) or (
41
+ might_contain_a_path(value) and not
42
+ is_other_conda_env_var(env_var) and not
43
+ to_be_ignored(env_var, value)
44
+ )
45
+
46
+
47
+ def get_potentially_lib_path_containing_env_vars() -> Dict[str, str]:
48
+ return {
49
+ env_var: value
50
+ for env_var, value in os.environ.items()
51
+ if is_relevant_candidate_env_var(env_var, value)
52
+ }
mgm/lib/python3.10/site-packages/bitsandbytes/cuda_setup/main.py ADDED
@@ -0,0 +1,364 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ extract factors the build is dependent on:
3
+ [X] compute capability
4
+ [ ] TODO: Q - What if we have multiple GPUs of different makes?
5
+ - CUDA version
6
+ - Software:
7
+ - CPU-only: only CPU quantization functions (no optimizer, no matrix multipl)
8
+ - CuBLAS-LT: full-build 8-bit optimizer
9
+ - no CuBLAS-LT: no 8-bit matrix multiplication (`nomatmul`)
10
+
11
+ evaluation:
12
+ - if paths faulty, return meaningful error
13
+ - else:
14
+ - determine CUDA version
15
+ - determine capabilities
16
+ - based on that set the default path
17
+ """
18
+
19
+ import ctypes as ct
20
+ import os
21
+ import errno
22
+ import torch
23
+ from warnings import warn
24
+ from itertools import product
25
+
26
+ from pathlib import Path
27
+ from typing import Set, Union
28
+ from .env_vars import get_potentially_lib_path_containing_env_vars
29
+
30
+ # these are the most common libs names
31
+ # libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
32
+ # we have libcudart.so.11.0 which causes a lot of errors before
33
+ # not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
34
+ CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0']
35
+
36
+ # this is a order list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
37
+ backup_paths = []
38
+ backup_paths.append('$CONDA_PREFIX/lib/libcudart.so.11.0')
39
+
40
+ class CUDASetup:
41
+ _instance = None
42
+
43
+ def __init__(self):
44
+ raise RuntimeError("Call get_instance() instead")
45
+
46
+ def generate_instructions(self):
47
+ if getattr(self, 'error', False): return
48
+ print(self.error)
49
+ self.error = True
50
+ if not self.cuda_available:
51
+ self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected or CUDA not installed.')
52
+ self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.')
53
+ self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:')
54
+ self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null')
55
+ self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a')
56
+ self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc')
57
+ self.add_log_entry('CUDA SETUP: Solution 3): For a missing CUDA runtime library (libcudart.so), use `find / -name libcudart.so* and follow with step (2b)')
58
+ return
59
+
60
+ if self.cudart_path is None:
61
+ self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.')
62
+ self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable')
63
+ self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null')
64
+ self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a')
65
+ self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc')
66
+ self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.')
67
+ self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh')
68
+ self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.')
69
+ self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local')
70
+ return
71
+
72
+ make_cmd = f'CUDA_VERSION={self.cuda_version_string}'
73
+ if len(self.cuda_version_string) < 3:
74
+ make_cmd += ' make cuda92'
75
+ elif self.cuda_version_string == '110':
76
+ make_cmd += ' make cuda110'
77
+ elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0:
78
+ make_cmd += ' make cuda11x'
79
+ elif self.cuda_version_string == '100':
80
+ self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.')
81
+ self.add_log_entry('CUDA SETUP: Before you try again running bitsandbytes, make sure old CUDA 10.0 versions are uninstalled and removed from $LD_LIBRARY_PATH variables.')
82
+ return
83
+
84
+
85
+ has_cublaslt = is_cublasLt_compatible(self.cc)
86
+ if not has_cublaslt:
87
+ make_cmd += '_nomatmul'
88
+
89
+ self.add_log_entry('CUDA SETUP: Something unexpected happened. Please compile from source:')
90
+ self.add_log_entry('git clone https://github.com/TimDettmers/bitsandbytes.git')
91
+ self.add_log_entry('cd bitsandbytes')
92
+ self.add_log_entry(make_cmd)
93
+ self.add_log_entry('python setup.py install')
94
+
95
+ def initialize(self):
96
+ if not getattr(self, 'initialized', False):
97
+ self.has_printed = False
98
+ self.lib = None
99
+ self.initialized = False
100
+ self.error = False
101
+
102
+ def manual_override(self):
103
+ if torch.cuda.is_available():
104
+ if 'BNB_CUDA_VERSION' in os.environ:
105
+ if len(os.environ['BNB_CUDA_VERSION']) > 0:
106
+ warn((f'\n\n{"="*80}\n'
107
+ 'WARNING: Manual override via BNB_CUDA_VERSION env variable detected!\n'
108
+ 'BNB_CUDA_VERSION=XXX can be used to load a bitsandbytes version that is different from the PyTorch CUDA version.\n'
109
+ 'If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\n'
110
+ 'If you use the manual override make sure the right libcudart.so is in your LD_LIBRARY_PATH\n'
111
+ 'For example by adding the following to your .bashrc: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path_to_cuda_dir/lib64\n'
112
+ f'Loading CUDA version: BNB_CUDA_VERSION={os.environ["BNB_CUDA_VERSION"]}'
113
+ f'\n{"="*80}\n\n'))
114
+ self.binary_name = self.binary_name[:-6] + f'{os.environ["BNB_CUDA_VERSION"]}.so'
115
+
116
+ def run_cuda_setup(self):
117
+ self.initialized = True
118
+ self.cuda_setup_log = []
119
+
120
+ binary_name, cudart_path, cc, cuda_version_string = evaluate_cuda_setup()
121
+ self.cudart_path = cudart_path
122
+ self.cuda_available = torch.cuda.is_available()
123
+ self.cc = cc
124
+ self.cuda_version_string = cuda_version_string
125
+ self.binary_name = binary_name
126
+ self.manual_override()
127
+
128
+ package_dir = Path(__file__).parent.parent
129
+ binary_path = package_dir / self.binary_name
130
+
131
+ try:
132
+ if not binary_path.exists():
133
+ self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?")
134
+ legacy_binary_name = "libbitsandbytes_cpu.so"
135
+ self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
136
+ binary_path = package_dir / legacy_binary_name
137
+ if not binary_path.exists() or torch.cuda.is_available():
138
+ self.add_log_entry('')
139
+ self.add_log_entry('='*48 + 'ERROR' + '='*37)
140
+ self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:')
141
+ self.add_log_entry('1. You need to manually override the PyTorch CUDA version. Please see: '
142
+ '"https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md')
143
+ self.add_log_entry('2. CUDA driver not installed')
144
+ self.add_log_entry('3. CUDA not installed')
145
+ self.add_log_entry('4. You have multiple conflicting CUDA libraries')
146
+ self.add_log_entry('5. Required library not pre-compiled for this bitsandbytes release!')
147
+ self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.')
148
+ self.add_log_entry('CUDA SETUP: The CUDA version for the compile might depend on your conda install. Inspect CUDA version via `conda list | grep cuda`.')
149
+ self.add_log_entry('='*80)
150
+ self.add_log_entry('')
151
+ self.generate_instructions()
152
+ raise Exception('CUDA SETUP: Setup Failed!')
153
+ self.lib = ct.cdll.LoadLibrary(binary_path)
154
+ else:
155
+ self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path}...")
156
+ self.lib = ct.cdll.LoadLibrary(binary_path)
157
+ except Exception as ex:
158
+ self.add_log_entry(str(ex))
159
+
160
+ def add_log_entry(self, msg, is_warning=False):
161
+ self.cuda_setup_log.append((msg, is_warning))
162
+
163
+ def print_log_stack(self):
164
+ for msg, is_warning in self.cuda_setup_log:
165
+ if is_warning:
166
+ warn(msg)
167
+ else:
168
+ print(msg)
169
+
170
+ @classmethod
171
+ def get_instance(cls):
172
+ if cls._instance is None:
173
+ cls._instance = cls.__new__(cls)
174
+ cls._instance.initialize()
175
+ return cls._instance
176
+
177
+
178
+ def is_cublasLt_compatible(cc):
179
+ has_cublaslt = False
180
+ if cc is not None:
181
+ cc_major, cc_minor = cc.split('.')
182
+ if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5):
183
+ CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU! \
184
+ If you run into issues with 8-bit matmul, you can try 4-bit quantization: https://huggingface.co/blog/4bit-transformers-bitsandbytes", is_warning=True)
185
+ else:
186
+ has_cublaslt = True
187
+ return has_cublaslt
188
+
189
+ def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]:
190
+ return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path}
191
+
192
+
193
+ def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
194
+ existent_directories: Set[Path] = set()
195
+ for path in candidate_paths:
196
+ try:
197
+ if path.exists():
198
+ existent_directories.add(path)
199
+ except OSError as exc:
200
+ if exc.errno != errno.ENAMETOOLONG:
201
+ raise exc
202
+ except PermissionError as pex:
203
+ pass
204
+
205
+ non_existent_directories: Set[Path] = candidate_paths - existent_directories
206
+ if non_existent_directories:
207
+ CUDASetup.get_instance().add_log_entry("The following directories listed in your path were found to "
208
+ f"be non-existent: {non_existent_directories}", is_warning=False)
209
+
210
+ return existent_directories
211
+
212
+
213
+ def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]:
214
+ paths = set()
215
+ for libname in CUDA_RUNTIME_LIBS:
216
+ for path in candidate_paths:
217
+ if (path / libname).is_file():
218
+ paths.add(path / libname)
219
+ return paths
220
+
221
+
222
+ def resolve_paths_list(paths_list_candidate: str) -> Set[Path]:
223
+ """
224
+ Searches a given environmental var for the CUDA runtime library,
225
+ i.e. `libcudart.so`.
226
+ """
227
+ return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate))
228
+
229
+
230
+ def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]:
231
+ return get_cuda_runtime_lib_paths(
232
+ resolve_paths_list(paths_list_candidate)
233
+ )
234
+
235
+
236
+ def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None:
237
+ if len(results_paths) > 1:
238
+ warning_msg = (
239
+ f"Found duplicate {CUDA_RUNTIME_LIBS} files: {results_paths}.. "
240
+ "We select the PyTorch default libcudart.so, which is {torch.version.cuda},"
241
+ "but this might missmatch with the CUDA version that is needed for bitsandbytes."
242
+ "To override this behavior set the BNB_CUDA_VERSION=<version string, e.g. 122> environmental variable"
243
+ "For example, if you want to use the CUDA version 122"
244
+ "BNB_CUDA_VERSION=122 python ..."
245
+ "OR set the environmental variable in your .bashrc: export BNB_CUDA_VERSION=122"
246
+ "In the case of a manual override, make sure you set the LD_LIBRARY_PATH, e.g."
247
+ "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.2")
248
+ CUDASetup.get_instance().add_log_entry(warning_msg, is_warning=True)
249
+
250
+
251
+ def determine_cuda_runtime_lib_path() -> Union[Path, None]:
252
+ """
253
+ Searches for a cuda installations, in the following order of priority:
254
+ 1. active conda env
255
+ 2. LD_LIBRARY_PATH
256
+ 3. any other env vars, while ignoring those that
257
+ - are known to be unrelated (see `bnb.cuda_setup.env_vars.to_be_ignored`)
258
+ - don't contain the path separator `/`
259
+
260
+ If multiple libraries are found in part 3, we optimistically try one,
261
+ while giving a warning message.
262
+ """
263
+ candidate_env_vars = get_potentially_lib_path_containing_env_vars()
264
+
265
+ cuda_runtime_libs = set()
266
+ if "CONDA_PREFIX" in candidate_env_vars:
267
+ conda_libs_path = Path(candidate_env_vars["CONDA_PREFIX"]) / "lib"
268
+
269
+ conda_cuda_libs = find_cuda_lib_in(str(conda_libs_path))
270
+ warn_in_case_of_duplicates(conda_cuda_libs)
271
+
272
+ if conda_cuda_libs:
273
+ cuda_runtime_libs.update(conda_cuda_libs)
274
+
275
+ CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["CONDA_PREFIX"]} did not contain '
276
+ f'{CUDA_RUNTIME_LIBS} as expected! Searching further paths...', is_warning=True)
277
+
278
+ if "LD_LIBRARY_PATH" in candidate_env_vars:
279
+ lib_ld_cuda_libs = find_cuda_lib_in(candidate_env_vars["LD_LIBRARY_PATH"])
280
+
281
+ if lib_ld_cuda_libs:
282
+ cuda_runtime_libs.update(lib_ld_cuda_libs)
283
+ warn_in_case_of_duplicates(lib_ld_cuda_libs)
284
+
285
+ CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["LD_LIBRARY_PATH"]} did not contain '
286
+ f'{CUDA_RUNTIME_LIBS} as expected! Searching further paths...', is_warning=True)
287
+
288
+ remaining_candidate_env_vars = {
289
+ env_var: value for env_var, value in candidate_env_vars.items()
290
+ if env_var not in {"CONDA_PREFIX", "LD_LIBRARY_PATH"}
291
+ }
292
+
293
+ cuda_runtime_libs = set()
294
+ for env_var, value in remaining_candidate_env_vars.items():
295
+ cuda_runtime_libs.update(find_cuda_lib_in(value))
296
+
297
+ if len(cuda_runtime_libs) == 0:
298
+ CUDASetup.get_instance().add_log_entry('CUDA_SETUP: WARNING! libcudart.so not found in any environmental path. Searching in backup paths...')
299
+ cuda_runtime_libs.update(find_cuda_lib_in('/usr/local/cuda/lib64'))
300
+
301
+ warn_in_case_of_duplicates(cuda_runtime_libs)
302
+
303
+ cuda_setup = CUDASetup.get_instance()
304
+ cuda_setup.add_log_entry(f'DEBUG: Possible options found for libcudart.so: {cuda_runtime_libs}')
305
+
306
+ return next(iter(cuda_runtime_libs)) if cuda_runtime_libs else None
307
+
308
+
309
+ # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION
310
+ def get_cuda_version():
311
+ major, minor = map(int, torch.version.cuda.split("."))
312
+
313
+ if major < 11:
314
+ CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currently not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!')
315
+
316
+ return f'{major}{minor}'
317
+
318
+ def get_compute_capabilities():
319
+ ccs = []
320
+ for i in range(torch.cuda.device_count()):
321
+ cc_major, cc_minor = torch.cuda.get_device_capability(torch.cuda.device(i))
322
+ ccs.append(f"{cc_major}.{cc_minor}")
323
+
324
+ return ccs
325
+
326
+
327
+ def evaluate_cuda_setup():
328
+ cuda_setup = CUDASetup.get_instance()
329
+ if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
330
+ cuda_setup.add_log_entry('')
331
+ cuda_setup.add_log_entry('='*35 + 'BUG REPORT' + '='*35)
332
+ cuda_setup.add_log_entry(('Welcome to bitsandbytes. For bug reports, please run\n\npython -m bitsandbytes\n\n'),
333
+ ('and submit this information together with your error trace to: https://github.com/TimDettmers/bitsandbytes/issues'))
334
+ cuda_setup.add_log_entry('='*80)
335
+ if not torch.cuda.is_available(): return 'libbitsandbytes_cpu.so', None, None, None
336
+
337
+ cudart_path = determine_cuda_runtime_lib_path()
338
+ ccs = get_compute_capabilities()
339
+ ccs.sort()
340
+ cc = ccs[-1] # we take the highest capability
341
+ cuda_version_string = get_cuda_version()
342
+
343
+ cuda_setup.add_log_entry(f"CUDA SETUP: PyTorch settings found: CUDA_VERSION={cuda_version_string}, Highest Compute Capability: {cc}.")
344
+ cuda_setup.add_log_entry(f"CUDA SETUP: To manually override the PyTorch CUDA version please see:"
345
+ "https://github.com/TimDettmers/bitsandbytes/blob/main/how_to_use_nonpytorch_cuda.md")
346
+
347
+
348
+ # 7.5 is the minimum CC vor cublaslt
349
+ has_cublaslt = is_cublasLt_compatible(cc)
350
+
351
+ # TODO:
352
+ # (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible)
353
+ # (2) Multiple CUDA versions installed
354
+
355
+ # we use ls -l instead of nvcc to determine the cuda version
356
+ # since most installations will have the libcudart.so installed, but not the compiler
357
+
358
+ if has_cublaslt:
359
+ binary_name = f"libbitsandbytes_cuda{cuda_version_string}.so"
360
+ else:
361
+ "if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so"
362
+ binary_name = f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt.so"
363
+
364
+ return binary_name, cudart_path, cc, cuda_version_string
mgm/lib/python3.10/site-packages/bitsandbytes/functional.py ADDED
@@ -0,0 +1,2404 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ import ctypes as ct
6
+ import itertools
7
+ import operator
8
+ import random
9
+ import torch
10
+ import itertools
11
+ import math
12
+ from scipy.stats import norm
13
+ import numpy as np
14
+
15
+ from functools import reduce # Required in Python 3
16
+ from typing import Tuple
17
+ from torch import Tensor
18
+
19
+ from .cextension import COMPILED_WITH_CUDA, lib
20
+
21
+
def prod(iterable):
    """Return the product of all elements of *iterable* (1 for an empty iterable).

    Stand-in for ``math.prod``, which is unavailable on Python < 3.8.
    """
    result = 1
    for item in iterable:
        result *= item
    return result
25
+
# Lazily populated cache of named quantization code books,
# e.g. name2qmap["dynamic"] = create_dynamic_map() (see quantize_blockwise).
name2qmap = {}
27
+
if COMPILED_WITH_CUDA:
    """C FUNCTIONS FOR OPTIMIZERS"""
    # Lookup tables mapping an optimizer name to its C kernel entry points in
    # the compiled bitsandbytes library. Tuples are ordered by gradient dtype:
    # (fp32, fp16) or (fp32, fp16, bf16) where a bf16 kernel exists.

    # Optimizers with 32-bit state.
    str2optimizer32bit = {}
    str2optimizer32bit["adam"] = (lib.cadam32bit_grad_fp32, lib.cadam32bit_grad_fp16, lib.cadam32bit_grad_bf16)
    str2optimizer32bit["momentum"] = (
        lib.cmomentum32bit_grad_32,
        lib.cmomentum32bit_grad_16,
    )
    str2optimizer32bit["rmsprop"] = (
        lib.crmsprop32bit_grad_32,
        lib.crmsprop32bit_grad_16,
    )
    str2optimizer32bit["lion"] = (lib.clion32bit_grad_fp32, lib.clion32bit_grad_fp16, lib.clion32bit_grad_bf16)
    str2optimizer32bit["adagrad"] = (
        lib.cadagrad32bit_grad_32,
        lib.cadagrad32bit_grad_16,
    )

    # Optimizers with statically quantized 8-bit state.
    str2optimizer8bit = {}
    str2optimizer8bit["adam"] = (
        lib.cadam_static_8bit_grad_32,
        lib.cadam_static_8bit_grad_16,
    )
    str2optimizer8bit["momentum"] = (
        lib.cmomentum_static_8bit_grad_32,
        lib.cmomentum_static_8bit_grad_16,
    )
    str2optimizer8bit["rmsprop"] = (
        lib.crmsprop_static_8bit_grad_32,
        lib.crmsprop_static_8bit_grad_16,
    )
    str2optimizer8bit["lion"] = (
        lib.clion_static_8bit_grad_32,
        lib.clion_static_8bit_grad_16,
    )
    # NOTE: lamb reuses the adam kernels and lars reuses the momentum kernels.
    str2optimizer8bit["lamb"] = (
        lib.cadam_static_8bit_grad_32,
        lib.cadam_static_8bit_grad_16,
    )
    str2optimizer8bit["lars"] = (
        lib.cmomentum_static_8bit_grad_32,
        lib.cmomentum_static_8bit_grad_16,
    )

    # Optimizers with blockwise-quantized 8-bit state.
    str2optimizer8bit_blockwise = {}
    str2optimizer8bit_blockwise["adam"] = (
        lib.cadam_8bit_blockwise_grad_fp32,
        lib.cadam_8bit_blockwise_grad_fp16,
        lib.cadam_8bit_blockwise_grad_bf16,
    )
    str2optimizer8bit_blockwise["momentum"] = (
        lib.cmomentum_8bit_blockwise_grad_fp32,
        lib.cmomentum_8bit_blockwise_grad_fp16,
    )
    str2optimizer8bit_blockwise["rmsprop"] = (
        lib.crmsprop_8bit_blockwise_grad_fp32,
        lib.crmsprop_8bit_blockwise_grad_fp16,
    )
    str2optimizer8bit_blockwise["lion"] = (
        lib.clion_8bit_blockwise_grad_fp32,
        lib.clion_8bit_blockwise_grad_fp16,
        lib.clion_8bit_blockwise_grad_bf16,
    )
    str2optimizer8bit_blockwise["adagrad"] = (
        lib.cadagrad_8bit_blockwise_grad_fp32,
        lib.cadagrad_8bit_blockwise_grad_fp16,
    )
95
+
class GlobalPageManager:
    """Process-wide singleton registry of paged (CUDA unified-memory) tensors.

    Obtain it via ``GlobalPageManager.get_instance()``; direct construction
    is intentionally disallowed.
    """
    _instance = None  # singleton storage

    def __init__(self):
        raise RuntimeError("Call get_instance() instead")

    def initialize(self):
        # Paged tensors registered for bulk prefetching.
        self.paged_tensors = []

    @classmethod
    def get_instance(cls):
        """Return the lazily created singleton instance."""
        if cls._instance is None:
            cls._instance = cls.__new__(cls)
            cls._instance.initialize()
        return cls._instance

    def prefetch_all(self, to_cpu=False):
        """Prefetch every registered paged tensor (optionally to CPU memory).

        Iterates in reverse registration order: the tensors added first are
        assumed to be used first, so they are swapped in last and are the
        least likely to have been evicted again when needed.
        """
        for t in self.paged_tensors[::-1]:
            prefetch_tensor(t, to_cpu)
118
+
119
+
120
+
class CUBLAS_Context:
    """Singleton cache of one cuBLAS context handle per CUDA device."""
    _instance = None  # singleton storage

    def __init__(self):
        raise RuntimeError("Call get_instance() instead")

    def initialize(self):
        # Maps device index -> ctypes pointer to the cuBLAS context.
        self.context = {}

    @classmethod
    def get_instance(cls):
        """Return the lazily created singleton instance."""
        if cls._instance is None:
            cls._instance = cls.__new__(cls)
            cls._instance.initialize()
        return cls._instance

    def get_context(self, device):
        """Return (creating on first use) the cuBLAS context for ``device``.

        Temporarily switches the active CUDA device so the context is created
        on the right GPU, then restores the previously active device.
        """
        if device.index not in self.context:
            prev_device = torch.cuda.current_device()
            torch.cuda.set_device(device)
            self.context[device.index] = ct.c_void_p(lib.get_context())
            torch.cuda.set_device(prev_device)
        return self.context[device.index]
144
+
145
+
class Cusparse_Context:
    """Singleton holding the process-wide cuSPARSE context handle."""
    _instance = None  # singleton storage

    def __init__(self):
        raise RuntimeError("Call get_instance() instead")

    def initialize(self):
        # ctypes pointer to the cuSPARSE handle created by the C library.
        self.context = ct.c_void_p(lib.get_cusparse())

    @classmethod
    def get_instance(cls):
        """Return the lazily created singleton instance."""
        if cls._instance is None:
            cls._instance = cls.__new__(cls)
            cls._instance.initialize()
        return cls._instance
161
+
# Size in bytes of a single element for each dtype supported by the
# paged-tensor helpers below (see get_paged / prefetch_tensor).
dtype2bytes = {}
dtype2bytes[torch.float32] = 4
dtype2bytes[torch.float16] = 2
dtype2bytes[torch.bfloat16] = 2
dtype2bytes[torch.uint8] = 1
dtype2bytes[torch.int8] = 1
168
+
def get_paged(*shape, dtype=torch.float32, device=torch.device('cuda', index=0)):
    """Allocate a tensor of ``shape`` backed by CUDA unified (managed) memory.

    The returned tensor is tagged ``is_paged = True`` and records its home GPU
    in ``page_deviceid`` so it can later be migrated between host and device
    (see ``prefetch_tensor``).
    """
    num_bytes = dtype2bytes[dtype]*prod(shape)
    # Ask the C library for a cudaMallocManaged pointer of the right size.
    cuda_ptr = lib.cget_managed_ptr(ct.c_size_t(num_bytes))
    c_ptr = ct.cast(cuda_ptr, ct.POINTER(ct.c_int))
    new_array = np.ctypeslib.as_array(c_ptr, shape=shape)
    # torch.frombuffer shares memory with the managed buffer, so `out`
    # aliases the unified-memory allocation — no copy is made.
    out = torch.frombuffer(new_array, dtype=dtype, count=prod(shape)).view(shape)
    out.is_paged = True
    out.page_deviceid = device.index
    return out
178
+
def prefetch_tensor(A, to_cpu=False):
    """Asynchronously migrate the pages of a paged tensor.

    If ``to_cpu`` is True the pages are prefetched to host memory (device id
    -1, the CUDA "CPU device" convention); otherwise they are prefetched back
    to the GPU the tensor was allocated on (``A.page_deviceid``).
    """
    assert A.is_paged, 'Only paged tensors can be prefetched!'
    if to_cpu:
        deviceid = -1  # CUDA convention for the CPU destination
    else:
        deviceid = A.page_deviceid

    num_bytes = dtype2bytes[A.dtype]*A.numel()
    lib.cprefetch(get_ptr(A), ct.c_size_t(num_bytes), ct.c_int32(deviceid))
188
+
def elementwise_func(func_name, A, B, value, prefetch=True):
    """Run the elementwise CUDA kernel ``c<func_name>_<dtype>`` on ``A`` (and optionally ``B``).

    Parameters
    ----------
    func_name : str
        Base name of the C kernel (e.g. 'fill', 'arange', '_mul').
    A : torch.Tensor
        Primary operand; dispatch is on its dtype (float32 or uint8 only).
    B : torch.Tensor or None
        Optional second operand passed to the kernel.
    value : float or int
        Scalar argument forwarded to the kernel.
    prefetch : bool
        If True, prefetch managed tensors to the GPU before launching.

    Raises
    ------
    NotImplementedError
        If no kernel exists for ``func_name`` with A's dtype.
    """
    func = None
    if A.dtype == torch.float32:
        func = getattr(lib, f'c{func_name}_fp32', None)
        cvalue = ct.c_float(value)
    elif A.dtype == torch.uint8:
        func = getattr(lib, f'c{func_name}_uint8', None)
        cvalue = ct.c_uint8(value)

    if func is None:
        raise NotImplementedError(f'Function not implemented: {func_name}')

    is_managed = getattr(A, 'is_managed', False)
    if is_managed and prefetch:
        prefetch_tensor(A)
        if B is not None:
            prefetch_tensor(B)

    func(get_ptr(A), get_ptr(B), cvalue, ct.c_int64(A.numel()))
    # Fix: plain torch tensors have no `is_paged` attribute and B may be None,
    # so the original `A.is_paged or B.is_paged` raised AttributeError for
    # every non-paged call. getattr with a False default handles both cases.
    if getattr(A, 'is_paged', False) or getattr(B, 'is_paged', False):
        # Paged kernels are fully asynchronous. If we return from this
        # function, we want the tensor to already be in its final state,
        # so we synchronize.
        torch.cuda.synchronize()
212
+
def fill(A, value, device=None, prefetch=True):
    """Fill tensor ``A`` in place with ``value``.

    ``device`` is unused and kept only for backward API compatibility.
    Fix: ``prefetch`` is now forwarded (it was previously accepted but ignored).
    """
    elementwise_func('fill', A, None, value, prefetch)


def arange(A, device=None):
    """Overwrite ``A`` in place with the sequence 0..n-1 (``device`` unused)."""
    elementwise_func('arange', A, None, 0)


def _mul(A, B, device=None):
    """In-place elementwise multiply ``A *= B`` (``device`` unused)."""
    elementwise_func('_mul', A, B, 0)
216
+
217
+
def create_linear_map(signed=True, total_bits=8, add_zero=True):
    """Create a 256-entry linearly spaced quantization map over [-1, 1] (or [0, 1] if unsigned).

    When ``add_zero`` is set (or fewer than 8 bits are simulated), one slot is
    reserved for signed maps so the grid can be centered on zero.
    """
    low = -1.0 if signed else 0.0
    n_values = 2 ** total_bits
    if add_zero or total_bits < 8:
        # Since we simulate fewer bits by padding the code book with zeros,
        # signed maps give up one value so the grid stays centered on zero.
        n_values = 2 ** total_bits - 1 if signed else 2 ** total_bits
    grid = torch.linspace(low, 1.0, n_values)
    missing = 256 - grid.numel()
    if missing == 0:
        return grid
    # Zero-pad the middle so the returned code book always has 256 entries.
    half = grid.numel() // 2
    return torch.Tensor(grid[:half].tolist() + [0] * missing + grid[half:].tolist())
235
+
def create_normal_map(offset=0.9677083, use_extra_value=True):
    """Build the 256-entry NF4-style normal-quantile quantization map, normalized to [-1, 1].

    Quantile positions are taken from the inverse normal CDF; ``offset`` trims
    the distribution tails. With ``use_extra_value`` the type is asymmetric
    (one extra positive level, 15 non-zero values), otherwise symmetric (14).
    """
    if use_extra_value:
        # One more positive value: this is an asymmetric data type.
        positives = norm.ppf(torch.linspace(offset, 0.5, 9)[:-1]).tolist()
        zeros = [0] * (256 - 15)  # 15 non-zero values in this data type
        negatives = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()
    else:
        positives = norm.ppf(torch.linspace(offset, 0.5, 8)[:-1]).tolist()
        zeros = [0] * (256 - 14)  # 14 non-zero values in this data type
        negatives = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()

    table = torch.Tensor(positives + zeros + negatives)
    table = table.sort().values
    table /= table.max()

    assert table.numel() == 256

    return table
257
+
def create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8):
    """Create a quantization map for a generic small-float format.

    Enumerates every value representable with the given exponent/mantissa
    split (including subnormals), sorts them, zero-pads to 256 entries when
    fewer than 8 bits are used, and normalizes the result to [-1, 1].

    Parameters
    ----------
    signed : bool
        Whether a sign bit is included.
    exponent_bits, precision_bits : int
        Exponent and mantissa widths; must satisfy e + p == total_bits - sign.
    total_bits : int
        Total bit width of the simulated format (<= 8).

    Returns
    -------
    torch.Tensor
        The 256-entry code book, normalized so max == 1.
    """
    e = exponent_bits
    p = precision_bits
    has_sign = 1 if signed else 0
    assert e+p == total_bits-has_sign
    # the exponent is biased to 2^(e-1) -1 == 0
    # (fix: removed an unused loop that precomputed `evalues`/`pvalues` — dead code)

    values = []
    lst = list(itertools.product([0, 1], repeat=precision_bits))
    bias = 2**(exponent_bits-1)
    for evalue in range(2**(exponent_bits)):
        for bit_pattern in lst:
            # Implicit leading 1 for normal numbers, 0 for subnormals.
            value = (1 if evalue != 0 else 0)
            for i, pval in enumerate(list(bit_pattern)):
                value += pval*(2**-(i+1))
            if evalue == 0:
                # subnormals
                value = value*2**-(bias)
            else:
                # normals
                value = value*2**-(evalue-bias-1)
            values.append(value)
            if signed:
                values.append(-value)

    assert len(values) == 2**total_bits
    values.sort()
    if total_bits < 8:
        # Zero-pad so the lookup table always has 256 entries.
        gap = 256 - len(values)
        for i in range(gap):
            values.append(0)
        values.sort()
    code = torch.Tensor(values)
    code /= code.max()

    return code
301
+
302
+
303
+
def create_dynamic_map(signed=True, max_exponent_bits=7, total_bits=8):
    """
    Creates the dynamic quantization map.

    The dynamic data type is made up of a dynamic exponent and
    fraction. As the exponent increases from 0 to -7 the number
    of bits available for the fraction shrinks.

    This is a generalization of the dynamic type where a certain
    number of the bits can be reserved for the linear quantization
    region (the fraction). n determines the maximum number of
    exponent bits.

    For more details see
    (8-Bit Approximations for Parallelism in Deep Learning)[https://arxiv.org/abs/1511.04561]
    """

    data = []
    # these are additional items that come from the case
    # where all the exponent bits are zero and no
    # indicator bit is present
    non_sign_bits = total_bits - (1 if signed else 0)
    additional_items = 2 ** (non_sign_bits - max_exponent_bits) - 1
    if not signed:
        additional_items = 2 * additional_items
    for i in range(max_exponent_bits):
        # Number of fraction levels available at this exponent setting.
        fraction_items = int((2 ** (i + non_sign_bits - max_exponent_bits) + 1 if signed else 2 ** (i + non_sign_bits - max_exponent_bits + 1) + 1))
        boundaries = torch.linspace(0.1, 1, fraction_items)
        # Bin centers between consecutive boundaries.
        means = (boundaries[:-1] + boundaries[1:]) / 2.0
        data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
        if signed:
            data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()

    if additional_items > 0:
        # NOTE(review): this block deliberately reuses `i` left over from the
        # loop above (final value max_exponent_bits - 1), placing the extra
        # values in the largest-exponent region. Do not refactor `i` away.
        boundaries = torch.linspace(0.1, 1, additional_items + 1)
        means = (boundaries[:-1] + boundaries[1:]) / 2.0
        data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()
        if signed:
            data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()

    data.append(0)
    data.append(1.0)

    # Zero-pad so the code book always has 256 entries.
    gap = 256 - len(data)
    for i in range(gap):
        data.append(0)

    data.sort()
    return Tensor(data)
353
+
def create_quantile_map(A, total_bits=8):
    """Build a 256-entry quantile quantization map from the values of ``A``.

    Estimates ``2**total_bits - 1`` equidistant quantiles of A's empirical
    distribution, adds a zero entry, zero-pads to 256 values, and normalizes
    by the largest absolute value.
    """
    quantiles = estimate_quantiles(A, num_quantiles=2**total_bits-1).tolist()
    quantiles.append(0)

    # Zero-pad up to the fixed 256-entry code-book size.
    while len(quantiles) < 256:
        quantiles.append(0)

    quantiles.sort()

    table = Tensor(quantiles)
    table = table/table.abs().max()
    return table
368
+
def get_special_format_str():
    """Return the preferred int8 tile layout name for the current GPU.

    'col_ampere' for compute capability 8.x, 'col_turing' otherwise
    (including when CUDA is unavailable).
    """
    if not torch.cuda.is_available():
        return 'col_turing'
    major = torch.cuda.get_device_capability()[0]
    # CC <= 7 and CC >= 9 both fall back to the Turing layout.
    return "col_ampere" if major == 8 else "col_turing"
377
+
378
+
379
+
def is_on_gpu(tensors):
    """Verify that every non-None tensor is on a (single, shared) CUDA device.

    ``None`` entries are skipped (NULL pointers are fine) and paged tensors
    count as GPU-resident. Raises TypeError if any tensor is off-GPU or if
    tensors span multiple devices; returns True otherwise.
    """
    all_gpu = True
    device_indices = set()
    for tensor in tensors:
        if tensor is None:
            continue  # NULL pointers are fine
        paged = getattr(tensor, 'is_paged', False)
        all_gpu &= (tensor.device.type == 'cuda' or paged)
        if not paged:
            device_indices.add(tensor.device.index)
    if not all_gpu:
        raise TypeError(f'All input tensors need to be on the same GPU, but found some tensors to not be on a GPU:\n {[(t.shape, t.device) for t in tensors]}')
    if len(device_indices) > 1:
        raise TypeError(f'Input tensors need to be on the same GPU, but found the following tensor and device combinations:\n {[(t.shape, t.device) for t in tensors]}')
    return all_gpu
394
+
def get_ptr(A: Tensor) -> ct.c_void_p:
    """
    Get the ctypes pointer from a PyTorch Tensor.

    Parameters
    ----------
    A : torch.tensor
        The PyTorch tensor (may be None, in which case None is returned).

    Returns
    -------
    ctypes.c_void_p
    """
    if A is None:
        return None
    return ct.c_void_p(A.data.data_ptr())
412
+
413
+
def pre_call(device):
    # Switch the active CUDA device to `device` and return the previously
    # active device index so the caller can restore it with post_call().
    prev_device = torch.cuda.current_device()
    torch.cuda.set_device(device)
    return prev_device
418
+
419
+
def post_call(prev_device):
    # Restore the CUDA device that was active before the matching pre_call().
    torch.cuda.set_device(prev_device)
422
+
423
+
def get_transform_func(dtype, orderA, orderOut, transpose=False):
    """Look up the C layout-transform kernel for the given dtype and orders.

    The symbol name is ``ctransform_{8|32}_{orderA}_to_{orderOut}_{t|n}``
    (8 for int8, 32 otherwise; 't' when transposing).

    Raises
    ------
    ValueError
        If the compiled library exposes no such transform.
    """
    name = f'ctransform_{(8 if dtype == torch.int8 else 32)}_{orderA}_to_{orderOut}_{"t" if transpose else "n"}'
    if not hasattr(lib, name):
        # Fix: report the missing symbol in the exception itself instead of
        # a stray debug print() to stdout.
        raise ValueError(
            f"Transform function not supported: {orderA} to {orderOut} for data type {dtype} and transpose={transpose} (missing symbol {name})"
        )
    return getattr(lib, name)
433
+
434
+
def get_transform_buffer(
    shape, dtype, device, to_order, from_order="row", transpose=False
):
    """Allocate a zeroed buffer sized for layout ``to_order`` plus its state tuple.

    Tiled layouts ('col32', 'col_turing', 'col_ampere') pad rows/columns up to
    the tile size; ``transpose`` swaps the logical rows and columns first.
    Returns ``(buffer, (shape, to_order))``.
    """
    # init_func = torch.empty
    make = torch.zeros  # zeros keeps the padding region deterministic
    ndim = len(shape)

    if ndim == 2:
        rows = shape[0]
    elif ndim == 3:
        rows = shape[0] * shape[1]
    cols = shape[-1]

    state = (shape, to_order)
    if transpose:
        # Swap logical dimensions and record the transposed shape.
        rows, cols = cols, rows
        state = (shape[::-1], to_order)

    if to_order in ("row", "col"):
        return make(shape, dtype=dtype, device=device), state
    if to_order == "col32":
        # Blocks of 32 columns (padded).
        padded_cols = 32 * ((cols + 31) // 32)
        return make((rows, padded_cols), dtype=dtype, device=device), state
    if to_order == "col_turing":
        # Blocks of 32 columns and 8 rows.
        padded_cols = 32 * ((cols + 31) // 32)
        padded_rows = 8 * ((rows + 7) // 8)
        return make((padded_rows, padded_cols), dtype=dtype, device=device), state
    if to_order == "col_ampere":
        # Blocks of 32 columns and 32 rows.
        padded_cols = 32 * ((cols + 31) // 32)
        padded_rows = 32 * ((rows + 31) // 32)
        return make((padded_rows, padded_cols), dtype=dtype, device=device), state
    raise NotImplementedError(f"To_order not supported: {to_order}")
474
+
475
+
def nvidia_transform(
    A,
    to_order,
    from_order="row",
    out=None,
    transpose=False,
    state=None,
    ld=None,
):
    """Convert tensor ``A`` between GPU memory layouts via the C transform kernels.

    ``state`` is a ``(shape, order)`` tuple describing A's current layout; if
    omitted it is derived from A. ``ld`` optionally selects which leading
    dimensions of a >2D shape are folded into the row dimension.
    Returns ``(out, new_state)``.
    """
    if state is None:
        state = (A.shape, from_order)
    else:
        from_order = state[1]
    if out is None:
        out, new_state = get_transform_buffer(
            state[0], A.dtype, A.device, to_order, state[1]
        )
    else:
        new_state = (state[1], to_order)
    func = get_transform_func(A.dtype, from_order, to_order, transpose)

    # Flatten the logical shape into the (dim1, dim2) pair the kernel expects.
    shape = state[0]
    if len(shape) == 2:
        dim1 = ct.c_int32(shape[0])
        dim2 = ct.c_int32(shape[1])
    elif ld is not None:
        # Fold the dimensions listed in `ld` into dim1; the rest into dim2.
        n = prod(shape)
        dim1 = prod([shape[i] for i in ld])
        dim2 = ct.c_int32(n // dim1)
        dim1 = ct.c_int32(dim1)
    else:
        # 3D default: merge the first two dimensions.
        dim1 = ct.c_int32(shape[0] * shape[1])
        dim2 = ct.c_int32(shape[2])

    # Run the transform under the per-device cuBLAS context.
    ptr = CUBLAS_Context.get_instance().get_context(A.device)
    func(ptr, get_ptr(A), get_ptr(out), dim1, dim2)

    return out, new_state
514
+
515
+
def estimate_quantiles(A: Tensor, out: Tensor = None, offset: float = 1 / 512, num_quantiles=256) -> Tensor:
    '''
    Estimates 256 equidistant quantiles on the input tensor eCDF.

    Uses SRAM-Quantiles algorithm to quickly estimate 256 equidistant quantiles
    via the eCDF of the input tensor `A`. This is a fast but approximate algorithm
    and the extreme quantiles close to 0 and 1 have high variance / large estimation
    errors. These large errors can be avoided by using the offset variable which trims
    the distribution. The default offset value of 1/512 ensures minimum entropy encoding -- it
    trims 1/512 = 0.2% from each side of the distribution. An offset value of 0.01 to 0.02
    usually has a much lower error but is not a minimum entropy encoding. Given an offset
    of 0.02 equidistance points in the range [0.02, 0.98] are used for the quantiles.

    Parameters
    ----------
    A : torch.Tensor
        The input tensor. Any shape.
    out : torch.Tensor
        Tensor with the 256 estimated quantiles.
    offset : float
        The offset for the first and last quantile from 0 and 1. Default: 1/(2*num_quantiles)
    num_quantiles : int
        The number of equally spaced quantiles.

    Returns
    -------
    torch.Tensor:
        The 256 quantiles in float32 datatype.
    '''
    if A.numel() < 256: raise NotImplementedError(f'Quantile estimation needs at least 256 values in the Tensor, but Tensor had only {A.numel()} values.')
    if num_quantiles > 256: raise NotImplementedError(f"Currently only a maximum of 256 equally spaced quantiles are supported, but the argument num_quantiles={num_quantiles}")
    if num_quantiles < 256 and offset == 1/(512):
        # override default arguments
        offset = 1/(2*num_quantiles)

    if out is None: out = torch.zeros((256,), dtype=torch.float32, device=A.device)
    is_on_gpu([A, out])
    device = pre_call(A.device)
    if A.dtype == torch.float32:
        lib.cestimate_quantiles_fp32(get_ptr(A), get_ptr(out), ct.c_float(offset), ct.c_int(A.numel()))
    elif A.dtype == torch.float16:
        lib.cestimate_quantiles_fp16(get_ptr(A), get_ptr(out), ct.c_float(offset), ct.c_int(A.numel()))
    else:
        raise NotImplementedError(f"Not supported data type {A.dtype}")
    post_call(device)

    if num_quantiles < 256:
        # Subsample the 256 estimated quantiles down to `num_quantiles`
        # entries. (fix: removed the unused local `step`)
        idx = torch.linspace(0, 255, num_quantiles).long().to(A.device)
        out = out[idx]

    return out
568
+
569
+
def quantize_blockwise(A: Tensor, code: Tensor = None, absmax: Tensor = None, out: Tensor = None, blocksize=4096, nested=False) -> Tensor:
    """
    Quantize tensor A in blocks of size 4096 values.

    Quantizes tensor A by dividing it into blocks of 4096 values.
    Then the absolute maximum value within these blocks is calculated
    for the non-linear quantization.

    Parameters
    ----------
    A : torch.Tensor
        The input tensor.
    code : torch.Tensor
        The quantization map (256-entry code book; defaults to the cached dynamic map).
    absmax : torch.Tensor
        The absmax values (one per block; allocated if not given).
    out : torch.Tensor
        The output tensor (8-bit).
    blocksize : int
        Number of values per quantization block (GPU supports 64..4096, powers of two).
    nested : bool
        If True, the absmax statistics are themselves blockwise-quantized.

    Returns
    -------
    torch.Tensor:
        The 8-bit tensor.
    tuple(torch.Tensor, torch.Tensor):
        The quantization state to undo the quantization.
    """


    if code is None:
        # Lazily build and cache the default dynamic quantization map.
        if "dynamic" not in name2qmap:
            name2qmap["dynamic"] = create_dynamic_map().to(A.device)
        code = name2qmap["dynamic"]

    if absmax is None:
        # One absmax statistic per block: ceil(n / blocksize) blocks.
        n = A.numel()
        blocks = n // blocksize
        blocks += 1 if n % blocksize > 0 else 0
        absmax = torch.zeros((blocks,), device=A.device, dtype=torch.float32)

    if out is None:
        out = torch.zeros_like(A, dtype=torch.uint8)

    if A.device.type != 'cpu':
        assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]
        cblocksize = ct.c_int32(blocksize)
        prev_device = pre_call(A.device)
        code = code.to(A.device)
        is_on_gpu([code, A, out, absmax])
        # Dispatch on the input dtype to the matching CUDA kernel.
        if A.dtype == torch.float32:
            lib.cquantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
        elif A.dtype == torch.float16:
            lib.cquantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
        elif A.dtype == torch.bfloat16:
            lib.cquantize_blockwise_bf16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), cblocksize, ct.c_int(A.numel()))
        else:
            raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
        # NOTE(review): post_call receives A.device rather than prev_device,
        # so the previously active CUDA device is not restored here — verify
        # whether this is intentional.
        post_call(A.device)
    else:
        # cpu
        code = code.cpu()
        lib.cquantize_blockwise_cpu_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))

    if nested:
        # Compress the absmax statistics themselves: subtract their mean and
        # blockwise-quantize the residuals (state2 undoes this inner step).
        offset = absmax.mean()
        absmax -= offset
        qabsmax, state2 = quantize_blockwise(absmax, blocksize=blocksize, nested=False)
        state = [qabsmax, code, blocksize, nested, A.dtype, offset, state2]
    else:
        state = [absmax, code, blocksize, nested, A.dtype, None, None]

    return out, state
641
+
642
+
def dequantize_blockwise(
    A: Tensor,
    quant_state: Tuple[Tensor, Tensor] = None,
    absmax: Tensor = None,
    code: Tensor = None,
    out: Tensor = None,
    blocksize: int = 4096,
    nested=False
) -> Tensor:
    """
    Dequantizes blockwise quantized values.

    Dequantizes the tensor A with maximum absolute values absmax in
    blocks of size 4096.

    Parameters
    ----------
    A : torch.Tensor
        The input 8-bit tensor.
    quant_state : tuple(torch.Tensor, torch.Tensor)
        Full quantization state as produced by quantize_blockwise:
        (absmax, code, blocksize, nested, dtype, offset, state2).
    absmax : torch.Tensor
        The absmax values (used when quant_state is not given).
    code : torch.Tensor
        The quantization map.
    out : torch.Tensor
        Dequantized output tensor (default: float32)


    Returns
    -------
    torch.Tensor:
        Dequantized tensor (default: float32)
    """
    assert quant_state is not None or absmax is not None
    if code is None and quant_state is None:
        # Fall back to the cached dynamic quantization map.
        if "dynamic" not in name2qmap:
            name2qmap["dynamic"] = create_dynamic_map().to(A.device)
        code = name2qmap["dynamic"]

    if quant_state is None:
        # Build a minimal (non-nested, float32) state from absmax/code.
        quant_state = (absmax, code, blocksize, False, torch.float32, None, None)

    absmax, code, blocksize, nested, dtype, offset, state2 = quant_state

    if nested:
        # Undo the nested compression of the absmax statistics first.
        absmax = dequantize_blockwise(absmax, state2)
        absmax += offset
        if absmax.dtype != torch.float32: absmax = absmax.float()

    if out is None:
        out = torch.empty(A.shape, dtype=dtype, device=A.device)

    if A.device.type != 'cpu':
        # NOTE(review): like quantize_blockwise, post_call below receives
        # A.device rather than the value returned by pre_call — verify.
        device = pre_call(A.device)
        code = code.to(A.device)
        if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]:
            raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]")
        is_on_gpu([A, absmax, out])
        # Dispatch on the *output* dtype to the matching CUDA kernel.
        if out.dtype == torch.float32:
            lib.cdequantize_blockwise_fp32(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
        elif out.dtype == torch.float16:
            lib.cdequantize_blockwise_fp16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
        elif out.dtype == torch.bfloat16:
            lib.cdequantize_blockwise_bf16(get_ptr(code), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(A.numel()))
        else:
            raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
        post_call(A.device)
    else:
        # CPU path: only fp32 is supported; reads code/absmax from quant_state.
        code = code.cpu()
        lib.cdequantize_blockwise_cpu_fp32(get_ptr(quant_state[1]), get_ptr(A), get_ptr(quant_state[0]), get_ptr(out), ct.c_longlong(blocksize), ct.c_longlong(A.numel()))

    return out
716
+
def get_4bit_type(typename, device=None, blocksize=64):
    """Return the 16-entry lookup table for a named 4-bit data type.

    Supported typenames: 'nf4', 'fp4', 'int4', 'af4'. The table is normalized
    by its largest absolute value and moved to ``device`` (default 'cuda').
    """
    if device is None:
        device = 'cuda'
    data = None
    if typename == 'nf4':
        # NF4: each bin has equal area under a standard normal N(0, 1),
        # normalized into [-1, 1]. See QLoRA (https://arxiv.org/abs/2305.14314)
        # and `create_normal_map` in this module for the construction.
        data = [-1.0, -0.6961928009986877, -0.5250730514526367, -0.39491748809814453, -0.28444138169288635,
                -0.18477343022823334, -0.09105003625154495, 0.0, 0.07958029955625534, 0.16093020141124725,
                0.24611230194568634, 0.33791524171829224, 0.44070982933044434, 0.5626170039176941,
                0.7229568362236023, 1.0]
    elif typename == 'fp4':
        # FP4 (e2m1) magnitudes before normalization:
        # 0b000=0, 0b001=0.0625, 0b010=8, 0b011=12, 0b100=4, 0b101=6, 0b110=2, 0b111=3
        # Equivalently: bnb.functional.create_fp8_map(signed=True, exponent_bits=2, precision_bits=1, total_bits=4)
        data = [0, 0.0625, 8.0, 12.0, 4.0, 6.0, 2.0, 3.0, -0, -0.0625, -8.0, -12.0, -4.0, -6.0, -2.0, -3.0]
    elif typename == 'int4':
        data = [7, 6, 5, 4, 3, 2, 1, 0, -0, -1, -2, -3, -4, -5, -6, -7]
    elif typename == 'af4':
        # AbnormalFloat4, from: "NF4 Isn't Information Theoretically Optimal
        # (and that's Good)" — https://arxiv.org/abs/2306.06965
        if blocksize == 64:
            data = [-1., -0.69441008, -0.51243739, -0.3736951, -0.25607552, -0.14982478,
                    -0.04934812, 0., 0.04273164, 0.12934483, 0.21961274, 0.31675666,
                    0.42563882, 0.55496234, 0.72424863, 1.][::-1]
        else:
            raise NotImplementedError(f'4-bit AbnormalFloats currently only support blocksize 64.')

    if data is None:
        raise NotImplementedError(f'Typename {typename} not supported')

    table = Tensor(data)
    table /= table.abs().max()
    assert table.numel() == 16

    return table.to(device)
766
+
767
+
768
+
def quantize_fp4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
    """Blockwise-quantize ``A`` to packed 4-bit FP4; thin wrapper over quantize_4bit."""
    return quantize_4bit(A, absmax, out, blocksize, compress_statistics, quant_type='fp4')
771
+
def quantize_nf4(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False):
    """Blockwise-quantize ``A`` to packed 4-bit NF4; thin wrapper over quantize_4bit."""
    return quantize_4bit(A, absmax, out, blocksize, compress_statistics, quant_type='nf4')
774
+
775
def quantize_4bit(A: Tensor, absmax: Tensor = None, out: Tensor = None, blocksize=64, compress_statistics=False, quant_type='fp4') -> Tensor:
    """
    Quantize tensor A in blocks of 4-bit values.

    Quantizes tensor A by dividing it into blocks which are independently quantized to FP4.

    Parameters
    ----------
    A : torch.Tensor
        The input tensor.
    absmax : torch.Tensor
        The absmax values.
    out : torch.Tensor
        The output tensor (8-bit).
    blocksize : int
        The blocksize used in quantization.
    compress_statistics : bool
        If True, the per-block absmax values are themselves blockwise 8-bit
        quantized ("double quantization") to reduce the memory overhead of
        the statistics.
    quant_type : str
        The 4-bit quantization data type {fp4, nf4}

    Returns
    -------
    torch.Tensor:
        The 8-bit tensor with packed 4-bit values.
    tuple(torch.Tensor, torch.Size, torch.dtype, int):
        The quantization state to undo the quantization.
    """
    if A.device.type != 'cuda':
        raise NotImplementedError(f'Device type not supported for FP4 quantization: {A.device.type}')
    if quant_type not in ['fp4', 'nf4']:
        raise NotImplementedError(f'4-bit quantization data type {quant_type} is not implemented.')

    n = A.numel()
    input_shape = A.shape

    if absmax is None:
        # one float32 scale per block; a trailing partial block still needs a slot
        blocks = n // blocksize
        blocks += 1 if n % blocksize > 0 else 0
        absmax = torch.zeros((blocks,), device=A.device, dtype=torch.float32)

    if out is None:
        # two 4-bit values are packed into each output byte, hence (n+1)//2
        out = torch.zeros(((n + 1) // 2, 1), dtype=torch.uint8, device=A.device)

    assert blocksize in [4096, 2048, 1024, 512, 256, 128, 64]

    prev_device = pre_call(A.device)
    is_on_gpu([A, out, absmax])

    # Dispatch on input dtype and target 4-bit format to the matching CUDA kernel.
    if A.dtype == torch.float32:
        if quant_type == 'fp4':
            lib.cquantize_blockwise_fp32_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
        else:
            lib.cquantize_blockwise_fp32_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
    elif A.dtype == torch.float16:
        if quant_type == 'fp4':
            lib.cquantize_blockwise_fp16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
        else:
            lib.cquantize_blockwise_fp16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
    elif A.dtype == torch.bfloat16:
        if quant_type == 'fp4':
            lib.cquantize_blockwise_bf16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
        else:
            lib.cquantize_blockwise_bf16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int32(blocksize), ct.c_int(n))
    else:
        raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
    post_call(A.device)

    datatype = get_4bit_type(quant_type, device=A.device)

    if compress_statistics:
        # Double quantization: store absmax as 8-bit blockwise-quantized values
        # relative to their mean (`offset`); offset and the inner state are kept
        # so dequantize_4bit can reconstruct the float absmax.
        offset = absmax.mean()
        absmax -= offset
        qabsmax, state2 = quantize_blockwise(absmax, blocksize=256)
        del absmax
        state = [qabsmax, input_shape, A.dtype, blocksize, [offset, state2], quant_type, datatype]
    else:
        state = [absmax, input_shape, A.dtype, blocksize, None, quant_type, datatype]

    return out, state
854
+
855
def dequantize_fp4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
    """Dequantize packed FP4 data; thin wrapper over :func:`dequantize_4bit`."""
    return dequantize_4bit(A, quant_state=quant_state, absmax=absmax, out=out,
                           blocksize=blocksize, quant_type='fp4')
857
+
858
def dequantize_nf4(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64) -> Tensor:
    """Dequantize packed NF4 data; thin wrapper over :func:`dequantize_4bit`."""
    return dequantize_4bit(A, quant_state=quant_state, absmax=absmax, out=out,
                           blocksize=blocksize, quant_type='nf4')
860
+
861
def dequantize_4bit(A: Tensor, quant_state: Tuple[Tensor, Tensor] = None, absmax: Tensor = None, out: Tensor = None, blocksize: int = 64, quant_type='fp4') -> Tensor:
    """
    Dequantizes FP4 blockwise quantized values.

    Dequantizes the tensor A with maximum absolute values absmax in blocks of size blocksize.

    Parameters
    ----------
    A : torch.Tensor
        The input 8-bit tensor (packed 4-bit values).
    quant_state : tuple(torch.Tensor, torch.Size, torch.dtype)
        Tuple of absmax values, original tensor shape and original dtype.
    absmax : torch.Tensor
        The absmax values.
    out : torch.Tensor
        Dequantized output tensor.
    blocksize : int
        The blocksize used in quantization.
    quant_type : str
        The 4-bit quantization data type {fp4, nf4}

    Returns
    -------
    torch.Tensor:
        Dequantized tensor.
    """
    if blocksize not in [2048, 4096, 1024, 512, 256, 128, 64]:
        raise ValueError(f"The blockwise of {blocksize} is not supported. Supported values: [2048, 4096, 1024, 512, 256, 128, 64]")
    if quant_type not in ['fp4', 'nf4']:
        raise NotImplementedError(f'4-bit quantization data type {quant_type} is not implemented.')

    if quant_state is None:
        # Without a state the caller must supply both the scales and a
        # correctly shaped/typed output buffer.
        assert absmax is not None and out is not None
        shape = out.shape
        dtype = out.dtype
    else:
        # State layout produced by quantize_4bit.
        absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = quant_state

        if compressed_stats is not None:
            # Statistics were double-quantized: decode absmax and re-add the
            # mean offset removed during quantization.
            offset, state2 = compressed_stats
            absmax = dequantize_blockwise(absmax, state2)
            absmax += offset
            if absmax.dtype != torch.float32: absmax = absmax.float()

    if out is None:
        out = torch.empty(shape, dtype=dtype, device=A.device)

    n = out.numel()

    device = pre_call(A.device)
    is_on_gpu([A, absmax, out])
    # Dispatch on output dtype and 4-bit format to the matching CUDA kernel.
    if out.dtype == torch.float32:
        if quant_type == 'fp4':
            lib.cdequantize_blockwise_fp32_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
        else:
            lib.cdequantize_blockwise_fp32_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
    elif out.dtype == torch.float16:
        if quant_type == 'fp4':
            lib.cdequantize_blockwise_fp16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
        else:
            lib.cdequantize_blockwise_fp16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
    elif out.dtype == torch.bfloat16:
        if quant_type == 'fp4':
            lib.cdequantize_blockwise_bf16_fp4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
        else:
            lib.cdequantize_blockwise_bf16_nf4(get_ptr(None), get_ptr(A), get_ptr(absmax), get_ptr(out), ct.c_int(blocksize), ct.c_int(n))
    else:
        raise ValueError(f"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}")
    post_call(A.device)

    # NOTE(review): a leading dimension of 1 on the packed input is treated as
    # "caller passed a transposed weight", so the result is transposed back —
    # confirm against how callers (e.g. matmul paths) pass A.
    is_transposed = (True if A.shape[0] == 1 else False)
    if is_transposed: return out.t()
    else: return out
937
+
938
+
939
def quantize(A: Tensor, code: Tensor = None, out: Tensor = None) -> Tensor:
    """Quantize ``A`` to 8 bits using one tensor-wide absmax scale.

    Returns the quantized tensor and the state ``(absmax, code)`` consumed by
    :func:`dequantize`.
    """
    if code is None:
        if "dynamic" not in name2qmap:
            # build the dynamic 8-bit quantization map once and cache it
            name2qmap["dynamic"] = create_dynamic_map().to(A.device)
        code = name2qmap["dynamic"]
        code = code.to(A.device)

    absmax = torch.abs(A).max()
    # scale must be float32 for the division to keep full precision
    if absmax.dtype != torch.float32: absmax = absmax.float()
    inp = A / absmax
    out = quantize_no_absmax(inp, code, out)
    return out, (absmax, code)
951
+
952
+
953
def dequantize(
    A: Tensor,
    quant_state: Tuple[Tensor, Tensor] = None,
    absmax: Tensor = None,
    code: Tensor = None,
    out: Tensor = None,
) -> Tensor:
    """Dequantize an 8-bit tensor produced by :func:`quantize`.

    Either ``quant_state`` (the ``(absmax, code)`` tuple from :func:`quantize`)
    or ``absmax`` must be supplied.
    """
    assert quant_state is not None or absmax is not None
    if code is None and quant_state is None:
        if "dynamic" not in name2qmap:
            # build the dynamic 8-bit quantization map once and cache it
            name2qmap["dynamic"] = create_dynamic_map().to(A.device)
        code = name2qmap["dynamic"]
        code = code.to(A.device)

    if quant_state is None:
        quant_state = (absmax, code)
    # decode indices via the map, then undo the absmax scaling
    out = dequantize_no_absmax(A, quant_state[1], out)
    return out * quant_state[0]
971
+
972
+
973
def quantize_no_absmax(A: Tensor, code: Tensor, out: Tensor = None) -> Tensor:
    '''
    Quantizes input tensor to 8-bit.

    Maps every element of the 32-bit tensor `A` onto the nearest entry of the
    quantization map `code`, storing the resulting byte indices in `out`.

    Parameters
    ----------
    A : torch.Tensor
        The input tensor.
    code : torch.Tensor
        The quantization map.
    out : torch.Tensor, optional
        The output tensor. Needs to be of type byte.

    Returns
    -------
    torch.Tensor:
        Quantized 8-bit tensor.
    '''
    prev_device = pre_call(A.device)
    if out is None:
        out = torch.zeros_like(A, dtype=torch.uint8)
    is_on_gpu([A, out])
    n_elements = ct.c_int(A.numel())
    lib.cquantize(get_ptr(code), get_ptr(A), get_ptr(out), n_elements)
    post_call(prev_device)
    return out
1000
+
1001
+
1002
def dequantize_no_absmax(A: Tensor, code: Tensor, out: Tensor = None) -> Tensor:
    '''
    Dequantizes the 8-bit tensor to 32-bit.

    Looks every byte of `A` up in the quantization map `code` and writes the
    corresponding 32-bit value into `out`.

    Parameters
    ----------
    A : torch.Tensor
        The 8-bit input tensor.
    code : torch.Tensor
        The quantization map.
    out : torch.Tensor
        The 32-bit output tensor.

    Returns
    -------
    torch.Tensor:
        32-bit output tensor.
    '''
    prev_device = pre_call(A.device)
    if out is None:
        out = torch.zeros_like(A, dtype=torch.float32)
    is_on_gpu([code, A, out])
    n_elements = ct.c_int(A.numel())
    lib.cdequantize(get_ptr(code), get_ptr(A), get_ptr(out), n_elements)
    post_call(prev_device)
    return out
1029
+
1030
+
1031
def optimizer_update_32bit(
    optimizer_name: str,
    g: Tensor,
    p: Tensor,
    state1: Tensor,
    beta1: float,
    eps: float,
    step: int,
    lr: float,
    state2: Tensor = None,
    beta2: float = 0.0,
    weight_decay: float = 0.0,
    gnorm_scale: float = 1.0,
    unorm_vec: Tensor = None,
    max_unorm: float = 0.0,
    skip_zeros=False,
) -> None:
    """
    Performs an inplace optimizer update with one or two optimizer states.

    Universal optimizer update for 32-bit state and 32/16-bit gradients/weights.

    Parameters
    ----------
    optimizer_name : str
        The name of the optimizer: {adam}.
    g : torch.Tensor
        Gradient tensor.
    p : torch.Tensor
        Parameter tensor.
    state1 : torch.Tensor
        Optimizer state 1.
    beta1 : float
        Optimizer beta1.
    eps : float
        Optimizer epsilon.
    weight_decay : float
        Weight decay.
    step : int
        Current optimizer step.
    lr : float
        The learning rate.
    state2 : torch.Tensor
        Optimizer state 2.
    beta2 : float
        Optimizer beta2.
    gnorm_scale : float
        The factor to rescale the gradient to the max clip value.
    unorm_vec : torch.Tensor
        The tensor for the update norm.
    max_unorm : float
        The maximum update norm relative to the weight norm.
    skip_zeros : bool
        Whether to skip zero-valued gradients or not (default: False).
    """

    # parameter norm is only needed when update-norm clipping is active
    param_norm = 0.0
    if max_unorm > 0.0:
        param_norm = torch.norm(p.data.float())

    # Select the CUDA kernel by gradient dtype; entry 2 (bf16) exists only for
    # optimizers that register a third function in str2optimizer32bit.
    optim_func = None
    if g.dtype == torch.float32:
        optim_func = str2optimizer32bit[optimizer_name][0]
    elif g.dtype == torch.float16:
        optim_func = str2optimizer32bit[optimizer_name][1]
    elif (g.dtype == torch.bfloat16 and len(str2optimizer32bit[optimizer_name])==3):
        optim_func = str2optimizer32bit[optimizer_name][2]
    else:
        raise ValueError(f"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}")

    is_on_gpu([g, p, state1, state2, unorm_vec])
    prev_device = pre_call(g.device)
    # Argument order must match the C signature exactly.
    optim_func(
        get_ptr(g),
        get_ptr(p),
        get_ptr(state1),
        get_ptr(state2),
        get_ptr(unorm_vec),
        ct.c_float(max_unorm),
        ct.c_float(param_norm),
        ct.c_float(beta1),
        ct.c_float(beta2),
        ct.c_float(eps),
        ct.c_float(weight_decay),
        ct.c_int32(step),
        ct.c_float(lr),
        ct.c_float(gnorm_scale),
        ct.c_bool(skip_zeros),
        ct.c_int32(g.numel()))
    post_call(prev_device)
1122
+
1123
+
1124
def optimizer_update_8bit(
    optimizer_name: str,
    g: Tensor,
    p: Tensor,
    state1: Tensor,
    state2: Tensor,
    beta1: float,
    beta2: float,
    eps: float,
    step: int,
    lr: float,
    qmap1: Tensor,
    qmap2: Tensor,
    max1: Tensor,
    max2: Tensor,
    new_max1: Tensor,
    new_max2: Tensor,
    weight_decay: float = 0.0,
    gnorm_scale: float = 1.0,
    unorm_vec: Tensor = None,
    max_unorm: float = 0.0,
) -> None:
    """
    Performs an inplace Adam update.

    Universal Adam update for 32/8-bit state and 32/16-bit gradients/weights.
    Uses AdamW formulation if weight decay > 0.0.

    Parameters
    ----------
    optimizer_name : str
        The name of the optimizer. Choices {adam, momentum}
    g : torch.Tensor
        Gradient tensor.
    p : torch.Tensor
        Parameter tensor.
    state1 : torch.Tensor
        Adam state 1.
    state2 : torch.Tensor
        Adam state 2.
    beta1 : float
        Adam beta1.
    beta2 : float
        Adam beta2.
    eps : float
        Adam epsilon.
    weight_decay : float
        Weight decay.
    step : int
        Current optimizer step.
    lr : float
        The learning rate.
    qmap1 : torch.Tensor
        Quantization map for first Adam state.
    qmap2 : torch.Tensor
        Quantization map for second Adam state.
    max1 : torch.Tensor
        Max value for first Adam state update.
    max2 : torch.Tensor
        Max value for second Adam state update.
    new_max1 : torch.Tensor
        Max value for the next Adam update of the first state.
    new_max2 : torch.Tensor
        Max value for the next Adam update of the second state.
    gnorm_scale : float
        The factor to rescale the gradient to the max clip value.
    unorm_vec : torch.Tensor
        The tensor for the update norm.
    max_unorm : float
        The maximum update norm relative to the weight norm.
    """

    # parameter norm is only needed when update-norm clipping is active
    param_norm = 0.0
    if max_unorm > 0.0:
        param_norm = torch.norm(p.data.float())

    prev_device = pre_call(g.device)
    is_on_gpu([g, p, state1, state2, unorm_vec, qmap1, qmap2, max1, max2, new_max1, new_max2])
    # Dispatch by gradient dtype; 8-bit state is always stored as uint8.
    # Argument order must match the C signature exactly.
    if g.dtype == torch.float32 and state1.dtype == torch.uint8:
        str2optimizer8bit[optimizer_name][0](
            get_ptr(p),
            get_ptr(g),
            get_ptr(state1),
            get_ptr(state2),
            get_ptr(unorm_vec),
            ct.c_float(max_unorm),
            ct.c_float(param_norm),
            ct.c_float(beta1),
            ct.c_float(beta2),
            ct.c_float(eps),
            ct.c_int32(step),
            ct.c_float(lr),
            get_ptr(qmap1),
            get_ptr(qmap2),
            get_ptr(max1),
            get_ptr(max2),
            get_ptr(new_max1),
            get_ptr(new_max2),
            ct.c_float(weight_decay),
            ct.c_float(gnorm_scale),
            ct.c_int32(g.numel()),
        )
    elif g.dtype == torch.float16 and state1.dtype == torch.uint8:
        str2optimizer8bit[optimizer_name][1](
            get_ptr(p),
            get_ptr(g),
            get_ptr(state1),
            get_ptr(state2),
            get_ptr(unorm_vec),
            ct.c_float(max_unorm),
            ct.c_float(param_norm),
            ct.c_float(beta1),
            ct.c_float(beta2),
            ct.c_float(eps),
            ct.c_int32(step),
            ct.c_float(lr),
            get_ptr(qmap1),
            get_ptr(qmap2),
            get_ptr(max1),
            get_ptr(max2),
            get_ptr(new_max1),
            get_ptr(new_max2),
            ct.c_float(weight_decay),
            ct.c_float(gnorm_scale),
            ct.c_int32(g.numel()),
        )
    else:
        raise ValueError(
            f"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}"
        )
    post_call(prev_device)
1255
+
1256
+
1257
def optimizer_update_8bit_blockwise(
    optimizer_name: str,
    g: Tensor,
    p: Tensor,
    state1: Tensor,
    state2: Tensor,
    beta1: float,
    beta2: float,
    eps: float,
    step: int,
    lr: float,
    qmap1: Tensor,
    qmap2: Tensor,
    absmax1: Tensor,
    absmax2: Tensor,
    weight_decay: float = 0.0,
    gnorm_scale: float = 1.0,
    skip_zeros=False,
) -> None:
    """
    Performs an inplace blockwise 8-bit optimizer update.

    Parameters
    ----------
    optimizer_name : str
        The name of the optimizer (key into ``str2optimizer8bit_blockwise``).
    g : torch.Tensor
        Gradient tensor.
    p : torch.Tensor
        Parameter tensor (updated in place).
    state1, state2 : torch.Tensor
        Blockwise 8-bit optimizer states (uint8).
    beta1, beta2, eps, step, lr : optimizer hyperparameters.
    qmap1, qmap2 : torch.Tensor
        Quantization maps for the two states.
    absmax1, absmax2 : torch.Tensor
        Per-block absmax statistics for the two states.
    weight_decay : float
        Weight decay.
    gnorm_scale : float
        The factor to rescale the gradient to the max clip value.
    skip_zeros : bool
        Whether to skip zero-valued gradients (default: False).

    Raises
    ------
    ValueError
        If the gradient/state dtype combination has no registered kernel.
    """
    # Select the CUDA kernel by gradient dtype; blockwise 8-bit state is always
    # uint8, and a bf16 kernel (index 2) exists only for optimizers that
    # register a third entry. NOTE: the previous version wrapped this pure-
    # Python dispatch in a pre_call/post_call device switch and ran is_on_gpu
    # twice — both were redundant and have been removed.
    if g.dtype == torch.float32 and state1.dtype == torch.uint8:
        optim_func = str2optimizer8bit_blockwise[optimizer_name][0]
    elif g.dtype == torch.float16 and state1.dtype == torch.uint8:
        optim_func = str2optimizer8bit_blockwise[optimizer_name][1]
    elif (g.dtype == torch.bfloat16 and state1.dtype == torch.uint8 and
          len(str2optimizer8bit_blockwise[optimizer_name])==3):
        optim_func = str2optimizer8bit_blockwise[optimizer_name][2]
    else:
        raise ValueError(
            f"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}"
        )

    is_on_gpu([p, g, state1, state2, qmap1, qmap2, absmax1, absmax2])

    prev_device = pre_call(g.device)
    # Argument order must match the C signature exactly.
    optim_func(
        get_ptr(p),
        get_ptr(g),
        get_ptr(state1),
        get_ptr(state2),
        ct.c_float(beta1),
        ct.c_float(beta2),
        ct.c_float(eps),
        ct.c_int32(step),
        ct.c_float(lr),
        get_ptr(qmap1),
        get_ptr(qmap2),
        get_ptr(absmax1),
        get_ptr(absmax2),
        ct.c_float(weight_decay),
        ct.c_float(gnorm_scale),
        ct.c_bool(skip_zeros),
        ct.c_int32(g.numel()),
    )
    post_call(prev_device)
1316
+
1317
def percentile_clipping(
    grad: Tensor, gnorm_vec: Tensor, step: int, percentile: int = 5
):
    """Applies percentile clipping

    grad: torch.Tensor
        The gradient tensor.
    gnorm_vec: torch.Tensor
        Vector of gradient norms. 100 elements expected.
    step: int
        The current optimization step (number of past gradient norms).
    percentile: int
        Index into the sorted 100-entry norm history used as the clipping
        threshold (default: 5, i.e. the 5th percentile).

    Returns (current_gnorm, clip_value, gnorm_scale).
    """
    prev_device = pre_call(grad.device)
    is_on_gpu([grad, gnorm_vec])
    # The kernel records the gradient's norm statistic into the circular
    # buffer slot gnorm_vec[step % 100] (squared — see the sqrt below).
    if grad.dtype == torch.float32:
        lib.cpercentile_clipping_g32(
            get_ptr(grad),
            get_ptr(gnorm_vec),
            ct.c_int32(step),
            ct.c_int32(grad.numel()),
        )
    elif grad.dtype == torch.float16:
        lib.cpercentile_clipping_g16(
            get_ptr(grad),
            get_ptr(gnorm_vec),
            ct.c_int32(step),
            ct.c_int32(grad.numel()),
        )
    else:
        raise ValueError(f"Gradient type {grad.dtype} not supported!")
    post_call(prev_device)

    current_gnorm = torch.sqrt(gnorm_vec[step % 100])
    # Previous version unpacked and never used the sort indices.
    vals, _ = torch.sort(gnorm_vec)
    clip_value = torch.sqrt(vals[percentile])
    gnorm_scale = 1.0

    if current_gnorm > clip_value:
        gnorm_scale = clip_value / current_gnorm

    return current_gnorm, clip_value, gnorm_scale
1359
+
1360
+
1361
def histogram_scatter_add_2d(
    histogram: Tensor, index1: Tensor, index2: Tensor, source: Tensor
):
    """Scatter-add `source` values into the 2-D float32 `histogram` at the
    (index1, index2) coordinate pairs, via the CUDA kernel."""
    assert len(histogram.shape) == 2
    assert histogram.dtype == torch.float32
    assert source.dtype == torch.float32
    assert index1.dtype == torch.int32
    assert index2.dtype == torch.int32

    # all four tensors must live on a CUDA device
    for tensor in (histogram, index1, index2, source):
        assert tensor.device.type == "cuda"

    maxdim1 = ct.c_int32(histogram.shape[0])
    n = ct.c_int32(index1.numel())
    is_on_gpu([histogram, index1, index2, source])
    lib.chistogram_scatter_add_2d(
        get_ptr(histogram),
        get_ptr(index1),
        get_ptr(index2),
        get_ptr(source),
        maxdim1,
        n,
    )
1379
+
1380
def check_matmul(A, B, out, transposed_A, transposed_B, expected_type=torch.int8):
    """Validate dtypes and shapes for a matmul A @ B and return the output shape.

    Parameters
    ----------
    A, B : torch.Tensor
        Input tensors; both must have dtype ``expected_type``.
    out : torch.Tensor or None
        Optional output tensor; if given, its shape is validated/returned.
    transposed_A, transposed_B : bool
        Whether A / B should be treated as transposed.
    expected_type : torch.dtype
        Required dtype of A and B (default torch.int8).

    Returns
    -------
    tuple: the expected output shape.

    Raises
    ------
    TypeError
        If A or B does not have ``expected_type``.
    ValueError
        If the dimensions are incompatible or unsupported.
    """
    if not torch.cuda.is_initialized(): torch.cuda.init()
    if A.dtype != expected_type or B.dtype != expected_type:
        # report the actually-required dtype instead of hardcoding torch.int8
        raise TypeError(
            f"Expected {expected_type} input tensors A and B, but got {A.dtype} and {B.dtype}"
        )

    sA = A.shape
    sB = B.shape
    tA = transposed_A
    tB = transposed_B

    # Only these rank combinations are handled below. Previously anything
    # else fell through and crashed with UnboundLocalError on `sout`.
    if (len(sA), len(sB)) not in [(2, 2), (3, 2), (3, 3)]:
        raise ValueError(
            f"Tensor dimensions incorrect for matrix multiplication: A x B: {sA} x {sB} with transpose for A x B: {tA} x {tB}."
        )

    correct = True

    if len(sA) == 2 and len(sB) == 2:
        if not tA and not tB and A.shape[1] != B.shape[0]:
            correct = False
        elif tA and not tB and A.shape[0] != B.shape[0]:
            correct = False
        elif tA and tB and A.shape[0] != B.shape[1]:
            correct = False
        elif not tA and tB and A.shape[1] != B.shape[1]:
            correct = False
    elif len(sA) == 3 and len(sB) == 2:
        if not tA and not tB and A.shape[2] != B.shape[0]:
            correct = False
        elif tA and not tB and A.shape[1] != B.shape[0]:
            correct = False
        elif tA and tB and A.shape[1] != B.shape[1]:
            correct = False
        elif not tA and tB and A.shape[2] != B.shape[1]:
            correct = False
    elif len(sA) == 3 and len(sB) == 3:
        if not tA and not tB and A.shape[2] != B.shape[1]:
            correct = False
        elif tA and not tB and A.shape[1] != B.shape[1]:
            correct = False
        elif tA and tB and A.shape[1] != B.shape[2]:
            correct = False
        elif not tA and tB and A.shape[2] != B.shape[2]:
            correct = False

    if out is not None:
        sout = out.shape
        # special case common in backprop
        if not correct and len(sA) == 3 and len(sB) == 3:
            if (
                sout[0] == sA[2]
                and sout[1] == sB[2]
                and sA[0] == sB[0]
                and sA[1] == sB[1]
            ):
                correct = True
    else:
        if len(sA) == 2 and len(sB) == 2:
            if not tA and not tB:
                sout = (sA[0], sB[1])
            elif tA and tB:
                sout = (sA[1], sB[0])
            elif tA and not tB:
                sout = (sA[1], sB[1])
            elif not tA and tB:
                sout = (sA[0], sB[0])
        elif len(sA) == 3 and len(sB) == 2:
            if not tA and not tB:
                sout = (sA[0], sA[1], sB[1])
            elif tA and tB:
                sout = (sA[0], sA[2], sB[0])
            elif tA and not tB:
                sout = (sA[0], sA[2], sB[1])
            elif not tA and tB:
                sout = (sA[0], sA[1], sB[0])
        elif len(sA) == 3 and len(sB) == 3:
            if not tA and not tB:
                sout = (sA[0], sA[1], sB[2])
            elif tA and tB:
                sout = (sA[0], sA[2], sB[1])
            elif tA and not tB:
                sout = (sA[0], sA[2], sB[2])
            elif not tA and tB:
                sout = (sA[0], sA[1], sB[1])

    if not correct:
        raise ValueError(
            f"Tensor dimensions incorrect for matrix multiplication: A x B: {sA} x {sB} with transpose for A x B: {tA} x {tB}."
        )

    return sout
1468
+
1469
def gemv_4bit(
    A: Tensor,
    B: Tensor,
    out: Tensor = None,
    transposed_A=False,
    transposed_B=False,
    state=None
):
    """Vector-matrix multiply of a 16/32-bit vector ``A`` against a 4-bit
    quantized weight ``B`` (packed uint8), dequantizing on the fly.

    Parameters
    ----------
    A : torch.Tensor
        Input with exactly one non-unit (last) dimension, e.g. [1, 1, 2048].
    B : torch.Tensor
        Packed 4-bit weight (uint8) produced by quantize_4bit.
    out : torch.Tensor, optional
        Output buffer; allocated if None.
    transposed_A, transposed_B : bool
        Unused; kept for interface compatibility with the other gemm helpers.
    state : list
        The quantization state returned by quantize_4bit (required).

    Returns
    -------
    torch.Tensor: the product, with leading dims of A and Bshape[0] as last dim.
    """
    prev_device = pre_call(A.device)
    #sout = check_matmul(A, B, out, transposed_A, transposed_B, expected_type=A.dtype)
    if state is None:
        # fixed wording: was "state cannot None. gem_4bit( ) requires ..."
        raise ValueError('state cannot be None. gemv_4bit() requires the state from quantize_4bit()')

    if A.numel() != A.shape[-1]:
        raise ValueError('Dimensions of A are invalid. Must be a vector with the leading dimensions of "1", e.g. [1, 1, 2048]')

    Bshape = state[1]
    bout = Bshape[0]
    absmax, shape, dtype, blocksize, compressed_stats, quant_type, data_type = state
    if compressed_stats is not None:
        # statistics were double-quantized: decode absmax and re-add the offset
        offset, state2 = compressed_stats
        absmax = dequantize_blockwise(absmax, state2)
        absmax += offset

    if out is None:
        if len(A.shape) == 3:
            out = torch.empty(size=(A.shape[0], A.shape[1], bout), dtype=A.dtype, device=A.device)
        else:
            out = torch.empty(size=(A.shape[0], bout), dtype=A.dtype, device=A.device)

    n = 1
    m = Bshape[0]
    k = Bshape[1]
    lda = Bshape[0]
    ldc = Bshape[0]
    # B's rows are packed two 4-bit values per byte
    ldb = (A.shape[-1] + 1) // 2
    is_on_gpu([B, A, out, absmax, data_type])
    m = ct.c_int32(m)
    n = ct.c_int32(n)
    k = ct.c_int32(k)
    lda = ct.c_int32(lda)
    ldb = ct.c_int32(ldb)
    ldc = ct.c_int32(ldc)

    if B.dtype == torch.uint8:
        if A.dtype == torch.float16:
            lib.cgemm_4bit_inference_naive_fp16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(data_type), get_ptr(out), lda, ldb, ldc, ct.c_int32(blocksize))
        elif A.dtype == torch.bfloat16:
            lib.cgemm_4bit_inference_naive_bf16(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(data_type), get_ptr(out), lda, ldb, ldc, ct.c_int32(blocksize))
        elif A.dtype == torch.float32:
            lib.cgemm_4bit_inference_naive_fp32(m, n, k, get_ptr(A), get_ptr(B), get_ptr(absmax), get_ptr(data_type), get_ptr(out), lda, ldb, ldc, ct.c_int32(blocksize))
        else:
            raise NotImplementedError(f'Matmul not implemented for data type {A.dtype}')
    else:
        # fixed: previous message blamed A.dtype although the problem is B's dtype
        raise NotImplementedError(f'Matmul not implemented for quantized weight data type {B.dtype}')

    post_call(prev_device)

    return out
1529
+
1530
def igemm(
    A: Tensor,
    B: Tensor,
    out: Tensor = None,
    transposed_A=False,
    transposed_B=False,
):
    """Int8 matrix multiply C = A @ B (int32 accumulation) via cuBLAS.

    Delegates 3-D x 3-D batched cases to batched_igemm; otherwise maps the
    row-major PyTorch tensors onto cuBLAS's column-major GEMM.
    """
    sout = check_matmul(A, B, out, transposed_A, transposed_B)
    if out is None:
        out = torch.zeros(size=sout, dtype=torch.int32, device=A.device)
    if len(A.shape) == 3 and len(B.shape) == 3:
        if A.shape[0] == B.shape[0] and A.shape[2] == B.shape[1]:
            return batched_igemm(A, B, out)

    sA = A.shape
    sB = B.shape
    if transposed_A and len(sA) == 2:
        sA = (sA[1], sA[0])
    elif transposed_A and len(sA) == 3:
        # NOTE(review): last element is sA[0]; a plain transpose of the last
        # two dims would be sA[1] — the third element is never read below,
        # but confirm this is intentional.
        sA = (sA[0], sA[2], sA[0])
    if transposed_B and len(sB) == 2:
        sB = (sB[1], sB[0])
    elif transposed_B and len(sB) == 3:
        # NOTE(review): same sA[0]-style oddity as above (sB[0] repeated).
        sB = (sB[0], sB[2], sB[0])
    # this is a mess: cuBLAS expect column major, but PyTorch is row major.
    # So to perform the matrix multiplication, we have to treat A, B, and C matrices
    # (transpose of row major is column major)
    # This means we compute B^T A^T = C^T and we explicitly switch the dimensions of each of these

    # matrices in the input arguments for cuBLAS
    # column major: A @ B = C: [m, k] @ [k, n] = [m, n]
    # row major: B^T @ A^T = C^T: [m, k] @ [k, n] = [m, n]
    # column major with row major layout: B^T @ A^T = C^T: [k, m] @ [n, k] = [n, m]
    if len(sB) == 2:
        # infer the effective transpose flags from the memory strides
        if B.stride()[0] == B.shape[1]:
            transposed_B = False
        elif B.stride()[1] == B.shape[0]:
            transposed_B = True
        if len(A.shape) == 2:
            if A.stride()[0] == A.shape[1]:
                transposed_A = False
            elif A.stride()[1] == A.shape[0]:
                transposed_A = True
        else:
            if A.stride()[1] == A.shape[2]:
                transposed_A = False
            elif A.stride()[2] == A.shape[1]:
                transposed_A = True

        if len(sA) == 2:
            n = sA[0]
            ldb = A.stride()[1 if transposed_A else 0]
        elif len(sA) == 3 and len(sB) == 2:
            # 3-D input is flattened to (batch*seq, features)
            n = sA[0] * sA[1]
            ldb = sA[2]

        m = sB[1]
        k = sB[0]
        lda = B.stride()[(1 if transposed_B else 0)]
        ldc = sB[1]
    elif len(sB) == 3:
        # special case
        assert len(sA) == 3
        if not (sA[0] == sB[0] and sA[1] == sB[1]):
            raise ValueError(
                f"Only bsi,bso->io supported for tensor contractions, but dims for A x B were: {sA} x {sB}"
            )

        transposed_A = True
        transposed_B = False

        m = sB[2]
        n = sA[2]
        k = sB[0] * sB[1]

        lda = m
        ldb = sA[2]
        ldc = m

    ptr = CUBLAS_Context.get_instance().get_context(A.device)

    # B^T @ A^T = C^T
    # [km, nk -> mn]
    is_on_gpu([B, A, out])
    lib.cigemm(ptr, ct.c_bool(transposed_B), ct.c_bool(transposed_A), ct.c_int32(m), ct.c_int32(n), ct.c_int32(k),
               get_ptr(B), get_ptr(A), get_ptr(out), ct.c_int32(lda), ct.c_int32(ldb), ct.c_int32(ldc))
    return out
1617
+
1618
+
1619
def batched_igemm(
    A: Tensor,
    B: Tensor,
    out: Tensor = None,
    transposed_A=False,
    transposed_B=False,
):
    """Batched int8 matrix multiply (int32 accumulation) via cuBLAS strided-batched GEMM.

    Both A and B must be 3-D (batch, rows, cols); layout/transpose flags are
    inferred from the tensors' strides, falling back to .contiguous() copies.
    """
    if not len(A.shape) == 3 or not len(B.shape) == 3:
        raise ValueError(
            f"Expected 3-dimensional tensors for bmm, but got shapes A and B: {A.shape} and {B.shape}"
        )
    sout = check_matmul(A, B, out, transposed_A, transposed_B)
    if out is None:
        out = torch.zeros(size=sout, dtype=torch.int32, device=A.device)

    if B.is_contiguous():
        lda = B.stride()[1]
        transposed_A = False
    else:
        s = B.stride()
        if s[0] != B.shape[0]:
            # batch stride inconsistent with a strided-batch layout: copy
            B = B.contiguous()
            lda = B.stride()[1]
        elif s[2] == B.shape[1]:
            # last-two-dims transposed view: usable without a copy
            transposed_A = True
            lda = B.stride()[2]
        else:
            if s[2] == 1:
                B = B.contiguous()
                lda = B.stride()[1]
            elif s[1] == 1:
                B = B.contiguous()
                lda = B.stride()[1]
            else:
                B = B.contiguous()
                lda = B.stride()[1]

    if A.is_contiguous():
        ldb = A.stride()[1]
        transposed_B = False
    else:
        s = A.stride()
        if s[0] != A.shape[0]:
            A = A.contiguous()
            ldb = A.stride()[1]
            transposed_B = False
        elif s[2] == A.shape[1]:
            ldb = A.stride()[2]
            transposed_B = True
        else:
            A = A.contiguous()
            ldb = A.stride()[1]
            transposed_B = False

    # this is a mess: cuBLAS expect column major, but PyTorch is row major.
    # So to perform the matrix multiplication, we have to treat A, B, and C matrices
    # (transpose of row major is column major)
    # This means we compute B^T A^T = C^T and we explicitly switch the dimensions of each of these
    # matrices in the input arguments for cuBLAS

    # column major: A @ B = C: [batch, m, k] @ [batch, k, n] = [batch, m, n]
    # row major: B^T @ A^T = C^T: [batch, m, k] @ [batch, k, n] = [batch, m, n]
    # column major with row major layout: B^T @ A^T = C^T: [batch, k, m] @ [batch, n, k] = [batch, n, m]
    num_batch = A.shape[0]
    n = A.shape[1]
    m = B.shape[2]
    k = B.shape[1]

    ldc = m

    # per-batch strides in elements for the strided-batched GEMM
    strideA = B.shape[1] * B.shape[2]
    strideB = A.shape[1] * A.shape[2]
    strideC = A.shape[1] * B.shape[2]

    ptr = CUBLAS_Context.get_instance().get_context(A.device)

    is_on_gpu([B, A, out])
    lib.cbatched_igemm(ptr, ct.c_bool(transposed_B), ct.c_bool(transposed_A), ct.c_int32(m), ct.c_int32(n), ct.c_int32(k),
                       get_ptr(B), get_ptr(A), get_ptr(out), ct.c_int32(lda), ct.c_int32(ldb), ct.c_int32(ldc),
                       ct.c_long(strideA), ct.c_long(strideB), ct.c_long(strideC), ct.c_uint32(num_batch))
    return out
1700
+
1701
+
1702
def igemmlt(A, B, SA, SB, out=None, Sout=None, dtype=torch.int32):
    """Int8 matmul ``A @ B^T`` via cuBLASLt on tiled tensor layouts.

    A and B are int8 CUDA tensors already transformed into the special
    layouts ("col32" for A, "col_turing"/"col_ampere" for B) expected by
    the cublasLt kernels. SA/SB are the (shape, order) state tuples
    produced by ``transform``.

    Returns:
        (out, Sout): the result in "col32" order with the requested
        ``dtype`` (int32 or int8), plus its layout state.

    Raises:
        Exception: if the underlying cublasLt call reports an error.
    """
    shapeA = SA[0]
    shapeB = SB[0]
    dimsA = len(shapeA)
    dimsB = len(shapeB)
    assert dimsB == 2, 'Only two dimensional matrices are supported for argument B'
    if dimsA == 2:
        m = shapeA[0]
    elif dimsA == 3:
        # batched input: fold batch and sequence dims into the row dimension
        m = shapeA[0] * shapeA[1]

    rows = n = shapeB[0]
    assert prod(list(shapeA)) > 0, f'Input tensor dimensions need to be > 0: {shapeA}'

    # if the tensor is empty, return a transformed empty tensor with the right dimensions
    # NOTE(review): the empty result is always float16, not `dtype` — presumably
    # intentional for the downstream dequant path, but worth confirming.
    if shapeA[0] == 0 and dimsA == 2:
        return torch.empty((0, shapeB[0]), device=A.device, dtype=torch.float16)
    elif shapeA[1] == 0 and dimsA == 3:
        return torch.empty(tuple(shapeA[:2] + [shapeB[0]]), device=A.device, dtype=torch.float16)

    if dimsA == 2 and out is None:
        out, Sout = get_transform_buffer(
            (shapeA[0], shapeB[0]), dtype, A.device, "col32", "row"
        )
    elif dimsA == 3 and out is None:
        out, Sout = get_transform_buffer(
            (shapeA[0], shapeA[1], shapeB[0]), dtype, A.device, "col32", "row"
        )

    assert dimsB != 3, "len(B.shape)==3 not supported"
    assert A.device.type == "cuda"
    assert B.device.type == "cuda"
    assert A.dtype == torch.int8
    assert B.dtype == torch.int8
    assert out.dtype == dtype
    assert SA[1] == "col32"
    assert SB[1] in ["col_turing", "col_ampere"]
    assert Sout[1] == "col32"
    assert (
        shapeA[-1] == shapeB[-1]
    ), f"Matmullt only supports A @ B^T. Inner matrix dimensions do not match: A @ B = {shapeA} @ {shapeB}"
    formatB = SB[1]
    prev_device = A.device
    torch.cuda.set_device(A.device)

    ptr = CUBLAS_Context.get_instance().get_context(A.device)
    ptrA = get_ptr(A)
    ptrB = get_ptr(B)
    ptrC = get_ptr(out)

    k = shapeA[-1]
    # leading dimensions follow the col32 tiling: 32 values per column tile
    lda = ct.c_int32(m * 32)
    if formatB == "col_turing":
        # turing: tiles with rows filled up to multiple of 8 rows by 32 columns
        # n = rows
        ldb = ct.c_int32(((rows + 7) // 8) * 8 * 32)
    else:
        # ampere: tiles with rows filled up to multiple of 32 rows by 32 columns
        # n = rows
        ldb = ct.c_int32(((rows + 31) // 32) * 32 * 32)

    ldc = ct.c_int32(m * 32)
    m = ct.c_int32(m)
    n = ct.c_int32(n)
    k = ct.c_int32(k)

    has_error = 0
    ptrRowScale = get_ptr(None)
    is_on_gpu([A, B, out])
    # dispatch on GPU architecture (tile format) and accumulator dtype
    if formatB == 'col_turing':
        if dtype == torch.int32:
            has_error = lib.cigemmlt_turing_32(
                ptr, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc
            )
        else:
            has_error = lib.cigemmlt_turing_8(
                ptr, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc
            )
    elif formatB == "col_ampere":
        if dtype == torch.int32:
            has_error = lib.cigemmlt_ampere_32(
                ptr, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc
            )
        else:
            has_error = lib.cigemmlt_ampere_8(
                ptr, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc
            )

    if has_error == 1:
        print(f'A: {shapeA}, B: {shapeB}, C: {Sout[0]}; (lda, ldb, ldc): {(lda, ldb, ldc)}; (m, n, k): {(m, n, k)}')
        raise Exception('cublasLt ran into an error!')

    torch.cuda.set_device(prev_device)

    return out, Sout
1797
+
1798
+
1799
def mm_dequant(
    A,
    quant_state,
    row_stats,
    col_stats,
    out=None,
    new_row_stats=None,
    new_col_stats=None,
    bias=None
):
    """Dequantize the int32 result of an int8 matmul back to fp16.

    Args:
        A: int32 CUDA tensor holding the raw matmul accumulator.
        quant_state: (shape, order) tuple; quant_state[0] gives the output shape.
        row_stats: fp32 per-row absmax statistics of the first fp16 operand.
        col_stats: fp32 per-column absmax statistics of the second fp16 operand.
        out: optional preallocated fp16 output buffer.
        new_row_stats: optional fp32 scratch buffer, one entry per output row.
        new_col_stats: optional fp32 scratch buffer, one entry per output column.
        bias: optional fp16 bias fused into the dequantization kernel.

    Returns:
        fp16 tensor; a 3D quant_state shape is flattened to 2D.
    """
    assert A.dtype == torch.int32
    if bias is not None: assert bias.dtype == torch.float16
    out_shape = quant_state[0]
    if len(out_shape) == 3:
        # collapse (batch, seq, hidden) into 2D for the kernel
        out_shape = (out_shape[0] * out_shape[1], out_shape[2])

    if out is None:
        out = torch.empty(out_shape, dtype=torch.float16, device=A.device)
    if new_row_stats is None:
        new_row_stats = torch.empty(
            out_shape[0], dtype=torch.float32, device=A.device
        )
    if new_col_stats is None:
        new_col_stats = torch.empty(
            out_shape[1], dtype=torch.float32, device=A.device
        )
    assert (
        new_row_stats.shape[0] == row_stats.shape[0]
    ), f"{new_row_stats.shape} vs {row_stats.shape}"
    assert (
        new_col_stats.shape[0] == col_stats.shape[0]
    ), f"{new_col_stats.shape} vs {col_stats.shape}"

    prev_device = pre_call(A.device)
    ptrA = get_ptr(A)
    ptrOut = get_ptr(out)
    ptrRowStats = get_ptr(row_stats)
    ptrColStats = get_ptr(col_stats)
    ptrNewRowStats = get_ptr(new_row_stats)
    ptrNewColStats = get_ptr(new_col_stats)
    ptrBias = get_ptr(bias)  # NULL pointer if bias is None
    numRows = ct.c_int32(out_shape[0])
    numCols = ct.c_int32(out_shape[1])

    is_on_gpu([A, row_stats, col_stats, out, new_row_stats, new_col_stats, bias])
    lib.cdequant_mm_int32_fp16(ptrA, ptrRowStats, ptrColStats, ptrOut, ptrNewRowStats, ptrNewColStats, ptrBias, numRows, numCols)
    post_call(prev_device)

    return out
1848
+
1849
+
1850
def get_colrow_absmax(
    A, row_stats=None, col_stats=None, nnz_block_ptr=None, threshold=0.0
):
    """Compute per-row and per-column absmax statistics of a fp16 matrix.

    If ``threshold > 0``, additionally counts entries with magnitude above the
    threshold per tile into ``nnz_block_ptr`` (cumulative after the kernel) so
    that a sparse outlier tensor can later be allocated with the right size.

    Args:
        A: fp16 tensor, 2D or 3D (3D is flattened over the first two dims).
        row_stats, col_stats: optional preallocated fp32 output buffers.
        nnz_block_ptr: optional int32 counter buffer for outlier bookkeeping.
        threshold: outlier magnitude threshold; 0.0 disables outlier counting.

    Returns:
        (row_stats, col_stats, nnz_block_ptr) — nnz_block_ptr is None when
        threshold == 0 and no buffer was supplied.
    """
    assert A.dtype == torch.float16
    device = A.device

    cols = A.shape[-1]
    if len(A.shape) == 3:
        rows = A.shape[0] * A.shape[1]
    else:
        rows = A.shape[0]

    # tiling used by the CUDA kernel: columns in blocks of 256, rows padded
    # to a multiple of 16 — presumably matching the kernel's block shape
    col_tiles = (cols + 255) // 256
    tiled_rows = ((rows + 15) // 16) * 16
    if row_stats is None:
        # -50000.0 acts as "minus infinity" initializer for the max reduction
        row_stats = torch.empty(
            (rows,), dtype=torch.float32, device=device
        ).fill_(-50000.0)
    if col_stats is None:
        col_stats = torch.empty(
            (cols,), dtype=torch.float32, device=device
        ).fill_(-50000.0)

    if nnz_block_ptr is None and threshold > 0.0:
        nnz_block_ptr = torch.zeros(
            ((tiled_rows * col_tiles) + 1,), dtype=torch.int32, device=device
        )

    ptrA = get_ptr(A)
    ptrRowStats = get_ptr(row_stats)
    ptrColStats = get_ptr(col_stats)
    ptrNnzrows = get_ptr(nnz_block_ptr)
    rows = ct.c_int32(rows)
    cols = ct.c_int32(cols)

    prev_device = pre_call(A.device)
    is_on_gpu([A, row_stats, col_stats, nnz_block_ptr])
    lib.cget_col_row_stats(ptrA, ptrRowStats, ptrColStats, ptrNnzrows, ct.c_float(threshold), rows, cols)
    post_call(prev_device)

    if threshold > 0.0:
        # turn per-block counts into a prefix sum (block start offsets)
        nnz_block_ptr.cumsum_(0)

    return row_stats, col_stats, nnz_block_ptr
1894
+
1895
+
1896
class COOSparseTensor:
    """Minimal COO (coordinate-format) sparse matrix container.

    Stores int32 row/column index vectors and fp16 values; construction
    validates dtypes and that each vector has exactly ``nnz`` entries.
    """

    def __init__(self, rows, cols, nnz, rowidx, colidx, values):
        # dtype checks first, then element-count consistency checks
        assert rowidx.dtype == torch.int32
        assert colidx.dtype == torch.int32
        assert values.dtype == torch.float16
        assert values.numel() == nnz
        assert rowidx.numel() == nnz
        assert colidx.numel() == nnz

        self.rows, self.cols, self.nnz = rows, cols, nnz
        self.rowidx, self.colidx, self.values = rowidx, colidx, values
1911
+
1912
+
1913
class CSRSparseTensor:
    """Minimal CSR (compressed sparse row) matrix container.

    ``rowptr`` has ``rows + 1`` entries; ``colidx`` and ``values`` each
    have ``nnz`` entries. Construction validates dtypes and sizes.
    """

    def __init__(self, rows, cols, nnz, rowptr, colidx, values):
        # dtype checks first, then size consistency checks
        assert rowptr.dtype == torch.int32
        assert colidx.dtype == torch.int32
        assert values.dtype == torch.float16
        assert values.numel() == nnz
        assert colidx.numel() == nnz
        assert rowptr.numel() == rows + 1

        self.rows, self.cols, self.nnz = rows, cols, nnz
        self.rowptr, self.colidx, self.values = rowptr, colidx, values
1928
+
1929
+
1930
class CSCSparseTensor:
    """Minimal CSC (compressed sparse column) matrix container.

    ``colptr`` has ``cols + 1`` entries; ``rowidx`` and ``values`` each
    have ``nnz`` entries. Construction validates dtypes and sizes.
    """

    def __init__(self, rows, cols, nnz, colptr, rowidx, values):
        # dtype checks first, then size consistency checks
        assert colptr.dtype == torch.int32
        assert rowidx.dtype == torch.int32
        assert values.dtype == torch.float16
        assert values.numel() == nnz
        assert rowidx.numel() == nnz
        assert colptr.numel() == cols + 1

        self.rows, self.cols, self.nnz = rows, cols, nnz
        self.colptr, self.rowidx, self.values = colptr, rowidx, values
1945
+
1946
+
1947
def coo2csr(cooA):
    """Convert a COO sparse tensor to CSR format.

    Assumes ``cooA.rowidx`` is row-sorted (colidx/values are reused as-is).
    The row-pointer array is built by counting entries per occupied row,
    scattering the counts, and taking a prefix sum.
    """
    occupied_rows, counts = torch.unique(cooA.rowidx, return_counts=True)
    # shift by one so that after cumsum, rowptr[r] is the start offset of row r
    occupied_rows.add_(1)
    rowptr = torch.zeros(
        (cooA.rows + 1,), dtype=torch.int32, device=cooA.rowidx.device
    )
    rowptr.scatter_(0, occupied_rows.long(), counts.int())
    rowptr.cumsum_(0)
    return CSRSparseTensor(
        cooA.rows, cooA.cols, cooA.nnz, rowptr, cooA.colidx, cooA.values
    )
1958
+
1959
+
1960
def coo2csc(cooA):
    """Convert a COO sparse tensor to CSC format.

    Entries are first reordered by column, then the column-pointer array is
    built by counting entries per occupied column, scattering the counts,
    and taking a prefix sum.
    """
    sorted_cols, order = torch.sort(cooA.colidx)
    rowidx = cooA.rowidx[order]
    values = cooA.values[order]
    occupied_cols, counts = torch.unique(sorted_cols, return_counts=True)
    # shift by one so that after cumsum, colptr[c] is the start offset of column c
    occupied_cols.add_(1)
    colptr = torch.zeros(
        (cooA.cols + 1,), dtype=torch.int32, device=cooA.colidx.device
    )
    colptr.scatter_(0, occupied_cols.long(), counts.int())
    colptr.cumsum_(0)
    return CSCSparseTensor(
        cooA.rows, cooA.cols, cooA.nnz, colptr, rowidx, values
    )
1974
+
1975
+
1976
def coo_zeros(rows, cols, nnz, device, dtype=torch.half):
    """Allocate an all-zero COO sparse tensor with capacity for ``nnz`` entries."""
    idx_kwargs = dict(dtype=torch.int32, device=device)
    rowidx = torch.zeros((nnz,), **idx_kwargs)
    colidx = torch.zeros((nnz,), **idx_kwargs)
    values = torch.zeros((nnz,), dtype=dtype, device=device)
    return COOSparseTensor(rows, cols, nnz, rowidx, colidx, values)
1981
+
1982
+
1983
def double_quant(
    A, col_stats=None, row_stats=None, out_col=None, out_row=None, threshold=0.0
):
    """Quantize a fp16 matrix to int8 simultaneously row-wise and column-wise.

    Optionally extracts outliers (entries with magnitude above ``threshold``)
    into a separate COO sparse tensor so they can be handled in fp16.

    Args:
        A: fp16 CUDA tensor, 2D or 3D (3D is flattened over the first two dims).
        col_stats, row_stats: optional precomputed absmax statistics; computed
            via ``get_colrow_absmax`` when either is missing.
        out_col, out_row: optional preallocated int8 output buffers.
        threshold: outlier magnitude threshold; 0.0 disables outlier handling.

    Returns:
        (out_row, out_col, row_stats, col_stats, coo_tensor) where coo_tensor
        is None unless threshold > 0 and outliers were found.
    """
    device = A.device
    assert A.dtype == torch.half
    assert device.type == "cuda"
    prev_device = pre_call(A.device)

    cols = A.shape[-1]
    if len(A.shape) == 3:
        rows = A.shape[0] * A.shape[1]
    else:
        rows = A.shape[0]

    if row_stats is None or col_stats is None:
        row_stats, col_stats, nnz_row_ptr = get_colrow_absmax(
            A, threshold=threshold
        )

    if out_col is None:
        out_col = torch.zeros(A.shape, device=device, dtype=torch.int8)
    if out_row is None:
        out_row = torch.zeros(A.shape, device=device, dtype=torch.int8)

    coo_tensor = None
    ptrA = get_ptr(A)
    ptrColStats = get_ptr(col_stats)
    ptrRowStats = get_ptr(row_stats)
    ptrOutCol = get_ptr(out_col)
    ptrOutRow = get_ptr(out_row)

    is_on_gpu([A, col_stats, row_stats, out_col, out_row])
    if threshold > 0.0:
        # total outlier count is the last entry of the cumulative block counts
        nnz = nnz_row_ptr[-1].item()
        if nnz > 0:
            # NOTE(review): rows/cols here come from A.shape[0]/A.shape[1],
            # which differs from the flattened `rows` for 3D inputs — confirm
            # 3D + threshold is a supported combination.
            coo_tensor = coo_zeros(
                A.shape[0], A.shape[1], nnz_row_ptr[-1].item(), device
            )
            ptrRowIdx = get_ptr(coo_tensor.rowidx)
            ptrColIdx = get_ptr(coo_tensor.colidx)
            ptrVal = get_ptr(coo_tensor.values)
            ptrRowPtr = get_ptr(nnz_row_ptr)

            lib.cdouble_rowcol_quant(
                ptrA,
                ptrRowStats,
                ptrColStats,
                ptrOutCol,
                ptrOutRow,
                ptrRowIdx,
                ptrColIdx,
                ptrVal,
                ptrRowPtr,
                ct.c_float(threshold),
                ct.c_int32(rows),
                ct.c_int32(cols),
            )
            # sort the extracted outliers by row index for downstream spmm
            val, idx = torch.sort(coo_tensor.rowidx)
            coo_tensor.rowidx = val
            coo_tensor.colidx = coo_tensor.colidx[idx]
            coo_tensor.values = coo_tensor.values[idx]
        else:
            # threshold set but no outliers found: quantize without extraction
            lib.cdouble_rowcol_quant(
                ptrA,
                ptrRowStats,
                ptrColStats,
                ptrOutCol,
                ptrOutRow,
                None,
                None,
                None,
                None,
                ct.c_float(0.0),
                ct.c_int32(rows),
                ct.c_int32(cols),
            )
    else:
        lib.cdouble_rowcol_quant(
            ptrA,
            ptrRowStats,
            ptrColStats,
            ptrOutCol,
            ptrOutRow,
            None,
            None,
            None,
            None,
            ct.c_float(threshold),
            ct.c_int32(rows),
            ct.c_int32(cols),
        )
    post_call(prev_device)

    return out_row, out_col, row_stats, col_stats, coo_tensor
2077
+
2078
+
2079
+ def transform(A, to_order, from_order='row', out=None, transpose=False, state=None, ld=None):
2080
+ prev_device = pre_call(A.device)
2081
+ if state is None: state = (A.shape, from_order)
2082
+ else: from_order = state[1]
2083
+ if out is None: out, new_state = get_transform_buffer(state[0], A.dtype, A.device, to_order, state[1], transpose)
2084
+ else: new_state = (state[0], to_order) # (shape, order)
2085
+
2086
+ shape = state[0]
2087
+ if len(shape) == 2:
2088
+ dim1 = ct.c_int32(shape[0])
2089
+ dim2 = ct.c_int32(shape[1])
2090
+ else:
2091
+ dim1 = ct.c_int32(shape[0] * shape[1])
2092
+ dim2 = ct.c_int32(shape[2])
2093
+
2094
+ is_on_gpu([A, out])
2095
+ if to_order == 'col32':
2096
+ if transpose:
2097
+ lib.ctransform_row2col32T(get_ptr(A), get_ptr(out), dim1, dim2)
2098
+ else:
2099
+ lib.ctransform_row2col32(get_ptr(A), get_ptr(out), dim1, dim2)
2100
+ elif to_order == "col_turing":
2101
+ if transpose:
2102
+ lib.ctransform_row2turingT(get_ptr(A), get_ptr(out), dim1, dim2)
2103
+ else:
2104
+ lib.ctransform_row2turing(get_ptr(A), get_ptr(out), dim1, dim2)
2105
+ elif to_order == "col_ampere":
2106
+ if transpose:
2107
+ lib.ctransform_row2ampereT(get_ptr(A), get_ptr(out), dim1, dim2)
2108
+ else:
2109
+ lib.ctransform_row2ampere(get_ptr(A), get_ptr(out), dim1, dim2)
2110
+ elif to_order == "row":
2111
+ if from_order == "col_turing":
2112
+ lib.ctransform_turing2row(get_ptr(A), get_ptr(out), dim1, dim2)
2113
+ elif from_order == "col_ampere":
2114
+ lib.ctransform_ampere2row(get_ptr(A), get_ptr(out), dim1, dim2)
2115
+ else:
2116
+ raise NotImplementedError(f'Transform function not implemented: From {from_order} to {to_order}')
2117
+
2118
+ post_call(prev_device)
2119
+
2120
+ return out, new_state
2121
+
2122
+
2123
def spmm_coo(cooA, B, out=None):
    """Sparse (COO) times dense matrix multiplication via cuSPARSE.

    Computes ``cooA @ B`` where cooA is a COOSparseTensor of shape
    (rows, cols) and B is a dense (cols, n) tensor.

    Args:
        cooA: COOSparseTensor left operand.
        B: dense right operand; a non-contiguous B is treated as transposed.
        out: optional preallocated (cooA.rows, B.shape[1]) output buffer.

    Returns:
        Dense result tensor of shape (cooA.rows, B.shape[1]).
    """
    if out is None:
        out = torch.empty(
            (cooA.rows, B.shape[1]), device=B.device, dtype=B.dtype
        )
    nnz = cooA.nnz
    assert cooA.rowidx.numel() == nnz
    assert cooA.colidx.numel() == nnz
    assert cooA.values.numel() == nnz
    assert cooA.cols == B.shape[0]

    # non-contiguous B is assumed to be a transposed view
    transposed_B = False if B.is_contiguous() else True

    ldb = B.stride()[(1 if transposed_B else 0)]
    ldc = B.shape[1]

    ptr = Cusparse_Context.get_instance().context

    ptrRowidx = get_ptr(cooA.rowidx)
    ptrColidx = get_ptr(cooA.colidx)
    ptrValues = get_ptr(cooA.values)
    ptrB = get_ptr(B)
    ptrC = get_ptr(out)
    cnnz = ct.c_int32(cooA.nnz)
    crowsA = ct.c_int32(cooA.rows)
    ccolsA = ct.c_int32(cooA.cols)
    ccolsB = ct.c_int32(B.shape[1])
    cldb = ct.c_int32(ldb)
    cldc = ct.c_int32(ldc)

    is_on_gpu([cooA.rowidx, cooA.colidx, cooA.values, B, out])
    lib.cspmm_coo(ptr, ptrRowidx, ptrColidx, ptrValues, cnnz, crowsA, ccolsA, ccolsB, cldb, ptrB, cldc, ptrC, ct.c_bool(transposed_B))

    return out
2157
+
2158
+
2159
def spmm_coo_very_sparse(cooA, B, dequant_stats=None, out=None):
    """Sparse (COO) times dense matmul specialized for very sparse matrices.

    Uses a naive custom kernel that is efficient when cooA has very few
    non-zeros per row (at most 32). B may be fp16, or int8 together with
    ``dequant_stats`` for on-the-fly dequantization.

    Args:
        cooA: COOSparseTensor left operand, rowidx must be row-sorted.
        B: dense right operand (fp16 or int8).
        dequant_stats: per-column statistics used to dequantize an int8 B.
        out: optional preallocated output; zero-initialized when allocated
            here because the kernel accumulates into it.

    Returns:
        Dense result tensor of shape (cooA.rows, B.shape[1]) with
        cooA.values' dtype.
    """
    if out is None:
        out = torch.zeros(
            (cooA.rows, B.shape[1]), device=B.device, dtype=cooA.values.dtype
        )
    nnz = cooA.nnz
    prev_device = pre_call(B.device)
    assert cooA.rowidx.numel() == nnz
    assert cooA.colidx.numel() == nnz
    assert cooA.values.numel() == nnz
    assert cooA.cols == B.shape[0], f"{cooA.cols} vs {B.shape}"

    # non-contiguous B is assumed to be a transposed view
    transposed_B = False if B.is_contiguous() else True

    ldb = B.stride()[(1 if transposed_B else 0)]
    ldc = B.shape[1]

    # per-row occupancy: offsets of each occupied row plus rows sorted by
    # how many non-zeros they hold (densest row first)
    values, counts = torch.unique(cooA.rowidx, return_counts=True)
    offset = counts.cumsum(0).int()
    max_count, max_idx = torch.sort(counts, descending=True)
    max_idx = max_idx.int()
    max_count = max_count.int()
    # FIX: message previously said "8" while the enforced limit is 32
    assert (
        max_count[0] <= 32
    ), f"Current max count per row is 32 but found {max_count[0]}."
    assert B.dtype in [torch.float16, torch.int8]
    ptrOffset = get_ptr(offset)
    ptrMaxCount = get_ptr(max_count)
    ptrMaxIdx = get_ptr(max_idx)

    ptrRowidx = get_ptr(cooA.rowidx)
    ptrColidx = get_ptr(cooA.colidx)
    ptrValues = get_ptr(cooA.values)
    ptrB = get_ptr(B)
    ptrC = get_ptr(out)
    ptrDequantStats = get_ptr(dequant_stats)
    cnnz_rows = ct.c_int32(counts.numel())
    cnnz = ct.c_int32(cooA.nnz)
    crowsA = ct.c_int32(cooA.rows)
    ccolsA = ct.c_int32(cooA.cols)
    crowsB = ct.c_int32(B.shape[1])
    ccolsB = ct.c_int32(B.shape[1])
    cldb = ct.c_int32(ldb)
    cldc = ct.c_int32(ldc)

    is_on_gpu([cooA.rowidx, cooA.colidx, cooA.values, B, out, dequant_stats])
    if B.dtype == torch.float16:
        lib.cspmm_coo_very_sparse_naive_fp16(
            ptrMaxCount,
            ptrMaxIdx,
            ptrOffset,
            ptrRowidx,
            ptrColidx,
            ptrValues,
            ptrB,
            ptrC,
            ptrDequantStats,
            cnnz_rows,
            cnnz,
            crowsA,
            crowsB,
            ccolsB,
        )
    elif B.dtype == torch.int8:
        lib.cspmm_coo_very_sparse_naive_int8(
            ptrMaxCount,
            ptrMaxIdx,
            ptrOffset,
            ptrRowidx,
            ptrColidx,
            ptrValues,
            ptrB,
            ptrC,
            ptrDequantStats,
            cnnz_rows,
            cnnz,
            crowsA,
            crowsB,
            ccolsB,
        )
    # else: assertion error
    post_call(prev_device)

    return out
2243
+
2244
+
2245
C = 127.0  # symmetric int8 quantization scale: values are mapped into [-127, 127]
2246
+
2247
+
2248
def vectorwise_quant(x, dim=1, quant_type="vector"):
    """Quantize ``x`` according to the requested scheme.

    Schemes:
        "linear": one absmax scale for the whole tensor -> int8.
        "vector"/"row": one absmax scale per slice along ``dim`` -> int8.
        "zeropoint": asymmetric (min/max) quantization, whole tensor.
        "vector-zeropoint"/"row-zeropoint": asymmetric per slice.
        "truncated-vector": like "vector" but clips outliers above 70% of
            the per-slice absmax (mutates ``x`` in place).

    Returns:
        (quantized, scale) for supported schemes, None otherwise.
    """
    if quant_type == "linear":
        scale = torch.abs(x).max().float()
        return torch.round(x / scale * 127).to(torch.int8), scale
    if quant_type in ("vector", "row"):
        scale = torch.amax(torch.abs(x), dim=dim, keepdim=True)
        return torch.round(x * (C / scale)).to(torch.int8), scale
    if quant_type == "zeropoint":
        x = x.float()
        dyna = x.max() - x.min()
        if dyna == 0:
            # constant tensor: avoid division by zero
            dyna = 1
        qx = 255.0 / dyna
        zpx = torch.round(x.min() * qx)
        x = torch.round(qx * x - zpx) + zpx
        return x, qx
    if quant_type in ("vector-zeropoint", "row-zeropoint"):
        x = x.float()
        dyna = torch.amax(x, dim=dim, keepdim=True) - torch.amin(
            x, dim=dim, keepdim=True
        )
        dyna[dyna == 0] = 1
        qx = 255.0 / dyna
        zpx = torch.round(torch.amin(x, dim=dim, keepdim=True) * qx)
        x = torch.round(qx * x - zpx) + zpx
        return x, qx
    if quant_type == "truncated-vector":
        with torch.no_grad():
            absx = torch.abs(x)
            cap = torch.amax(absx, dim=dim, keepdim=True) * 0.7
            clipped = absx > cap.expand_as(absx)
            # clamp outliers in place (sign-preserving) before quantizing
            x[clipped] = cap.expand_as(absx)[clipped] * torch.sign(x[clipped])
            xq = torch.round(x / cap * C).to(torch.int8)
        return xq, cap
    return None
2292
+
2293
+
2294
def vectorwise_dequant(xq, max1, quant_type="vector"):
    """Invert "vector" quantization; returns None for any other scheme."""
    if quant_type != "vector":
        return None
    return (xq / C * max1).to(torch.float32)
2300
+
2301
+
2302
def vectorwise_mm_dequant(xq, S1, S2, dtype=torch.half, quant_type="vector"):
    """Dequantize the accumulator of a quantized matmul.

    Args:
        xq: integer matmul result.
        S1, S2: quantization statistics of the two original operands.
        dtype: output dtype.
        quant_type: the scheme used by ``vectorwise_quant``.

    Returns:
        Dequantized tensor in ``dtype``, or None for unknown schemes.

    Note: the original implementation branched on ``len(S1.shape) == 2``
    in every scheme, but both branches performed the identical update, so
    those conditionals were dead code and have been removed. Computation
    order (e.g. "row-zeropoint" computing `norm` from the *unsqueezed*
    stats) is preserved exactly.
    """
    if quant_type == "linear":
        norm = S1 * S2 / (C * C)
        # double cast needed to prevent overflows
        return (xq.float() * norm).to(dtype)
    elif quant_type == "zeropoint":
        norm = 1.0 / (S1 * S2)
        return (xq.float() * norm).to(dtype)
    elif quant_type == "row-zeropoint":
        # norm uses the unsqueezed stats, matching the original behavior
        norm = 1.0 / (S1 * S2)
        x = xq.float()
        if len(S1.shape) == 3 and len(x.shape) == 2:
            S1 = S1.squeeze(0)
        if len(S2.shape) == 3 and len(x.shape) == 2:
            S2 = S2.squeeze(0)
        x *= norm
        return x.to(dtype)
    elif quant_type == "vector-zeropoint":
        x = xq.float()
        if len(S1.shape) == 3 and len(x.shape) == 2:
            S1 = S1.squeeze(0)
        if len(S2.shape) == 3 and len(x.shape) == 2:
            S2 = S2.squeeze(0)
        x *= 1.0 / S1
        x *= 1.0 / S2.t()
        return x.to(dtype)
    elif quant_type == "row":
        x = xq.float()
        if len(S1.shape) == 3 and len(x.shape) == 2:
            S1 = S1.squeeze(0)
        if len(S2.shape) == 3 and len(x.shape) == 2:
            S2 = S2.squeeze(0)
        x *= S1 * S2 / (C * C)
        return x.to(dtype)
    elif quant_type in ["truncated-vector", "vector"]:
        x = xq.float()
        if len(S1.shape) == 3 and len(x.shape) == 2:
            S1 = S1.squeeze(0)
        if len(S2.shape) == 3 and len(x.shape) == 2:
            S2 = S2.squeeze(0)
        x *= S1 / C
        x *= S2 / C
        return x.to(dtype)
    else:
        return None
2359
+
2360
+
2361
def dequant_min_max(xq, A, B, SA, SB, dtype=torch.half):
    """Dequantize a matmul result quantized with min/max (zero-point) stats.

    SA holds the (min-related, scale) statistics of the first operand and
    SB the per-slice statistics of the second; B contributes the zero-point
    offset term. Note: ``A`` is accepted for interface symmetry but unused.
    """
    offset = B.float().t().sum(0) * (SA[0] + SA[1])
    x = xq.float()
    if len(xq.shape) == 2 and len(SB.shape) == 3:
        SB = SB.squeeze(0)
    # 2D stats broadcast transposed; lower-rank stats broadcast directly
    scale = SB.t() if len(SB.shape) == 2 else SB
    x *= scale / 127
    x *= SA[1] / 127
    x += offset
    return x.to(dtype)
2373
+
2374
+
2375
def extract_outliers(A, SA, idx):
    """Extract the columns listed in ``idx`` from a tile-layout int8 tensor.

    Args:
        A: int8 CUDA tensor in "col_turing" or "col_ampere" layout.
        SA: (shape, order) state tuple describing A's logical row-major shape.
        idx: 1D tensor of column indices to extract.

    Returns:
        int8 tensor of shape (rows, idx.numel()) in row-major order.
    """
    shapeA = SA[0]
    formatA = SA[1]
    assert formatA in ["col_turing", "col_ampere"]
    assert A.device.type == "cuda"

    out = torch.zeros(
        (shapeA[0], idx.numel()), dtype=torch.int8, device=A.device
    )

    idx_size = ct.c_int32(idx.numel())
    rows = ct.c_int32(shapeA[0])
    cols = ct.c_int32(shapeA[1])
    ptrA = get_ptr(A)
    ptrIdx = get_ptr(idx)
    ptrOut = get_ptr(out)

    prev_device = pre_call(A.device)
    # dispatch on the GPU-architecture-specific tile format
    if formatA == 'col_turing':
        lib.cextractOutliers_turing(ptrA, ptrIdx, ptrOut, idx_size, rows, cols)
    elif formatA == "col_ampere":
        lib.cextractOutliers_ampere(ptrA, ptrIdx, ptrOut, idx_size, rows, cols)
    post_call(prev_device)

    return out
2400
+
2401
def pipeline_test(A, batch_size):
    """Run the library's pipeline test kernel on ``A`` in ``batch_size`` chunks.

    Debug/benchmark helper: copies ``A`` into a new tensor via the CUDA
    pipeline kernel and returns the result.
    """
    out = torch.zeros_like(A)
    lib.cpipeline_test(get_ptr(A), get_ptr(out), ct.c_size_t(A.numel()), ct.c_size_t(batch_size))
    return out
mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda110.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d4d33ffbdc477c2cf67635a876cd185d07032884e45f729225f925ad411290ac
3
+ size 5938904
mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda110_nocublaslt.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:abfd599f616509de8b6976308e40eb13faead2516e3700e318126558cfcdb9f8
3
+ size 11110784
mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda111.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2412d9eb0056079c750dab05ab1728839f71c64b395c7ccb01d4051998c28836
3
+ size 8974040
mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda111_nocublaslt.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d6ab2775c01535c0b3f8b717cad598c7cd613c98227c880d0f0d4c4b1f356b2b
3
+ size 20244864
mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda114.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:921d1d561cf912848872916846df6f07daed31261ac85ad3d6e4260c03ff9c22
3
+ size 9313912
mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda115_nocublaslt.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d6f07c6d5f84ea95d3299cce99fc455a2cf797b2ebb50b92021f67df7a18d613
3
+ size 20925040
mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda117_nocublaslt.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ea73f42ca2c101ce377b70d14d03ee901b97b51861b570b7c4a21edddb7cf10
3
+ size 20741032
mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda118_nocublaslt.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:19a62aa3c37b70d9316c0848f9d8b3cb205bb8ea9b11b3514749f287c2c8596e
3
+ size 26516696
mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda120.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d689551ab03f16b9b767361a5310ca4727965db2654d35a194f8ced5ad2297fa
3
+ size 14504296
mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda120_nocublaslt.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5545f220765be15ebe48e18f54f5951f278a69f1bdc503cb11021e95387af4ea
3
+ size 25709592
mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d128372dfbdb34a68429d2c3454a076fac650c79dda546ba2be9a5415378463
3
+ size 14512488
mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121_nocublaslt.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:61e607dfa9b987637bb542d4a19f06b7e0807dbf2cccb0cf5b99bd38b0101401
3
+ size 25721880
mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda122.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:16b8578667eb6836c6b7923a3b3508d62809e4e91429674a0c3ab97cf60c5349
3
+ size 14561032
mgm/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda122_nocublaslt.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:58f4a2043fe8cee52c93f69176825de57553727a5a9e7984991eb2a24da66530
3
+ size 25803272
mgm/lib/python3.10/site-packages/bitsandbytes/nn/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ from .modules import Int8Params, Linear8bitLt, StableEmbedding, Linear4bit, LinearNF4, LinearFP4, Params4bit, OutlierAwareLinear, SwitchBackLinearBnb
6
+ from .triton_based_modules import SwitchBackLinear, SwitchBackLinearGlobal, SwitchBackLinearVectorwise, StandardLinear
mgm/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (552 Bytes). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/modules.cpython-310.pyc ADDED
Binary file (15.3 kB). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/triton_based_modules.cpython-310.pyc ADDED
Binary file (7.01 kB). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/nn/modules.py ADDED
@@ -0,0 +1,518 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ from typing import Optional, TypeVar, Union, overload
6
+
7
+ import warnings
8
+ import torch
9
+ import torch.nn.functional as F
10
+ from torch import Tensor, device, dtype, nn
11
+
12
+ import bitsandbytes as bnb
13
+ import bitsandbytes.functional
14
+ from bitsandbytes.autograd._functions import undo_layout, get_tile_inds
15
+ from bitsandbytes.optim import GlobalOptimManager
16
+ from bitsandbytes.utils import OutlierTracer, find_outlier_dims
17
+
18
+ T = TypeVar("T", bound="torch.nn.Module")
19
+
20
+
21
class StableEmbedding(torch.nn.Embedding):
    """Embedding layer with layer norm and 32-bit optimizer state.

    Designed for stable training with 8-bit optimizers: it registers an
    override so the optimizer keeps 32-bit state for this weight, applies
    LayerNorm to the embedding output, and uses Xavier-uniform init.
    """

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: Optional[int] = None,
        max_norm: Optional[float] = None,
        norm_type: float = 2.0,
        scale_grad_by_freq: bool = False,
        sparse: bool = False,
        _weight: Optional[Tensor] = None,
        device=None,
        dtype=None,
    ) -> None:
        super().__init__(
            num_embeddings,
            embedding_dim,
            padding_idx,
            max_norm,
            norm_type,
            scale_grad_by_freq,
            sparse,
            _weight,
            device,
            dtype,
        )
        self.norm = torch.nn.LayerNorm(embedding_dim, device=device)
        # force 32-bit optimizer state for the embedding weight
        GlobalOptimManager.get_instance().register_module_override(
            self, "weight", {"optim_bits": 32}
        )

    def reset_parameters(self) -> None:
        torch.nn.init.xavier_uniform_(self.weight)
        self._fill_padding_idx_with_zero()

    """ !!! This is a redefinition of _fill_padding_idx_with_zero in torch.nn.Embedding
    to make the Layer compatible with Pytorch < 1.9.
    This means that if this changes in future PyTorch releases this need to change too
    which is cumbersome. However, with this we can ensure compatibility with previous
    PyTorch releases.
    """

    def _fill_padding_idx_with_zero(self) -> None:
        if self.padding_idx is not None:
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(self, input: Tensor) -> Tensor:
        emb = F.embedding(
            input,
            self.weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )

        # always apply layer norm in full precision
        emb = emb.to(torch.get_default_dtype())

        return self.norm(emb).to(self.weight.dtype)
+
84
+
85
class Embedding(torch.nn.Embedding):
    """Standard embedding layer that forces 32-bit optimizer state.

    Identical to ``torch.nn.Embedding`` except that it registers an
    override so 8-bit optimizers keep 32-bit state for this weight, and
    uses Xavier-uniform initialization.
    """

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: Optional[int] = None,
        max_norm: Optional[float] = None,
        norm_type: float = 2.0,
        scale_grad_by_freq: bool = False,
        sparse: bool = False,
        _weight: Optional[Tensor] = None,
        device: Optional[device] = None,
    ) -> None:
        super().__init__(
            num_embeddings,
            embedding_dim,
            padding_idx,
            max_norm,
            norm_type,
            scale_grad_by_freq,
            sparse,
            _weight,
            device=device
        )
        # force 32-bit optimizer state for the embedding weight
        GlobalOptimManager.get_instance().register_module_override(
            self, "weight", {"optim_bits": 32}
        )

    def reset_parameters(self) -> None:
        torch.nn.init.xavier_uniform_(self.weight)
        self._fill_padding_idx_with_zero()

    """ !!! This is a redefinition of _fill_padding_idx_with_zero in torch.nn.Embedding
    to make the Layer compatible with Pytorch < 1.9.
    This means that if this changes in future PyTorch releases this need to change too
    which is cumbersome. However, with this we can ensure compatibility with previous
    PyTorch releases.
    """

    def _fill_padding_idx_with_zero(self) -> None:
        if self.padding_idx is not None:
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(self, input: Tensor) -> Tensor:
        emb = F.embedding(
            input,
            self.weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )

        return emb
+ return emb
141
+
142
class Params4bit(torch.nn.Parameter):
    """Parameter subclass holding 4-bit quantized weights plus quant state.

    The weight is quantized lazily: it stays in its original dtype on CPU
    and is quantized (fp4/nf4, blockwise) on the first move to a CUDA
    device via ``.cuda(...)`` / ``.to('cuda')``.
    """

    def __new__(cls, data=None, requires_grad=True, quant_state=None, blocksize=64, compress_statistics=True, quant_type='fp4'):
        if data is None:
            data = torch.empty(0)

        self = torch.Tensor._make_subclass(cls, data, requires_grad)
        self.blocksize = blocksize                        # quantization block size
        self.compress_statistics = compress_statistics    # nested (double) quantization of absmax
        self.quant_type = quant_type                      # 'fp4' or 'nf4'
        self.quant_state = quant_state                    # populated after quantization
        self.data = data
        return self

    def cuda(self, device):
        # quantize on the way to the GPU; replaces `data` with the packed
        # 4-bit representation and stores the quantization state
        w = self.data.contiguous().half().cuda(device)
        w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=self.blocksize, compress_statistics=self.compress_statistics, quant_type=self.quant_type)
        self.data = w_4bit
        self.quant_state = quant_state

        return self

    @overload
    def to(self: T, device: Optional[Union[int, device]] = ..., dtype: Optional[Union[dtype, str]] = ..., non_blocking: bool = ...,) -> T:
        ...

    @overload
    def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T:
        ...

    @overload
    def to(self: T, tensor: Tensor, non_blocking: bool = ...) -> T:
        ...

    def to(self, *args, **kwargs):
        device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)

        if (device is not None and device.type == "cuda" and self.data.device.type == "cpu"):
            # first CPU -> CUDA move triggers quantization
            return self.cuda(device)
        else:
            s = self.quant_state
            if s is not None:
                # make sure the quantization state is on the right device
                s[0] = s[0].to(device)
                if self.compress_statistics:
                    # TODO: refactor this. This is a nightmare
                    # for 4-bit:
                    # state = [qabsmax, input_shape, A.dtype, blocksize, [offset, state2], quant_type]
                    # state2 = [absmax, input_shape, A.dtype, blocksize, None, quant_type]
                    #s[-2][0] = s[-2][0].to(device) # offset
                    #s[-2][1][0] = s[-2][1][0].to(device) # nested absmax

                    # for 8-bit
                    # NOTE(review): the s[-3] indexing below follows the 8-bit
                    # state layout per the comment above — confirm it matches
                    # the state produced by quantize_4bit for all paths.
                    s[-3][0] = s[-3][0].to(device) # offset
                    s[-3][1][0] = s[-3][1][0].to(device) # nested quantiation state statitics
                    s[-3][1][1] = s[-3][1][1].to(device) # nested quantiation codebook
            new_param = Params4bit(super().to(device=device, dtype=dtype, non_blocking=non_blocking),
                                   requires_grad=self.requires_grad, quant_state=self.quant_state,
                                   blocksize=self.blocksize, compress_statistics=self.compress_statistics,
                                   quant_type=self.quant_type)

            return new_param
+ return new_param
203
+
204
class Linear4bit(nn.Linear):
    """Linear layer whose weight is stored 4-bit quantized (FP4 or NF4).

    The weight is quantized when the module is moved to CUDA (see
    ``Params4bit.cuda``); the matmul then runs through ``bnb.matmul_4bit``
    using the stored quantization state.
    """
    def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, quant_type='fp4',device=None):
        super().__init__(input_features, output_features, bias, device)
        # Replace the fp32 weight with the quantizable 4-bit parameter wrapper.
        self.weight = Params4bit(self.weight.data, requires_grad=False, compress_statistics=compress_statistics, quant_type=quant_type)
        self.compute_dtype = compute_dtype
        # Resolved once, from the first input seen, in forward().
        self.compute_type_is_set = False

    def set_compute_type(self, x):
        """Choose the matmul compute dtype based on the first input's dtype (one-time)."""
        if x.dtype in [torch.float32, torch.bfloat16]:
            # the input is in a dtype that is safe to compute in, we switch
            # to this type for speed and stability
            self.compute_dtype = x.dtype
        elif x.dtype == torch.float16:
            # we take the compute dtype passed into the layer
            if self.compute_dtype == torch.float32 and (x.numel() == x.shape[-1]):
                # single batch inference with input torch.float16 and compute_dtype float32 -> slow inference when it could be fast
                # warn the user about this (once; subsequent matches are filtered)
                warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_type=torch.float32 (default). This will lead to slow inference.')
                warnings.filterwarnings('ignore', message='.*inference.')
            if self.compute_dtype == torch.float32 and (x.numel() != x.shape[-1]):
                warnings.warn(f'Input type into Linear4bit is torch.float16, but bnb_4bit_compute_type=torch.float32 (default). This will lead to slow inference or training speed.')
                warnings.filterwarnings('ignore', message='.*inference or training')

    def forward(self, x: torch.Tensor):
        # weights are cast automatically as Int8Params, but the bias has to be cast manually
        if self.bias is not None and self.bias.dtype != x.dtype:
            self.bias.data = self.bias.data.to(x.dtype)

        if getattr(self.weight, 'quant_state', None) is None:
            print('FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first.')
        if not self.compute_type_is_set:
            self.set_compute_type(x)
            self.compute_type_is_set = True

        # Remember the caller's dtype so the output can be cast back.
        inp_dtype = x.dtype
        if self.compute_dtype is not None:
            x = x.to(self.compute_dtype)

        bias = None if self.bias is None else self.bias.to(self.compute_dtype)
        out = bnb.matmul_4bit(x, self.weight.t(), bias=bias, quant_state=self.weight.quant_state)

        out = out.to(inp_dtype)

        return out
253
+
254
class LinearFP4(Linear4bit):
    """Linear layer whose weight is stored in the 4-bit FP4 format."""

    def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, device=None):
        super().__init__(
            input_features,
            output_features,
            bias,
            compute_dtype,
            compress_statistics,
            quant_type='fp4',
            device=device,
        )
257
+
258
class LinearNF4(Linear4bit):
    """Linear layer using the NF4 (NormalFloat4) data type.

    NF4 is a 4-bit code where each quantization bin has equal area under a
    standard normal distribution N(0, 1), normalized into the range [-1, 1].

    For details see the QLoRA paper: "QLoRA: Efficient Finetuning of Quantized
    LLMs" (https://arxiv.org/abs/2305.14314). The codebook itself is built by
    `create_normal_map` in bitsandbytes/functional.py:
    https://github.com/TimDettmers/bitsandbytes/blob/main/bitsandbytes/functional.py#L236.
    """

    def __init__(self, input_features, output_features, bias=True, compute_dtype=None, compress_statistics=True, device=None):
        super().__init__(
            input_features,
            output_features,
            bias,
            compute_dtype,
            compress_statistics,
            quant_type='nf4',
            device=device,
        )
271
+
272
+
273
+
274
class Int8Params(torch.nn.Parameter):
    """Parameter subclass for LLM.int8() weights.

    After ``.cuda()`` / ``.to('cuda')`` the fp16 master weight is quantized and
    the int8 row-major weight (``CB``) plus its scale statistics (``SCB``) are
    kept on the parameter until ``Linear8bitLt.init_8bit_state`` claims them.
    """
    def __new__(
        cls,
        data=None,
        requires_grad=True,
        has_fp16_weights=False,
        CB=None,
        SCB=None,
    ):
        if data is None:
            data = torch.empty(0)
        obj = torch.Tensor._make_subclass(cls, data, requires_grad)
        # Bug fix: these attributes were previously assigned to `cls`, so every
        # Int8Params instance in the process shared — and clobbered — the same
        # flags and quantization buffers. They are per-parameter state and
        # belong on the instance; the CB/SCB constructor arguments are now
        # honored as well (they default to None, matching the old behavior).
        obj.has_fp16_weights = has_fp16_weights
        obj.CB = CB
        obj.SCB = SCB
        return obj

    def cuda(self, device):
        """Move to ``device``; quantize to int8 unless fp16 weights are kept (training mode)."""
        if self.has_fp16_weights:
            return super().cuda(device)
        else:
            # we store the 8-bit row-major weight;
            # it is converted to the turing/ampere layout during the first inference pass
            B = self.data.contiguous().half().cuda(device)
            CB, CBt, SCB, SCBt, coo_tensorB = bnb.functional.double_quant(B)
            # Only the non-transposed quantization is needed for inference.
            del CBt
            del SCBt
            self.data = CB
            self.CB = CB
            self.SCB = SCB

            return self

    @overload
    def to(
        self: T,
        device: Optional[Union[int, device]] = ...,
        dtype: Optional[Union[dtype, str]] = ...,
        non_blocking: bool = ...,
    ) -> T:
        ...

    @overload
    def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T:
        ...

    @overload
    def to(self: T, tensor: Tensor, non_blocking: bool = ...) -> T:
        ...

    def to(self, *args, **kwargs):
        device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(
            *args, **kwargs
        )

        # The first CPU -> CUDA move triggers quantization; any other move just
        # transfers the data and carries the quantization buffers along.
        if (
            device is not None
            and device.type == "cuda"
            and self.data.device.type == "cpu"
        ):
            return self.cuda(device)
        else:
            new_param = Int8Params(
                super().to(
                    device=device, dtype=dtype, non_blocking=non_blocking
                ),
                requires_grad=self.requires_grad,
                has_fp16_weights=self.has_fp16_weights,
            )
            new_param.CB = self.CB
            new_param.SCB = self.SCB

            return new_param
346
+
347
+
348
def maybe_rearrange_weight(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
    """Load-state-dict pre-hook: convert a weight saved in a tiled (turing/ampere)
    layout back to row-major before it is copied into the module."""
    weight_key = f"{prefix}weight"
    weight = state_dict.get(weight_key)
    if weight is None:
        # No weight saved for this layer (e.g. a LoRA-only checkpoint): nothing to do.
        return
    # The auxiliary format entry is consumed here so it never reaches load_state_dict.
    stored_format = state_dict.pop(f"{prefix}weight_format", "row")
    if stored_format == "row":
        return
    tile_indices = get_tile_inds(stored_format, weight.device)
    state_dict[weight_key] = undo_layout(weight, tile_indices)
358
+
359
+
360
class Linear8bitLt(nn.Linear):
    """Linear layer with LLM.int8() 8-bit quantized weights (via ``bnb.matmul``).

    The weight is quantized when moved to CUDA (``Int8Params.cuda``); on the
    first forward the quantization buffers migrate from the parameter into the
    matmul state (``init_8bit_state``).
    """
    def __init__(self, input_features, output_features, bias=True, has_fp16_weights=True,
                 memory_efficient_backward=False, threshold=0.0, index=None, device=None):
        super().__init__(input_features, output_features, bias, device)
        assert not memory_efficient_backward, "memory_efficient_backward is no longer required and the argument is deprecated in 0.37.0 and will be removed in 0.39.0"
        self.state = bnb.MatmulLtState()
        self.index = index

        # threshold > 0 enables mixed-precision decomposition for outlier features.
        self.state.threshold = threshold
        self.state.has_fp16_weights = has_fp16_weights
        self.state.memory_efficient_backward = memory_efficient_backward
        if threshold > 0.0 and not has_fp16_weights:
            self.state.use_pool = True

        self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights)
        # Re-layout tiled checkpoint weights to row-major on load.
        self._register_load_state_dict_pre_hook(maybe_rearrange_weight)

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        super()._save_to_state_dict(destination, prefix, keep_vars)

        # we only need to save SCB as extra data, because CB for quantized weights is already stored in weight.data
        scb_name = "SCB"

        # case 1: .cuda was called, SCB is in self.weight
        param_from_weight = getattr(self.weight, scb_name)
        # case 2: self.init_8bit_state was called, SCB is in self.state
        param_from_state = getattr(self.state, scb_name)
        # case 3: SCB is in self.state, weight layout reordered after first forward()
        layout_reordered = self.state.CxB is not None

        key_name = prefix + f"{scb_name}"
        format_name = prefix + "weight_format"

        if not self.state.has_fp16_weights:
            if param_from_weight is not None:
                destination[key_name] = param_from_weight if keep_vars else param_from_weight.detach()
                destination[format_name] = "row"
            elif param_from_state is not None and not layout_reordered:
                destination[key_name] = param_from_state if keep_vars else param_from_state.detach()
                destination[format_name] = "row"
            elif param_from_state is not None:
                destination[key_name] = param_from_state if keep_vars else param_from_state.detach()
                destination[format_name] = self.state.formatB

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys,
                                      error_msgs)
        unexpected_copy = list(unexpected_keys)

        # Consume the auxiliary SCB entry written by _save_to_state_dict above.
        for key in unexpected_copy:
            input_name = key[len(prefix):]
            if input_name == "SCB":
                if self.weight.SCB is None:
                    # buffers not yet initialized, can't access them directly without quantizing first
                    raise RuntimeError("Loading a quantized checkpoint into non-quantized Linear8bitLt is "
                                       "not supported. Please call module.cuda() before module.load_state_dict()")

                input_param = state_dict[key]
                self.weight.SCB.copy_(input_param)

                if self.state.SCB is not None:
                    self.state.SCB = self.weight.SCB

                unexpected_keys.remove(key)

    def init_8bit_state(self):
        # Hand the quantization buffers from the parameter over to the matmul state.
        self.state.CB = self.weight.CB
        self.state.SCB = self.weight.SCB
        self.weight.CB = None
        self.weight.SCB = None

    def forward(self, x: torch.Tensor):
        self.state.is_training = self.training
        if self.weight.CB is not None:
            self.init_8bit_state()

        # weights are cast automatically as Int8Params, but the bias has to be cast manually
        if self.bias is not None and self.bias.dtype != x.dtype:
            self.bias.data = self.bias.data.to(x.dtype)

        out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state)

        if not self.state.has_fp16_weights:
            if self.state.CB is not None and self.state.CxB is not None:
                # we converted 8-bit row major to turing/ampere format in the first inference pass
                # we no longer need the row-major weight
                del self.state.CB
                self.weight.data = self.state.CxB
        return out
450
+
451
+
452
class OutlierAwareLinear(nn.Linear):
    """Base class for linear layers that quantize their weight while handling
    outlier dimensions (detected via ``OutlierTracer``) specially.

    Subclasses must override ``quantize_weight`` and ``forward_with_outliers``.
    """
    def __init__(self, input_features, output_features, bias=True, device=None):
        super().__init__(input_features, output_features, bias, device)
        # Outlier indices; resolved lazily on the first forward pass.
        self.outlier_dim = None
        # The weight is quantized in-place once, on the first forward pass.
        self.is_quantized = False

    def forward_with_outliers(self, x, outlier_idx):
        raise NotImplementedError('Please override the `forward_with_outliers(self, x, outlier_idx)` function')

    def quantize_weight(self, w, outlier_idx):
        raise NotImplementedError('Please override the `quantize_weights(self, w, outlier_idx)` function')

    def forward(self, x):
        if self.outlier_dim is None:
            tracer = OutlierTracer.get_instance()
            if not tracer.is_initialized():
                print('Please use OutlierTracer.initialize(model) before using the OutlierAwareLinear layer')
            outlier_idx = tracer.get_outliers(self.weight)
            #print(outlier_idx, tracer.get_hvalue(self.weight))
            self.outlier_dim = outlier_idx

        if not self.is_quantized:
            w = self.quantize_weight(self.weight, self.outlier_dim)
            self.weight.data.copy_(w)
            self.is_quantized = True
        # NOTE(review): as written, forward() quantizes the weight but returns
        # None -- it never calls forward_with_outliers(). Confirm whether a
        # `return self.forward_with_outliers(x, self.outlier_dim)` is missing.
477
+
478
class SwitchBackLinearBnb(nn.Linear):
    """SwitchBack linear layer built on the bnb CUDA kernels (``bnb.matmul_mixed``)
    rather than the Triton kernels — a slower fallback for systems without Triton.
    """
    def __init__(
        self,
        input_features,
        output_features,
        bias=True,
        has_fp16_weights=True,
        memory_efficient_backward=False,
        threshold=0.0,
        index=None,
        device=None
    ):
        super().__init__(
            input_features, output_features, bias, device
        )
        self.state = bnb.MatmulLtState()
        self.index = index

        # threshold > 0 enables mixed-precision decomposition for outliers.
        self.state.threshold = threshold
        self.state.has_fp16_weights = has_fp16_weights
        self.state.memory_efficient_backward = memory_efficient_backward
        if threshold > 0.0 and not has_fp16_weights:
            self.state.use_pool = True

        self.weight = Int8Params(
            self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights
        )

    def init_8bit_state(self):
        # Hand the quantization buffers from the parameter over to the matmul state.
        self.state.CB = self.weight.CB
        self.state.SCB = self.weight.SCB
        self.weight.CB = None
        self.weight.SCB = None

    def forward(self, x):
        self.state.is_training = self.training

        if self.weight.CB is not None:
            self.init_8bit_state()

        out = bnb.matmul_mixed(x.half(), self.weight.half(), bias=None, state=self.state) + self.bias
        # NOTE(review): this view of the file ends here without a `return out`;
        # as written forward() returns None — confirm against upstream source
        # (the statement above may be truncated in this diff view).
mgm/lib/python3.10/site-packages/bitsandbytes/nn/triton_based_modules.py ADDED
@@ -0,0 +1,258 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import time
4
+ from functools import partial
5
+
6
+ from bitsandbytes.triton.triton_utils import is_triton_available
7
+
8
+ from bitsandbytes.triton.dequantize_rowwise import dequantize_rowwise
9
+ from bitsandbytes.triton.quantize_rowwise import quantize_rowwise
10
+ from bitsandbytes.triton.quantize_columnwise_and_transpose import quantize_columnwise_and_transpose
11
+ from bitsandbytes.triton.int8_matmul_rowwise_dequantize import int8_matmul_rowwise_dequantize
12
+ from bitsandbytes.triton.quantize_global import quantize_global, quantize_global_transpose
13
+ from bitsandbytes.triton.int8_matmul_mixed_dequanitze import int8_matmul_mixed_dequanitze
14
+
15
+
16
class _switchback_global(torch.autograd.Function):
    """SwitchBack linear: int8 forward (rowwise-quantized X, globally-quantized W),
    full-precision weight gradient in backward."""

    @staticmethod
    def forward(ctx, X_3D, W, bias):
        # reshape input to [N * L, D]
        X = X_3D.view(-1, X_3D.size(-1))

        # rowwise quantize for X, global quantize for W
        X_int8, state_X = quantize_rowwise(X)
        W_int8, state_W = quantize_global(W)

        # Bug fix: use the save_for_backward API instead of assigning to the
        # attribute — the assignment bypassed autograd's saved-tensor
        # reference management (hooks, sanity checks, memory release).
        ctx.save_for_backward(X, W)

        # matmult, fused dequant and add bias
        # call "mixed" because we are mixing rowwise quantized and global quantized
        return int8_matmul_mixed_dequanitze(
            X_int8, W_int8.t(), state_X, state_W, bias
        ).view(*X_3D.size()[:-1], -1)

    @staticmethod
    def backward(ctx, G_3D):
        # reshape incoming gradient to [N_out * L, D]
        G = G_3D.reshape(-1, G_3D.size(-1))

        grad_X = grad_W = grad_bias = None

        X, W = ctx.saved_tensors
        if ctx.needs_input_grad[0]:
            # rowwise quantize for G, global quantize for W
            # for W, we also fuse the transpose operation because only A @ B^T is supported
            # so we transpose once then call .t() in the matmul
            G_int8, state_G = quantize_rowwise(G)
            W_int8, state_W = quantize_global_transpose(W)
            grad_X = int8_matmul_mixed_dequanitze(G_int8, W_int8.t(), state_G, state_W, None).view(
                *G_3D.size()[:-1], -1
            )
        if ctx.needs_input_grad[1]:
            # backward pass uses standard (full-precision) weight grad
            grad_W = torch.matmul(G.t(), X.to(G.dtype))
        if ctx.needs_input_grad[2]:
            grad_bias = G.sum(dim=0)

        return grad_X, grad_W, grad_bias
60
+
61
class _switchback_vectorrize(torch.autograd.Function):
    """SwitchBack linear with vector-wise quantization: rowwise-quantized X and W
    in forward, full-precision weight gradient in backward."""

    @staticmethod
    def forward(ctx, X_3D, W, bias):
        # reshape input to [N * L, D]
        X = X_3D.view(-1, X_3D.size(-1))

        # Bug fix: use the save_for_backward API instead of assigning to the
        # attribute — the assignment bypassed autograd's saved-tensor
        # reference management (hooks, sanity checks, memory release).
        ctx.save_for_backward(X, W)
        # rowwise quantize for X
        # columnwise quantize for W (first rowwise, transpose later)
        X_int8, state_X = quantize_rowwise(X)
        W_int8, state_W = quantize_rowwise(W)

        # matmult, fused dequant and add bias
        # call kernel which expects rowwise quantized X and W
        return int8_matmul_rowwise_dequantize(
            X_int8, W_int8.t(), state_X, state_W, bias
        ).view(*X_3D.size()[:-1], -1)

    @staticmethod
    def backward(ctx, G_3D):
        X, W = ctx.saved_tensors

        G = G_3D.reshape(-1, G_3D.size(-1))

        grad_X = grad_W = grad_bias = None

        if ctx.needs_input_grad[0]:
            # rowwise quantize for G, columnwise quantize for W and fused transpose
            # we call .t() for weight later because only A @ B^T is supported
            G_int8, state_G = quantize_rowwise(G)
            W_int8, state_W = quantize_columnwise_and_transpose(W)
            grad_X = int8_matmul_rowwise_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(
                *G_3D.size()[:-1], -1
            )
        if ctx.needs_input_grad[1]:
            # backward pass uses standard (full-precision) weight grad
            grad_W = torch.matmul(G.t(), X.to(G.dtype))
        if ctx.needs_input_grad[2]:
            grad_bias = G.sum(dim=0)

        return grad_X, grad_W, grad_bias
103
+
104
class _switchback_global_mem_efficient(torch.autograd.Function):
    """Memory-efficient SwitchBack: saves the int8 activations (not fp tensors)
    for backward and dequantizes X on demand for the weight gradient."""

    @staticmethod
    def forward(ctx, X_3D, W, bias):
        # reshape input to [N * L, D]
        X = X_3D.view(-1, X_3D.size(-1))
        X_3D_sz = X_3D.size()

        # rowwise quantize for X, global quantize for W
        X_int8, state_X = quantize_rowwise(X)
        del X
        W_int8, state_W = quantize_global(W)

        # Bug fix: use the save_for_backward API instead of assigning to the
        # attribute — the assignment bypassed autograd's saved-tensor
        # reference management. NOTE(review): assumes the quantization states
        # returned by quantize_rowwise/quantize_global are tensors — confirm.
        ctx.save_for_backward(X_int8, state_X, W_int8, state_W)

        # matmult, fused dequant and add bias
        # call "mixed" because we are mixing rowwise quantized and global quantized
        return int8_matmul_mixed_dequanitze(
            X_int8, W_int8.t(), state_X, state_W, bias
        ).view(*X_3D_sz[:-1], -1)

    @staticmethod
    def backward(ctx, G_3D):
        # reshape incoming gradient to [N_out * L, D]
        G = G_3D.reshape(-1, G_3D.size(-1))
        G_3D_sz = G_3D.size()

        grad_X = grad_W = grad_bias = None

        X_int8, state_X, W_int8, state_W = ctx.saved_tensors
        if ctx.needs_input_grad[1]:
            # Dequantize X only when the weight gradient is actually needed.
            real_X = dequantize_rowwise(X_int8, state_X)
            del X_int8
            grad_W = torch.matmul(G.t(), real_X.to(G.dtype))
            del real_X
        if ctx.needs_input_grad[2]:
            grad_bias = G.sum(dim=0)
        if ctx.needs_input_grad[0]:
            G_int8, state_G = quantize_rowwise(G)
            del G
            # Materialize the transpose once; .t() below restores the layout
            # the kernel expects (only A @ B^T is supported).
            W_int8 = W_int8.t().contiguous()
            grad_X = int8_matmul_mixed_dequanitze(G_int8, W_int8.t(), state_G, state_W, None).view(
                *G_3D_sz[:-1], -1
            )

        return grad_X, grad_W, grad_bias
151
+
152
class SwitchBackLinear(nn.Linear):
    """Linear layer using the SwitchBack int8 Triton kernels.

    With ``vector_wise_quantization=False`` (default) the weight is quantized
    with a single global scale; with ``True`` it is quantized rowwise.
    ``mem_efficient=True`` saves int8 activations for backward (global
    quantization only).
    """
    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        device=None,
        dtype=None,
        vector_wise_quantization: bool = False,
        mem_efficient : bool = False,
    ):
        super().__init__(in_features, out_features, bias, device, dtype)

        # Bug fix: `is_triton_available` is a function; the original tested the
        # function object itself (always truthy), so `not is_triton_available`
        # was always False and this check could never fire.
        if not is_triton_available():
            raise ImportError('''Could not import triton. Please install triton to use SwitchBackLinear.
                              Alternatively, you can use bnb.nn.SwitchBackLinearBnb, but it will be slower''')

        # By default, we use the global quantization.
        self.vector_wise_quantization = vector_wise_quantization
        if self.vector_wise_quantization:
            self._fn = _switchback_vectorrize
            if mem_efficient:
                print('mem efficient is not supported for vector-wise quantization.')
                exit(1)
        else:
            if mem_efficient:
                self._fn = _switchback_global_mem_efficient
            else:
                self._fn = _switchback_global

    def prepare_for_eval(self):
        """Pre-quantize the weight for inference (experimental, not thoroughly tested).

        Needs to be called explicitly, e.g.:
            def cond_prepare(m):
                if hasattr(m, "prepare_for_eval"):
                    m.prepare_for_eval()
            model.apply(cond_prepare)
        """
        print('=> preparing for eval.')
        if self.vector_wise_quantization:
            W_int8, state_W = quantize_rowwise(self.weight)
        else:
            W_int8, state_W = quantize_global(self.weight)

        self.register_buffer("W_int8", W_int8)
        self.register_buffer("state_W", state_W)

        del self.weight

    def forward(self, x):
        if self.training:
            return self._fn.apply(x, self.weight, self.bias)
        else:
            # If it hasn't been "prepared for eval", run the standard forward pass.
            if not hasattr(self, "W_int8"):
                return self._fn.apply(x, self.weight, self.bias)

            # Otherwise, use pre-computed weights.
            X = x.view(-1, x.size(-1))
            X_int8, state_X = quantize_rowwise(X)

            if self.vector_wise_quantization:
                return int8_matmul_rowwise_dequantize(
                    X_int8, self.W_int8.t(), state_X, self.state_W, self.bias
                ).view(*x.size()[:-1], -1)
            else:
                return int8_matmul_mixed_dequanitze(
                    X_int8, self.W_int8.t(), state_X, self.state_W, self.bias
                ).view(*x.size()[:-1], -1)
221
+
222
# Convenience constructors pre-binding the quantization strategy.
SwitchBackLinearGlobal = partial(SwitchBackLinear, vector_wise_quantization=False)
SwitchBackLinearGlobalMemEfficient = partial(SwitchBackLinear, vector_wise_quantization=False, mem_efficient=True)
SwitchBackLinearVectorwise = partial(SwitchBackLinear, vector_wise_quantization=True)
225
+
226
+ # This is just the standard linear function.
227
class StandardLinearFunction(torch.autograd.Function):
    """Plain (unquantized) linear op with a hand-written backward pass,
    used as a baseline for the SwitchBack variants."""

    @staticmethod
    def forward(ctx, input, weight, bias=None):
        # Save the 2D-flattened activations; backward only needs that view.
        flat_input = input.view(-1, input.size(-1))
        ctx.save_for_backward(flat_input, weight, bias)

        out = input.matmul(weight.t())
        if bias is not None:
            out += bias.unsqueeze(0).expand_as(out)
        return out.view(*input.size()[:-1], -1)

    @staticmethod
    def backward(ctx, grad_output_3D):
        flat_input, weight, bias = ctx.saved_tensors

        grad_out = grad_output_3D.reshape(-1, grad_output_3D.size(-1))

        grad_input = grad_weight = grad_bias = None

        if ctx.needs_input_grad[0]:
            grad_input = grad_out.matmul(weight.to(grad_out.dtype)).view(*grad_output_3D.size()[:-1], -1)
        if ctx.needs_input_grad[1]:
            grad_weight = grad_out.t().matmul(flat_input.to(grad_out.dtype))
        if bias is not None and ctx.needs_input_grad[2]:
            grad_bias = grad_out.sum(0)

        return grad_input, grad_weight, grad_bias
254
+
255
class StandardLinear(nn.Linear):
    """nn.Linear that routes through StandardLinearFunction (custom-autograd baseline)."""

    def forward(self, x):
        out = StandardLinearFunction.apply(x, self.weight, self.bias)
        return out
mgm/lib/python3.10/site-packages/bitsandbytes/optim/__init__.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+
6
+ from bitsandbytes.cextension import COMPILED_WITH_CUDA
7
+
8
+ from .adagrad import Adagrad, Adagrad8bit, Adagrad32bit
9
+ from .adam import Adam, Adam8bit, Adam32bit, PagedAdam, PagedAdam8bit, PagedAdam32bit
10
+ from .adamw import AdamW, AdamW8bit, AdamW32bit, PagedAdamW, PagedAdamW8bit, PagedAdamW32bit
11
+ from .lamb import LAMB, LAMB8bit, LAMB32bit
12
+ from .lars import LARS, LARS8bit, LARS32bit, PytorchLARS
13
+ from .optimizer import GlobalOptimManager
14
+ from .rmsprop import RMSprop, RMSprop8bit, RMSprop32bit
15
+ from .lion import Lion, Lion8bit, Lion32bit, PagedLion, PagedLion8bit, PagedLion32bit
16
+ from .sgd import SGD, SGD8bit, SGD32bit
mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.12 kB). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/adagrad.cpython-310.pyc ADDED
Binary file (2.13 kB). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/adam.cpython-310.pyc ADDED
Binary file (7.98 kB). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/adamw.cpython-310.pyc ADDED
Binary file (2.4 kB). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/lamb.cpython-310.pyc ADDED
Binary file (1.73 kB). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/lars.cpython-310.pyc ADDED
Binary file (3.89 kB). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/lion.cpython-310.pyc ADDED
Binary file (2.48 kB). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/optimizer.cpython-310.pyc ADDED
Binary file (16.5 kB). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/rmsprop.cpython-310.pyc ADDED
Binary file (1.84 kB). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/sgd.cpython-310.pyc ADDED
Binary file (1.64 kB). View file
 
mgm/lib/python3.10/site-packages/bitsandbytes/optim/adagrad.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ from bitsandbytes.optim.optimizer import Optimizer1State
6
+
7
+
8
class Adagrad(Optimizer1State):
    """Adagrad optimizer backed by the bitsandbytes state kernels.

    ``optim_bits`` selects the optimizer-state precision; ``lr_decay`` and
    ``initial_accumulator_value`` are accepted for torch.optim.Adagrad API
    compatibility but must stay at their defaults (unsupported by the kernels).
    """
    def __init__(
        self,
        params,
        lr=1e-2,
        lr_decay=0,
        weight_decay=0,
        initial_accumulator_value=0,
        eps=1e-10,
        optim_bits=32,
        args=None,
        min_8bit_size=4096,
        percentile_clipping=100,
        block_wise=True,
    ):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= weight_decay:
            raise ValueError(
                f"Invalid weight_decay value: {weight_decay}"
            )
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        # torch.optim.Adagrad features not supported by the bnb kernels:
        if initial_accumulator_value != 0.0:
            raise ValueError("Initial accumulator value != 0.0 not supported!")
        if lr_decay != 0.0:
            raise ValueError("Lr Decay != 0.0 not supported!")
        super().__init__(
            "adagrad",
            params,
            lr,
            (0.0, 0.0),  # betas — unused by adagrad
            eps,
            weight_decay,
            optim_bits,
            args,
            min_8bit_size,
            percentile_clipping,
            block_wise,
        )
+ )
48
+
49
+
50
class Adagrad8bit(Optimizer1State):
    """Adagrad with 8-bit optimizer state.

    NOTE(review): the ``optim_bits`` parameter is accepted but ignored — 8 is
    passed to the base class unconditionally (consistent with the class name).
    """
    def __init__(
        self,
        params,
        lr=1e-2,
        lr_decay=0,
        weight_decay=0,
        initial_accumulator_value=0,
        eps=1e-10,
        optim_bits=8,
        args=None,
        min_8bit_size=4096,
        percentile_clipping=100,
        block_wise=True,
    ):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= weight_decay:
            raise ValueError(
                f"Invalid weight_decay value: {weight_decay}"
            )
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        # torch.optim.Adagrad features not supported by the bnb kernels:
        if initial_accumulator_value != 0.0:
            raise ValueError("Initial accumulator value != 0.0 not supported!")
        if lr_decay != 0.0:
            raise ValueError("Lr Decay != 0.0 not supported!")
        # Only the block-wise 8-bit kernels are implemented.
        assert block_wise
        super().__init__(
            "adagrad",
            params,
            lr,
            (0.0, 0.0),  # betas — unused by adagrad
            eps,
            weight_decay,
            8,
            args,
            min_8bit_size,
            percentile_clipping,
            block_wise,
        )
+
92
+
93
class Adagrad32bit(Optimizer1State):
    """Adagrad with 32-bit optimizer state.

    NOTE(review): the ``optim_bits`` parameter is accepted but ignored — 32 is
    passed to the base class unconditionally (consistent with the class name).
    """
    def __init__(
        self,
        params,
        lr=1e-2,
        lr_decay=0,
        weight_decay=0,
        initial_accumulator_value=0,
        eps=1e-10,
        optim_bits=32,
        args=None,
        min_8bit_size=4096,
        percentile_clipping=100,
        block_wise=True,
    ):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= weight_decay:
            raise ValueError(
                f"Invalid weight_decay value: {weight_decay}"
            )
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        # torch.optim.Adagrad features not supported by the bnb kernels:
        if initial_accumulator_value != 0.0:
            raise ValueError("Initial accumulator value != 0.0 not supported!")
        if lr_decay != 0.0:
            raise ValueError("Lr Decay != 0.0 not supported!")
        super().__init__(
            "adagrad",
            params,
            lr,
            (0.0, 0.0),  # betas — unused by adagrad
            eps,
            weight_decay,
            32,
            args,
            min_8bit_size,
            percentile_clipping,
            block_wise,
        )
+ )
mgm/lib/python3.10/site-packages/bitsandbytes/optim/adam.py ADDED
@@ -0,0 +1,273 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+
6
+ import math
7
+ import os
8
+
9
+ import torch
10
+ import torch.distributed as dist
11
+
12
+ import bitsandbytes.functional as F
13
+ from bitsandbytes.optim.optimizer import Optimizer2State
14
+
15
+
16
class Adam(Optimizer2State):
    """Adam optimizer backed by :class:`Optimizer2State`.

    ``optim_bits`` selects the state precision (32-bit by default).
    ``amsgrad`` is accepted for API compatibility but is not forwarded
    to the underlying implementation.
    """

    def __init__(
        self,
        params,
        lr=1e-3,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=0,
        amsgrad=False,
        optim_bits=32,
        args=None,
        min_8bit_size=4096,
        percentile_clipping=100,
        block_wise=True,
        is_paged=False,
    ):
        super().__init__(
            "adam",
            params,
            lr,
            betas,
            eps,
            weight_decay,
            optim_bits,
            args,
            min_8bit_size,
            percentile_clipping,
            block_wise,
            is_paged=is_paged,
        )
20
+
21
class Adam8bit(Optimizer2State):
    """Adam with 8-bit optimizer state.

    The ``optim_bits`` parameter is accepted for signature parity with
    :class:`Adam` but ignored: 8-bit state is always used.
    """

    def __init__(
        self,
        params,
        lr=1e-3,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=0,
        amsgrad=False,
        optim_bits=32,
        args=None,
        min_8bit_size=4096,
        percentile_clipping=100,
        block_wise=True,
        is_paged=False,
    ):
        super().__init__(
            "adam",
            params,
            lr,
            betas,
            eps,
            weight_decay,
            8,  # hard-coded 8-bit state
            args,
            min_8bit_size,
            percentile_clipping,
            block_wise,
            is_paged=is_paged,
        )
25
+
26
class Adam32bit(Optimizer2State):
    """Adam with 32-bit optimizer state.

    The ``optim_bits`` parameter is accepted for signature parity with
    :class:`Adam` but ignored: 32-bit state is always used.
    """

    def __init__(
        self,
        params,
        lr=1e-3,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=0,
        amsgrad=False,
        optim_bits=32,
        args=None,
        min_8bit_size=4096,
        percentile_clipping=100,
        block_wise=True,
        is_paged=False,
    ):
        super().__init__(
            "adam",
            params,
            lr,
            betas,
            eps,
            weight_decay,
            32,  # hard-coded 32-bit state
            args,
            min_8bit_size,
            percentile_clipping,
            block_wise,
            is_paged=is_paged,
        )
30
+
31
class PagedAdam(Optimizer2State):
    """Adam with paged optimizer state.

    The ``is_paged`` parameter is accepted for signature parity with
    :class:`Adam` but ignored: paging is always enabled.
    """

    def __init__(
        self,
        params,
        lr=1e-3,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=0,
        amsgrad=False,
        optim_bits=32,
        args=None,
        min_8bit_size=4096,
        percentile_clipping=100,
        block_wise=True,
        is_paged=False,
    ):
        super().__init__(
            "adam",
            params,
            lr,
            betas,
            eps,
            weight_decay,
            optim_bits,
            args,
            min_8bit_size,
            percentile_clipping,
            block_wise,
            is_paged=True,  # always paged, regardless of the argument
        )
35
+
36
class PagedAdam8bit(Optimizer2State):
    """Paged Adam with 8-bit optimizer state.

    ``optim_bits`` and ``is_paged`` are accepted for signature parity
    but ignored: state is always 8-bit and always paged.
    """

    def __init__(
        self,
        params,
        lr=1e-3,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=0,
        amsgrad=False,
        optim_bits=32,
        args=None,
        min_8bit_size=4096,
        percentile_clipping=100,
        block_wise=True,
        is_paged=False,
    ):
        super().__init__(
            "adam",
            params,
            lr,
            betas,
            eps,
            weight_decay,
            8,  # hard-coded 8-bit state
            args,
            min_8bit_size,
            percentile_clipping,
            block_wise,
            is_paged=True,  # always paged
        )
40
+
41
class PagedAdam32bit(Optimizer2State):
    """Paged Adam with 32-bit optimizer state.

    ``optim_bits`` and ``is_paged`` are accepted for signature parity
    but ignored: state is always 32-bit and always paged.
    """

    def __init__(
        self,
        params,
        lr=1e-3,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=0,
        amsgrad=False,
        optim_bits=32,
        args=None,
        min_8bit_size=4096,
        percentile_clipping=100,
        block_wise=True,
        is_paged=False,
    ):
        super().__init__(
            "adam",
            params,
            lr,
            betas,
            eps,
            weight_decay,
            32,  # hard-coded 32-bit state
            args,
            min_8bit_size,
            percentile_clipping,
            block_wise,
            is_paged=True,  # always paged
        )
45
+
46
class AnalysisAdam(torch.optim.Optimizer):
    """Adam that performs 8-bit vs 32-bit quantization error analysis.

    Runs a standard full-precision Adam update and, for tensors large
    enough to be interesting (more than 8192 and at most 50M elements),
    additionally quantizes/dequantizes the optimizer state with the
    scheme selected by ``bnb_analysis`` and accumulates per-code-bucket
    error histograms. Parameters are always updated with the
    full-precision result; the 8-bit path is analysis-only.

    This implementation is modified from torch.optim.Adam based on:
    `Fixed Weight Decay Regularization in Adam`
    (see https://arxiv.org/abs/1711.05101)

    It has been proposed in `Adam: A Method for Stochastic Optimization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (must stay False; the AMSGrad branch is not implemented in step())
        bnb_analysis (str, optional): quantization scheme to analyze; one of
            "dynamic-blockwise", "dynamic", "linear", "quantile", or
            "my-quantization-routine" (a fill-in template for custom schemes)
        savedir (str, optional): directory where error histograms are written
            every 100 steps; ``None`` or ``""`` disables saving

    .. _Adam: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(
        self,
        params,
        lr=1e-3,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=0,
        amsgrad=False,
        bnb_analysis="dynamic-blockwise",
        savedir=None,
    ):
        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            amsgrad=amsgrad,
        )
        super().__init__(params, defaults)
        self.analysis = bnb_analysis
        self.savedir = savedir

    @property
    def supports_memory_efficient_fp16(self):
        # Advertised capability flag read by fairseq-style trainers.
        return True

    @property
    def supports_flat_params(self):
        # Advertised capability flag read by fairseq-style trainers.
        return True

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p_id, p in enumerate(group["params"]):
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.dtype in {torch.float16, torch.bfloat16}:
                    # All analysis math is done in fp32.
                    grad = grad.float()
                if grad.is_sparse:
                    raise RuntimeError(
                        "Adam does not support sparse gradients, please consider SparseAdam instead"
                    )
                amsgrad = group.get("amsgrad", False)
                # The AMSGrad update is not implemented below; fail loudly.
                assert not amsgrad

                p_data_fp32 = p.data
                if p.data.dtype in {torch.float16, torch.bfloat16}:
                    p_data_fp32 = p_data_fp32.float()

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state["step"] = 0
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(p_data_fp32)
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
                    # 256x256 histograms indexed by the (state1, state2)
                    # 8-bit codes: accumulated absolute error, relative
                    # error, and sample counts per bucket.
                    state["abserrors"] = torch.zeros(
                        (256, 256), device=p_data_fp32.device
                    )
                    state["relerrors"] = torch.zeros(
                        (256, 256), device=p_data_fp32.device
                    )
                    state["counts"] = torch.zeros(
                        (256, 256), device=p_data_fp32.device
                    )
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state["max_exp_avg_sq"] = torch.zeros_like(p_data_fp32)
                else:
                    # Keep state on the same device/dtype as the fp32 params.
                    state["exp_avg"] = state["exp_avg"].to(p_data_fp32)
                    state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32)
                    if amsgrad:
                        state["max_exp_avg_sq"] = state["max_exp_avg_sq"].to(
                            p_data_fp32
                        )

                state["step"] += 1
                beta1, beta2 = group["betas"]
                bias_correction1 = 1 - beta1 ** state["step"]
                bias_correction2 = 1 - beta2 ** state["step"]
                step_size = (
                    group["lr"] * math.sqrt(bias_correction2) / bias_correction1
                )
                e = state["abserrors"]
                rele = state["relerrors"]
                counts = state["counts"]

                if group["weight_decay"] != 0:
                    # Decoupled weight decay applied directly to the weights.
                    p_data_fp32.add_(
                        p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
                    )

                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                if amsgrad:
                    max_exp_avg_sq = state["max_exp_avg_sq"]

                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

                denom = exp_avg_sq.sqrt().add_(group["eps"])
                update_fp32 = exp_avg / denom

                if (
                    p_data_fp32.numel() <= 8192
                    or p_data_fp32.numel() > 50000 * 1000
                ):
                    # embedding layer or too small: plain fp32 Adam update,
                    # no quantization analysis for this tensor.
                    p_data_fp32 += -step_size * update_fp32
                else:
                    # Quantize -> dequantize both moments with the selected
                    # scheme, then compare the resulting update to fp32.
                    if self.analysis == "dynamic-blockwise":
                        code1 = F.create_dynamic_map(signed=True).to(p.device)
                        code2 = F.create_dynamic_map(signed=False).to(p.device)
                        C1, S1 = F.quantize_blockwise(exp_avg, code=code1)
                        state1 = F.dequantize_blockwise(C1, S1)
                        C2, S2 = F.quantize_blockwise(exp_avg_sq, code=code2)
                        state2 = F.dequantize_blockwise(C2, S2)
                    elif self.analysis == "dynamic":
                        code1 = F.create_dynamic_map(signed=True).to(p.device)
                        code2 = F.create_dynamic_map(signed=False).to(p.device)
                        C1, S1 = F.quantize(exp_avg, code=code1)
                        state1 = F.dequantize(C1, S1)
                        C2, S2 = F.quantize(exp_avg_sq, code=code2)
                        state2 = F.dequantize(C2, S2)
                    elif self.analysis == "linear":
                        code1 = F.create_linear_map(signed=True).to(p.device)
                        code2 = F.create_linear_map(signed=False).to(p.device)
                        C1, S1 = F.quantize(exp_avg, code=code1)
                        state1 = F.dequantize(C1, S1)
                        C2, S2 = F.quantize(exp_avg_sq, code=code2)
                        state2 = F.dequantize(C2, S2)
                    elif self.analysis == "quantile":
                        code1 = F.estimate_quantiles(exp_avg)
                        code2 = F.estimate_quantiles(exp_avg_sq)
                        C1 = F.quantize_no_absmax(exp_avg, code=code1)
                        state1 = F.dequantize_no_absmax(C1, code1)
                        C2 = F.quantize_no_absmax(exp_avg_sq, code=code2)
                        state2 = F.dequantize_no_absmax(C2, code2)
                    elif self.analysis == "my-quantization-routine":
                        pass
                        # 1. get code
                        # 2. quantize
                        # 3. dequantize
                        # Error will be calculated automatically!
                        # NOTE: leaving this branch empty makes the code
                        # below fail on undefined C1/state1/state2 — it is
                        # a template that must be filled in before use.
                    else:
                        raise ValueError(
                            f"Invalid analysis value: {self.analysis}!"
                        )

                    denom = state2.sqrt().add_(group["eps"])
                    update_8bit = state1 / denom

                    abserr = torch.abs(update_8bit - update_fp32)
                    # +1e-6 guards against division by near-zero updates.
                    relerr = abserr / torch.abs(update_fp32 + 1e-6)

                    C1, C2 = C1.int(), C2.int()

                    F.histogram_scatter_add_2d(e, C1.int(), C2.int(), abserr)
                    F.histogram_scatter_add_2d(rele, C1.int(), C2.int(), relerr)
                    F.histogram_scatter_add_2d(
                        counts, C1.int(), C2.int(), torch.ones_like(abserr)
                    )

                    # The actual parameter update is always the fp32 one.
                    p_data_fp32 += -step_size * update_fp32

                    # Only rank 0 writes histograms to disk.
                    if not dist.is_initialized() or dist.get_rank() == 0:
                        # FIX: the original guard was `self.savedir != ""`,
                        # which is True for the default savedir=None and
                        # crashed os.path.exists(None) at step 100. Treat
                        # None the same as "" (saving disabled).
                        if (
                            self.savedir is not None
                            and self.savedir != ""
                            and state["step"] % 100 == 0
                        ):
                            if not os.path.exists(self.savedir):
                                os.makedirs(self.savedir)
                            shapestr = "_".join(
                                [str(dim) for dim in p_data_fp32.shape]
                            )
                            pathe = os.path.join(
                                self.savedir, f"{p_id}_{shapestr}_abserr.pkl"
                            )
                            pathrele = os.path.join(
                                self.savedir, f"{p_id}_{shapestr}_relerr.pkl"
                            )
                            pathcounts = os.path.join(
                                self.savedir, f"{p_id}_{shapestr}_counts.pkl"
                            )
                            torch.save(e, pathe)
                            torch.save(rele, pathrele)
                            torch.save(counts, pathcounts)

                if p.data.dtype in {torch.float16, torch.bfloat16}:
                    # Copy the fp32 result back into the low-precision params.
                    p.data.copy_(p_data_fp32)

        return loss
mgm/lib/python3.10/site-packages/bitsandbytes/optim/adamw.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ from bitsandbytes.optim.optimizer import Optimizer2State
6
+
7
+
8
+
9
class AdamW(Optimizer2State):
    """AdamW optimizer backed by :class:`Optimizer2State`.

    Uses the ``"adam"`` update rule with a nonzero default weight decay
    (1e-2). ``optim_bits`` selects the state precision; ``amsgrad`` is
    accepted for API compatibility but is not forwarded.
    """

    def __init__(
        self,
        params,
        lr=1e-3,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=1e-2,
        amsgrad=False,
        optim_bits=32,
        args=None,
        min_8bit_size=4096,
        percentile_clipping=100,
        block_wise=True,
        is_paged=False,
    ):
        super().__init__(
            "adam",
            params,
            lr,
            betas,
            eps,
            weight_decay,
            optim_bits,
            args,
            min_8bit_size,
            percentile_clipping,
            block_wise,
            is_paged=is_paged,
        )
13
+
14
class AdamW8bit(Optimizer2State):
    """AdamW with 8-bit optimizer state.

    The ``optim_bits`` parameter is accepted for signature parity with
    :class:`AdamW` but ignored: 8-bit state is always used.
    """

    def __init__(
        self,
        params,
        lr=1e-3,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=1e-2,
        amsgrad=False,
        optim_bits=32,
        args=None,
        min_8bit_size=4096,
        percentile_clipping=100,
        block_wise=True,
        is_paged=False,
    ):
        super().__init__(
            "adam",
            params,
            lr,
            betas,
            eps,
            weight_decay,
            8,  # hard-coded 8-bit state
            args,
            min_8bit_size,
            percentile_clipping,
            block_wise,
            is_paged=is_paged,
        )
18
+
19
class AdamW32bit(Optimizer2State):
    """AdamW with 32-bit optimizer state.

    The ``optim_bits`` parameter is accepted for signature parity with
    :class:`AdamW` but ignored: 32-bit state is always used.
    """

    def __init__(
        self,
        params,
        lr=1e-3,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=1e-2,
        amsgrad=False,
        optim_bits=32,
        args=None,
        min_8bit_size=4096,
        percentile_clipping=100,
        block_wise=True,
        is_paged=False,
    ):
        super().__init__(
            "adam",
            params,
            lr,
            betas,
            eps,
            weight_decay,
            32,  # hard-coded 32-bit state
            args,
            min_8bit_size,
            percentile_clipping,
            block_wise,
            is_paged=is_paged,
        )
23
+
24
+
25
class PagedAdamW(Optimizer2State):
    """AdamW with paged optimizer state (paging always enabled)."""

    def __init__(
        self,
        params,
        lr=1e-3,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=1e-2,
        amsgrad=False,
        optim_bits=32,
        args=None,
        min_8bit_size=4096,
        percentile_clipping=100,
        block_wise=True,
    ):
        super().__init__(
            "adam",
            params,
            lr,
            betas,
            eps,
            weight_decay,
            optim_bits,
            args,
            min_8bit_size,
            percentile_clipping,
            block_wise,
            is_paged=True,  # always paged
        )
29
+
30
class PagedAdamW8bit(Optimizer2State):
    """Paged AdamW with 8-bit optimizer state.

    ``optim_bits`` is accepted for signature parity but ignored: state
    is always 8-bit, and paging is always enabled.
    """

    def __init__(
        self,
        params,
        lr=1e-3,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=1e-2,
        amsgrad=False,
        optim_bits=32,
        args=None,
        min_8bit_size=4096,
        percentile_clipping=100,
        block_wise=True,
    ):
        super().__init__(
            "adam",
            params,
            lr,
            betas,
            eps,
            weight_decay,
            8,  # hard-coded 8-bit state
            args,
            min_8bit_size,
            percentile_clipping,
            block_wise,
            is_paged=True,  # always paged
        )
34
+
35
class PagedAdamW32bit(Optimizer2State):
    """Paged AdamW with 32-bit optimizer state.

    ``optim_bits`` is accepted for signature parity but ignored: state
    is always 32-bit, and paging is always enabled.
    """

    def __init__(
        self,
        params,
        lr=1e-3,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=1e-2,
        amsgrad=False,
        optim_bits=32,
        args=None,
        min_8bit_size=4096,
        percentile_clipping=100,
        block_wise=True,
    ):
        super().__init__(
            "adam",
            params,
            lr,
            betas,
            eps,
            weight_decay,
            32,  # hard-coded 32-bit state
            args,
            min_8bit_size,
            percentile_clipping,
            block_wise,
            is_paged=True,  # always paged
        )
39
+