ZTWHHH committed on
Commit
9d07e77
·
verified ·
1 Parent(s): 2b816c7

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/__init__.cpython-310.pyc +0 -0
  3. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/__main__.cpython-310.pyc +0 -0
  4. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/cextension.cpython-310.pyc +0 -0
  5. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/consts.cpython-310.pyc +0 -0
  6. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/cuda_specs.cpython-310.pyc +0 -0
  7. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/functional.cpython-310.pyc +0 -0
  8. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/utils.cpython-310.pyc +0 -0
  9. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__init__.py +0 -0
  10. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/__init__.cpython-310.pyc +0 -0
  11. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/cuda.cpython-310.pyc +0 -0
  12. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/main.cpython-310.pyc +0 -0
  13. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/utils.cpython-310.pyc +0 -0
  14. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/cuda.py +176 -0
  15. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/main.py +85 -0
  16. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/utils.py +12 -0
  17. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121.so +3 -0
  18. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda124.so +3 -0
  19. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/__init__.py +26 -0
  20. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/__init__.cpython-310.pyc +0 -0
  21. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/modules.cpython-310.pyc +0 -0
  22. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/triton_based_modules.cpython-310.pyc +0 -0
  23. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/modules.py +1061 -0
  24. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/triton_based_modules.py +264 -0
  25. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/__init__.cpython-310.pyc +0 -0
  26. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/lion.cpython-310.pyc +0 -0
  27. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/__init__.py +6 -0
  28. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/__pycache__/__init__.cpython-310.pyc +0 -0
  29. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/autograd/__init__.py +0 -0
  30. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/autograd/__pycache__/__init__.cpython-310.pyc +0 -0
  31. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/autograd/__pycache__/_functions.cpython-310.pyc +0 -0
  32. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/autograd/_functions.py +396 -0
  33. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/nn/__init__.py +1 -0
  34. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/nn/__pycache__/__init__.cpython-310.pyc +0 -0
  35. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/nn/__pycache__/modules.cpython-310.pyc +0 -0
  36. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/nn/modules.py +76 -0
  37. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__init__.py +0 -0
  38. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/__init__.cpython-310.pyc +0 -0
  39. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/dequantize_rowwise.cpython-310.pyc +0 -0
  40. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/int8_matmul_mixed_dequantize.cpython-310.pyc +0 -0
  41. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/int8_matmul_rowwise_dequantize.cpython-310.pyc +0 -0
  42. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_columnwise_and_transpose.cpython-310.pyc +0 -0
  43. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_global.cpython-310.pyc +0 -0
  44. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_rowwise.cpython-310.pyc +0 -0
  45. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/triton_utils.cpython-310.pyc +0 -0
  46. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/dequantize_rowwise.py +64 -0
  47. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/int8_matmul_mixed_dequantize.py +205 -0
  48. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/int8_matmul_rowwise_dequantize.py +206 -0
  49. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/quantize_columnwise_and_transpose.py +75 -0
  50. evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/quantize_global.py +124 -0
.gitattributes CHANGED
@@ -801,3 +801,5 @@ evalkit_eagle/lib/python3.10/site-packages/pandas/core/__pycache__/frame.cpython
801
  evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
802
  evalkit_eagle/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/base.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
803
  evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
801
  evalkit_eagle/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
802
  evalkit_eagle/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/base.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
803
  evalkit_eagle/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
804
+ evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121.so filter=lfs diff=lfs merge=lfs -text
805
+ evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda124.so filter=lfs diff=lfs merge=lfs -text
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (583 Bytes). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (273 Bytes). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/cextension.cpython-310.pyc ADDED
Binary file (4.12 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/consts.cpython-310.pyc ADDED
Binary file (569 Bytes). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/cuda_specs.cpython-310.pyc ADDED
Binary file (1.8 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/functional.cpython-310.pyc ADDED
Binary file (77.2 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/__pycache__/utils.cpython-310.pyc ADDED
Binary file (6.52 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__init__.py ADDED
File without changes
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (183 Bytes). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/cuda.cpython-310.pyc ADDED
Binary file (5.56 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/main.cpython-310.pyc ADDED
Binary file (2.75 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/utils.cpython-310.pyc ADDED
Binary file (644 Bytes). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/cuda.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import os
3
+ from pathlib import Path
4
+ from typing import Dict, Iterable, Iterator
5
+
6
+ import torch
7
+
8
+ from bitsandbytes.cextension import get_cuda_bnb_library_path
9
+ from bitsandbytes.consts import NONPYTORCH_DOC_URL
10
+ from bitsandbytes.cuda_specs import CUDASpecs
11
+ from bitsandbytes.diagnostics.utils import print_dedented
12
+
13
+ CUDART_PATH_PREFERRED_ENVVARS = ("CONDA_PREFIX", "LD_LIBRARY_PATH")
14
+
15
+ CUDART_PATH_IGNORED_ENVVARS = {
16
+ "DBUS_SESSION_BUS_ADDRESS", # hardware related
17
+ "GOOGLE_VM_CONFIG_LOCK_FILE", # GCP: requires elevated permissions, causing problems in VMs and Jupyter notebooks
18
+ "HOME", # Linux shell default
19
+ "LESSCLOSE",
20
+ "LESSOPEN", # related to the `less` command
21
+ "MAIL", # something related to emails
22
+ "OLDPWD",
23
+ "PATH", # this is for finding binaries, not libraries
24
+ "PWD", # PWD: this is how the shell keeps track of the current working dir
25
+ "SHELL", # binary for currently invoked shell
26
+ "SSH_AUTH_SOCK", # SSH stuff, therefore unrelated
27
+ "SSH_TTY",
28
+ "TMUX", # Terminal Multiplexer
29
+ "XDG_DATA_DIRS", # XDG: Desktop environment stuff
30
+ "XDG_GREETER_DATA_DIR", # XDG: Desktop environment stuff
31
+ "XDG_RUNTIME_DIR",
32
+ "_", # current Python interpreter
33
+ }
34
+
35
+ CUDA_RUNTIME_LIB_PATTERNS = (
36
+ "cudart64*.dll", # Windows
37
+ "libcudart*.so*", # libcudart.so, libcudart.so.11.0, libcudart.so.12.0, libcudart.so.12.1, libcudart.so.12.2 etc.
38
+ "nvcuda*.dll", # Windows
39
+ )
40
+
41
+ logger = logging.getLogger(__name__)
42
+
43
+
44
def find_cuda_libraries_in_path_list(paths_list_candidate: str) -> Iterable[Path]:
    """Yield CUDA runtime library files found in a PATH-style directory list.

    Args:
        paths_list_candidate: An ``os.pathsep``-separated list of directories,
            e.g. the value of ``LD_LIBRARY_PATH``.

    Yields:
        Paths of files matching any pattern in ``CUDA_RUNTIME_LIB_PATTERNS``.
    """
    for dir_string in paths_list_candidate.split(os.pathsep):
        if not dir_string:
            continue
        # A candidate with no path separator is unlikely to be a directory.
        if os.sep not in dir_string:
            continue
        try:
            directory = Path(dir_string)  # renamed: don't shadow the `dir` builtin
            try:
                if not directory.exists():
                    logger.warning(f"The directory listed in your path is found to be non-existent: {directory}")
                    continue
            except OSError:  # Assume an esoteric error trying to poke at the directory
                pass
            for lib_pattern in CUDA_RUNTIME_LIB_PATTERNS:
                for pth in directory.glob(lib_pattern):
                    if pth.is_file():
                        yield pth
        except OSError:  # PermissionError is a subclass of OSError; one catch suffices
            pass
64
+
65
+
66
def is_relevant_candidate_env_var(env_var: str, value: str) -> bool:
    """Decide whether an environment variable may point at CUDA libraries."""
    # Preferred locations (e.g. active conda env) are always considered.
    if env_var in CUDART_PATH_PREFERRED_ENVVARS:
        return True
    # Otherwise require something path-like and not a known-irrelevant variable.
    looks_like_path = os.sep in value and "\n" not in value  # newlines suggest a script, not a path
    is_excluded = (
        env_var in CUDART_PATH_IGNORED_ENVVARS
        or "CONDA" in env_var  # other conda-related variables
        or "BASH_FUNC" in env_var  # bash functions exported via the environment
    )
    return looks_like_path and not is_excluded
77
+
78
+
79
def get_potentially_lib_path_containing_env_vars() -> Dict[str, str]:
    """Return the environment variables that plausibly contain library paths."""
    candidates: Dict[str, str] = {}
    for env_var, value in os.environ.items():
        if is_relevant_candidate_env_var(env_var, value):
            candidates[env_var] = value
    return candidates
81
+
82
+
83
def find_cudart_libraries() -> Iterator[Path]:
    """
    Search for CUDA runtime libraries, in the following order of priority:
      1. active conda env
      2. LD_LIBRARY_PATH
      3. any other env vars, while ignoring those that
         - are known to be unrelated
         - don't contain the path separator `/`

    If multiple libraries are found in part 3, we optimistically try one,
    while giving a warning message.
    """
    env_var_candidates = get_potentially_lib_path_containing_env_vars()

    # Scan the preferred variables first, removing them so the generic pass
    # below does not visit them a second time.
    for preferred in CUDART_PATH_PREFERRED_ENVVARS:
        value = env_var_candidates.pop(preferred, None)
        if value is not None:
            yield from find_cuda_libraries_in_path_list(value)

    for value in env_var_candidates.values():
        yield from find_cuda_libraries_in_path_list(value)
105
+
106
+
107
def print_cuda_diagnostics(cuda_specs: CUDASpecs) -> None:
    """Print diagnostics about the detected CUDA setup and the bnb binary.

    Warns when the compiled library is missing, when the CUDA version is
    too old for LLM.int8(), and when the GPU lacks int8 tensor cores.

    Args:
        cuda_specs: The CUDA specs detected from the current PyTorch install.
    """
    print(
        f"PyTorch settings found: CUDA_VERSION={cuda_specs.cuda_version_string}, "
        f"Highest Compute Capability: {cuda_specs.highest_compute_capability}.",
    )

    binary_path = get_cuda_bnb_library_path(cuda_specs)
    if not binary_path.exists():
        print_dedented(
            f"""
            Library not found: {binary_path}. Maybe you need to compile it from source?
            If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION`,
            for example, `make CUDA_VERSION=113`.

            The CUDA version for the compile might depend on your conda install, if using conda.
            Inspect CUDA version via `conda list | grep cuda`.
            """,
        )

    cuda_major, cuda_minor = cuda_specs.cuda_version_tuple
    if cuda_major < 11:
        # Fixed wording: was "You will be only to use ..." (missing "able").
        print_dedented(
            """
            WARNING: CUDA versions lower than 11 are currently not supported for LLM.int8().
            You will only be able to use 8-bit optimizers and quantization routines!
            """,
        )

    print(f"To manually override the PyTorch CUDA version please see: {NONPYTORCH_DOC_URL}")

    # 7.5 is the minimum CC for int8 tensor cores
    if not cuda_specs.has_imma:
        print_dedented(
            """
            WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!
            If you run into issues with 8-bit matmul, you can try 4-bit quantization:
            https://huggingface.co/blog/4bit-transformers-bitsandbytes
            """,
        )
146
+
147
+ # TODO:
148
+ # (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible)
149
+ # (2) Multiple CUDA versions installed
150
+
151
+
152
def print_cuda_runtime_diagnostics() -> None:
    """Warn about missing or duplicated CUDA runtime libraries on the system."""
    cudart_paths = list(find_cudart_libraries())
    if not cudart_paths:
        print("CUDA SETUP: WARNING! CUDA runtime files not found in any environmental path.")
        return
    if len(cudart_paths) <= 1:
        # Exactly one runtime found: nothing ambiguous to report.
        return
    print_dedented(
        f"""
        Found duplicate CUDA runtime files (see below).

        We select the PyTorch default CUDA runtime, which is {torch.version.cuda},
        but this might mismatch with the CUDA version that is needed for bitsandbytes.
        To override this behavior set the `BNB_CUDA_VERSION=<version string, e.g. 122>` environmental variable.

        For example, if you want to use the CUDA version 122,
        BNB_CUDA_VERSION=122 python ...

        OR set the environmental variable in your .bashrc:
        export BNB_CUDA_VERSION=122

        In the case of a manual override, make sure you set LD_LIBRARY_PATH, e.g.
        export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.2,
        """,
    )
    for pth in cudart_paths:
        print(f"* Found CUDA runtime at: {pth}")
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/main.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import traceback
3
+
4
+ import torch
5
+
6
+ from bitsandbytes.consts import PACKAGE_GITHUB_URL
7
+ from bitsandbytes.cuda_specs import get_cuda_specs
8
+ from bitsandbytes.diagnostics.cuda import (
9
+ print_cuda_diagnostics,
10
+ print_cuda_runtime_diagnostics,
11
+ )
12
+ from bitsandbytes.diagnostics.utils import print_dedented, print_header
13
+
14
+
15
def sanity_check():
    """Run a minimal end-to-end check: one 8-bit Adam step on a CUDA tensor.

    Asserts that the optimizer actually changed the parameter, which exercises
    both the native bitsandbytes library and a working CUDA device.
    """
    from bitsandbytes.cextension import lib

    if lib is None:
        print_dedented(
            """
            Couldn't load the bitsandbytes library, likely due to missing binaries.
            Please ensure bitsandbytes is properly installed.

            For source installations, compile the binaries with `cmake -DCOMPUTE_BACKEND=cuda -S .`.
            See the documentation for more details if needed.

            Trying a simple check anyway, but this will likely fail...
            """,
        )

    from bitsandbytes.optim import Adam

    param = torch.nn.Parameter(torch.rand(10, 10).cuda())
    inputs = torch.rand(10, 10).cuda()
    sum_before = param.data.sum().item()

    optimizer = Adam([param])
    loss = (inputs * param).sum()
    loss.backward()
    optimizer.step()

    sum_after = param.data.sum().item()
    # A successful optimizer step must change the parameter values.
    assert sum_before != sum_after
43
+
44
+
45
def main():
    """Entry point for `python -m bitsandbytes`: print debug info, then run a sanity check.

    Exits with status 1 when the sanity check fails; returns normally on success.
    """
    print_header("")
    print_header("BUG REPORT INFORMATION")
    print_header("")

    print_header("OTHER")
    cuda_specs = get_cuda_specs()
    print("CUDA specs:", cuda_specs)
    if not torch.cuda.is_available():
        print("Torch says CUDA is not available. Possible reasons:")
        print("1. CUDA driver not installed")
        print("2. CUDA not installed")
        print("3. You have multiple conflicting CUDA libraries")
    if cuda_specs:
        print_cuda_diagnostics(cuda_specs)
    print_cuda_runtime_diagnostics()
    print_header("")
    print_header("DEBUG INFO END")
    print_header("")
    print("Checking that the library is importable and CUDA is callable...")
    try:
        sanity_check()
        print("SUCCESS!")
        print("Installation was successful!")
        return
    except ImportError:
        # Fixed wording: was "If you think that this is so erroneously, ...".
        print(
            f"WARNING: {__package__} is currently running as CPU-only!\n"
            "Therefore, 8-bit optimizers and GPU quantization are unavailable.\n\n"
            "If you think this is a mistake, please report an issue!",
        )
    except Exception:
        traceback.print_exc()
    # Reached on any failure path above (the success path returns early).
    print_dedented(
        f"""
        Above we output some debug information.
        Please provide this info when creating an issue via {PACKAGE_GITHUB_URL}/issues/new/choose
        WARNING: Please be sure to sanitize sensitive info from the output before posting it.
        """,
    )
    sys.exit(1)
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/diagnostics/utils.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import textwrap
2
+
3
+ HEADER_WIDTH = 60
4
+
5
+
6
def print_header(txt: str, width: int = HEADER_WIDTH, filler: str = "+") -> None:
    """Print `txt` centered in a `width`-wide line padded with `filler` characters."""
    label = "" if not txt else f" {txt} "
    print(label.center(width, filler))
9
+
10
+
11
def print_dedented(text):
    """Dedent `text`, strip surrounding blank lines/whitespace, and print it."""
    cleaned = textwrap.dedent(text).strip()
    print("\n".join(cleaned.split("\n")))
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda121.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:906a547b09630095e0355cc3780c84169e1b222a00571b0f738760da5f3df665
3
+ size 25822992
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cuda124.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:06689764b9601734d0e8f5bec701255fd38d4480de704d637800c8874348717a
3
+ size 25123696
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/__init__.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ from .modules import (
6
+ Embedding,
7
+ Embedding4bit,
8
+ Embedding8bit,
9
+ EmbeddingFP4,
10
+ EmbeddingNF4,
11
+ Int8Params,
12
+ Linear4bit,
13
+ Linear8bitLt,
14
+ LinearFP4,
15
+ LinearNF4,
16
+ OutlierAwareLinear,
17
+ Params4bit,
18
+ StableEmbedding,
19
+ SwitchBackLinearBnb,
20
+ )
21
+ from .triton_based_modules import (
22
+ StandardLinear,
23
+ SwitchBackLinear,
24
+ SwitchBackLinearGlobal,
25
+ SwitchBackLinearVectorwise,
26
+ )
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (676 Bytes). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/modules.cpython-310.pyc ADDED
Binary file (28.3 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/triton_based_modules.cpython-310.pyc ADDED
Binary file (6.93 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/modules.py ADDED
@@ -0,0 +1,1061 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ import copy
6
+ from typing import Any, Dict, Optional, TypeVar, Union, overload
7
+ import warnings
8
+
9
+ import torch
10
+ from torch import Tensor, device, dtype, nn
11
+ import torch.nn.functional as F
12
+
13
+ import bitsandbytes as bnb
14
+ from bitsandbytes.autograd._functions import get_tile_inds, undo_layout
15
+ from bitsandbytes.functional import QuantState
16
+ from bitsandbytes.optim import GlobalOptimManager
17
+ from bitsandbytes.utils import (
18
+ INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING,
19
+ OutlierTracer,
20
+ )
21
+
22
+ T = TypeVar("T", bound="torch.nn.Module")
23
+
24
+
25
class StableEmbedding(torch.nn.Embedding):
    """
    Custom embedding layer designed to improve stability during training for NLP tasks by using 32-bit optimizer states. It is designed to reduce gradient variations that can result from quantization. This embedding layer is initialized with Xavier uniform initialization followed by layer normalization.

    Example:

    ```
    # Initialize StableEmbedding layer with vocabulary size 1000, embedding dimension 300
    embedding_layer = StableEmbedding(num_embeddings=1000, embedding_dim=300)

    # Reset embedding parameters
    embedding_layer.reset_parameters()

    # Perform a forward pass with input tensor
    input_tensor = torch.tensor([1, 2, 3])
    output_embedding = embedding_layer(input_tensor)
    ```

    Attributes:
        norm (`torch.nn.LayerNorm`): Layer normalization applied after the embedding.

    Methods:
        reset_parameters(): Reset embedding parameters using Xavier uniform initialization.
        forward(input: Tensor) -> Tensor: Forward pass through the stable embedding layer.
    """

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: Optional[int] = None,
        max_norm: Optional[float] = None,
        norm_type: float = 2.0,
        scale_grad_by_freq: bool = False,
        sparse: bool = False,
        _weight: Optional[Tensor] = None,
        device=None,
        dtype=None,
    ) -> None:
        """
        Args:
            num_embeddings (`int`):
                The number of unique embeddings (vocabulary size).
            embedding_dim (`int`):
                The dimensionality of the embedding.
            padding_idx (`Optional[int]`):
                Pads the output with zeros at the given index.
            max_norm (`Optional[float]`):
                Renormalizes embeddings to have a maximum L2 norm.
            norm_type (`float`, defaults to `2.0`):
                The p-norm to compute for the `max_norm` option.
            scale_grad_by_freq (`bool`, defaults to `False`):
                Scale gradient by frequency during backpropagation.
            sparse (`bool`, defaults to `False`):
                Computes dense gradients. Set to `True` to compute sparse gradients instead.
            _weight (`Optional[Tensor]`):
                Pretrained embeddings.
        """
        super().__init__(
            num_embeddings,
            embedding_dim,
            padding_idx,
            max_norm,
            norm_type,
            scale_grad_by_freq,
            sparse,
            _weight,
            device,
            dtype,
        )
        # LayerNorm applied to the embedding output (see forward).
        self.norm = torch.nn.LayerNorm(embedding_dim, device=device)
        # Force 32-bit optimizer state for this weight, even under 8-bit optimizers.
        GlobalOptimManager.get_instance().register_module_override(self, "weight", {"optim_bits": 32})

    def reset_parameters(self) -> None:
        """Reinitialize the weight with Xavier uniform and zero the padding row."""
        torch.nn.init.xavier_uniform_(self.weight)
        self._fill_padding_idx_with_zero()

    """ !!! This is a redefinition of _fill_padding_idx_with_zero in torch.nn.Embedding
    to make the Layer compatible with Pytorch < 1.9.
    This means that if this changes in future PyTorch releases this need to change too
    which is cumbersome. However, with this we can ensure compatibility with previous
    PyTorch releases.
    """

    def _fill_padding_idx_with_zero(self) -> None:
        # Zero out the embedding row reserved for padding, if one was configured.
        if self.padding_idx is not None:
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(self, input: Tensor) -> Tensor:
        """Embed `input` indices, then apply LayerNorm (computed in full precision)."""
        emb = F.embedding(
            input,
            self.weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )

        # always apply layer norm in full precision
        emb = emb.to(torch.get_default_dtype())

        # Cast back to the weight's dtype so downstream layers see a consistent dtype.
        return self.norm(emb).to(self.weight.dtype)
129
+
130
+
131
class Embedding(torch.nn.Embedding):
    """
    Embedding class to store and retrieve word embeddings from their indices.

    Identical to `torch.nn.Embedding` except that the weight is registered for
    32-bit optimizer state with `GlobalOptimManager`, and `reset_parameters`
    uses Xavier uniform initialization.
    """

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: Optional[int] = None,
        max_norm: Optional[float] = None,
        norm_type: float = 2.0,
        scale_grad_by_freq: bool = False,
        sparse: bool = False,
        _weight: Optional[Tensor] = None,
        device: Optional[device] = None,
    ) -> None:
        """
        Args:
            num_embeddings (`int`):
                The number of unique embeddings (vocabulary size).
            embedding_dim (`int`):
                The dimensionality of the embedding.
            padding_idx (`Optional[int]`):
                Pads the output with zeros at the given index.
            max_norm (`Optional[float]`):
                Renormalizes embeddings to have a maximum L2 norm.
            norm_type (`float`, defaults to `2.0`):
                The p-norm to compute for the `max_norm` option.
            scale_grad_by_freq (`bool`, defaults to `False`):
                Scale gradient by frequency during backpropagation.
            sparse (`bool`, defaults to `False`):
                Computes dense gradients. Set to `True` to compute sparse gradients instead.
            _weight (`Optional[Tensor]`):
                Pretrained embeddings.
        """
        super().__init__(
            num_embeddings,
            embedding_dim,
            padding_idx,
            max_norm,
            norm_type,
            scale_grad_by_freq,
            sparse,
            _weight,
            device=device,
        )
        # Force 32-bit optimizer state for this weight, even under 8-bit optimizers.
        GlobalOptimManager.get_instance().register_module_override(self, "weight", {"optim_bits": 32})

    def reset_parameters(self) -> None:
        """Reinitialize the weight with Xavier uniform and zero the padding row."""
        torch.nn.init.xavier_uniform_(self.weight)
        self._fill_padding_idx_with_zero()

    """ !!! This is a redefinition of _fill_padding_idx_with_zero in torch.nn.Embedding
    to make the Layer compatible with Pytorch < 1.9.
    This means that if this changes in future PyTorch releases this need to change too
    which is cumbersome. However, with this we can ensure compatibility with previous
    PyTorch releases.
    """

    def _fill_padding_idx_with_zero(self) -> None:
        # Zero out the embedding row reserved for padding, if one was configured.
        if self.padding_idx is not None:
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(self, input: Tensor) -> Tensor:
        """Look up the embeddings for `input` indices; no normalization is applied."""
        emb = F.embedding(
            input,
            self.weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )

        return emb
208
+
209
+
210
+ class Params4bit(torch.nn.Parameter):
211
+ def __new__(
212
+ cls,
213
+ data: Optional[torch.Tensor] = None,
214
+ requires_grad=False, # quantized weights should be frozen by default
215
+ quant_state: Optional[QuantState] = None,
216
+ blocksize: int = 64,
217
+ compress_statistics: bool = True,
218
+ quant_type: str = "fp4",
219
+ quant_storage: torch.dtype = torch.uint8,
220
+ module: Optional["Linear4bit"] = None,
221
+ bnb_quantized: bool = False,
222
+ ) -> "Params4bit":
223
+ if data is None:
224
+ data = torch.empty(0)
225
+
226
+ self = torch.Tensor._make_subclass(cls, data, requires_grad)
227
+ self.blocksize = blocksize
228
+ self.compress_statistics = compress_statistics
229
+ self.quant_type = quant_type
230
+ self.quant_state = quant_state
231
+ self.quant_storage = quant_storage
232
+ self.bnb_quantized = bnb_quantized
233
+ self.data = data
234
+ self.module = module
235
+ return self
236
+
237
+ def __getstate__(self):
238
+ state = self.__dict__.copy()
239
+ state["data"] = self.data
240
+ state["requires_grad"] = self.requires_grad
241
+ return state
242
+
243
+ def __setstate__(self, state):
244
+ self.requires_grad = state["requires_grad"]
245
+ self.blocksize = state["blocksize"]
246
+ self.compress_statistics = state["compress_statistics"]
247
+ self.quant_type = state["quant_type"]
248
+ self.quant_state = state["quant_state"]
249
+ self.data = state["data"]
250
+ self.quant_storage = state["quant_storage"]
251
+ self.bnb_quantized = state["bnb_quantized"]
252
+ self.module = state["module"]
253
+
254
+ def __deepcopy__(self, memo):
255
+ new_instance = type(self).__new__(type(self))
256
+ state = self.__getstate__()
257
+ new_instance.__setstate__(state)
258
+ new_instance.quant_state = copy.deepcopy(state["quant_state"])
259
+ new_instance.data = copy.deepcopy(state["data"])
260
+ return new_instance
261
+
262
+ def __copy__(self):
263
+ new_instance = type(self).__new__(type(self))
264
+ state = self.__getstate__()
265
+ new_instance.__setstate__(state)
266
+ return new_instance
267
+
268
    @classmethod
    def from_prequantized(
        cls,
        data: torch.Tensor,
        quantized_stats: Dict[str, Any],
        requires_grad: bool = False,
        device="cuda",
        module: Optional["Linear4bit"] = None,
        **kwargs,
    ) -> "Params4bit":
        """Reconstruct a Params4bit from already-quantized storage.

        Args:
            data: packed 4-bit weight tensor in its storage dtype (e.g. uint8).
            quantized_stats: serialized quant-state dict, as produced by
                ``QuantState.as_dict(packed=True)``.
            requires_grad: gradient flag for the resulting parameter.
            device: device to place both the data and the quant state on.
            module: owning layer; if given, its cached ``quant_state`` is synced.
        """
        self = torch.Tensor._make_subclass(cls, data.to(device))
        self.requires_grad = requires_grad
        # Rebuild the quantization state (absmax, code, blocksize, ...) on the target device.
        self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
        # Mirror the state's metadata onto the parameter itself.
        self.blocksize = self.quant_state.blocksize
        self.compress_statistics = self.quant_state.nested
        self.quant_type = self.quant_state.quant_type
        self.bnb_quantized = True

        self.quant_storage = data.dtype
        self.module = module

        # Keep the owning module's cached quant_state in sync; it is used to
        # recover the state if the parameter object gets replaced later.
        if self.module is not None:
            self.module.quant_state = self.quant_state

        return self
293
+
294
    def _quantize(self, device):
        """Quantize the (still full-precision) payload to 4-bit on *device*.

        Replaces ``self.data`` with packed 4-bit storage and records the
        resulting quant_state on both the parameter and its owning module.
        """
        w = self.data.contiguous().to(device)
        w_4bit, quant_state = bnb.functional.quantize_4bit(
            w,
            blocksize=self.blocksize,
            compress_statistics=self.compress_statistics,
            quant_type=self.quant_type,
            quant_storage=self.quant_storage,
        )
        self.data = w_4bit
        self.quant_state = quant_state
        # Mirror the state onto the module so it survives parameter replacement.
        if self.module is not None:
            self.module.quant_state = quant_state
        self.bnb_quantized = True
        return self
309
+
310
+ def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):
311
+ return self.to(device="cuda" if device is None else device, non_blocking=non_blocking)
312
+
313
    @overload
    def to(
        self: T,
        device: Optional[Union[int, device]] = ...,
        dtype: Optional[Union[dtype, str]] = ...,
        non_blocking: bool = ...,
    ) -> T: ...

    @overload
    def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T: ...

    @overload
    def to(self: T, tensor: Tensor, non_blocking: bool = ...) -> T: ...

    def to(self, *args, **kwargs):
        """Device/dtype move with lazy quantization.

        The first move to a CUDA device quantizes the weight in place; any
        other move builds a new Params4bit sharing the (moved) quant_state.
        """
        device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)

        if device is not None and device.type == "cuda" and not self.bnb_quantized:
            # Not quantized yet: quantization happens on the first CUDA transfer.
            return self._quantize(device)
        else:
            if self.quant_state is not None:
                # Moves the quant-state tensors in place.
                self.quant_state.to(device)

            new_param = Params4bit(
                super().to(device=device, dtype=dtype, non_blocking=non_blocking),
                requires_grad=self.requires_grad,
                quant_state=self.quant_state,
                blocksize=self.blocksize,
                compress_statistics=self.compress_statistics,
                quant_type=self.quant_type,
                quant_storage=self.quant_storage,
            )
            # NOTE(review): `bnb_quantized` and `module` are not propagated to
            # the new parameter here — TODO confirm this is intentional.
            return new_param
347
+
348
+
349
def fix_4bit_weight_quant_state_from_module(module: Union["Embedding4bit", "Linear4bit"]):
    """Re-attach a lost ``quant_state`` to ``module.weight``.

    When a Params4bit gets replaced by a plain Parameter (this happens, for
    example, under FSDP), the quantization state is lost from the weight; since
    the owning module keeps its own reference, it can be restored from there.

    Raises:
        ValueError: if the weight is not in packed ``(n, 1)`` 4-bit layout.
    """
    if getattr(module.weight, "quant_state", None) is not None:
        return  # the weight already carries its state; nothing to do

    if getattr(module, "quant_state", None) is None:
        warnings.warn(
            "FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first.",
        )

    # the quant state got lost when the parameter got converted. This happens for example for fsdp
    # since we registered the module, we can recover the state here
    # Validate the packed layout explicitly instead of with `assert`, which is
    # stripped when Python runs with -O.
    if module.weight.shape[1] != 1:
        raise ValueError(
            f"Expected packed 4-bit weight of shape (n, 1), got {tuple(module.weight.shape)}"
        )
    if not isinstance(module.weight, Params4bit):
        module.weight = Params4bit(module.weight, quant_storage=module.quant_storage, bnb_quantized=True)
    module.weight.quant_state = module.quant_state
364
+
365
+
366
class Linear4bit(nn.Linear):
    """
    This class is the base module for the 4-bit quantization algorithm presented in [QLoRA](https://arxiv.org/abs/2305.14314).
    QLoRA 4-bit linear layers uses blockwise k-bit quantization under the hood, with the possibility of selecting various
    compute datatypes such as FP4 and NF4.

    In order to quantize a linear layer one should first load the original fp16 / bf16 weights into
    the Linear4bit module, then call `quantized_module.to("cuda")` to quantize the fp16 / bf16 weights.

    Example:

    ```python
    import torch
    import torch.nn as nn

    import bitsandbytes as bnb
    from bnb.nn import Linear4bit

    fp16_model = nn.Sequential(
        nn.Linear(64, 64),
        nn.Linear(64, 64)
    )

    quantized_model = nn.Sequential(
        Linear4bit(64, 64),
        Linear4bit(64, 64)
    )

    quantized_model.load_state_dict(fp16_model.state_dict())
    quantized_model = quantized_model.to(0) # Quantization happens here
    ```
    """

    def __init__(
        self,
        input_features,
        output_features,
        bias=True,
        compute_dtype=None,
        compress_statistics=True,
        quant_type="fp4",
        quant_storage=torch.uint8,
        device=None,
    ):
        """
        Initialize Linear4bit class.

        Args:
            input_features (`str`):
                Number of input features of the linear layer.
            output_features (`str`):
                Number of output features of the linear layer.
            bias (`bool`, defaults to `True`):
                Whether the linear class uses the bias term as well.
            compute_dtype (`torch.dtype`, *optional*):
                Dtype the matmul runs in; inferred from the first input when
                left `None` (see `set_compute_type`).
            compress_statistics (`bool`, defaults to `True`):
                Whether the quantization statistics are themselves quantized.
            quant_type (`str`, defaults to `"fp4"`):
                4-bit code to use (`"fp4"` or `"nf4"`).
            quant_storage (`torch.dtype`, defaults to `torch.uint8`):
                Storage dtype for the packed 4-bit weights.
        """
        super().__init__(input_features, output_features, bias, device)
        # Wrap the freshly created weight; actual quantization is deferred
        # until the parameter is moved to CUDA (see Params4bit.to).
        self.weight = Params4bit(
            self.weight.data,
            requires_grad=False,
            compress_statistics=compress_statistics,
            quant_type=quant_type,
            quant_storage=quant_storage,
            module=self,
        )
        # self.persistent_buffers = []  # TODO consider as way to save quant state
        self.compute_dtype = compute_dtype
        self.compute_type_is_set = False
        self.quant_state = None
        self.quant_storage = quant_storage

    def set_compute_type(self, x):
        """Choose the matmul compute dtype based on the first input seen."""
        if x.dtype in [torch.float32, torch.bfloat16]:
            # the input is in a dtype that is safe to compute in, we switch
            # to this type for speed and stability
            self.compute_dtype = x.dtype
        elif x.dtype == torch.float16:
            # we take the compute dtype passed into the layer
            if self.compute_dtype == torch.float32 and (x.numel() == x.shape[-1]):
                # single batch inference with input torch.float16 and compute_dtype float32 -> slow inference when it could be fast
                # warn the user about this
                warnings.warn(
                    "Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.",
                )
                # Only warn once per process.
                warnings.filterwarnings("ignore", message=".*inference.")
            if self.compute_dtype == torch.float32 and (x.numel() != x.shape[-1]):
                warnings.warn(
                    "Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.",
                )
                warnings.filterwarnings("ignore", message=".*inference or training")

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        """
        save weight and bias,
        then fill state_dict with components of quant_state
        """
        super()._save_to_state_dict(destination, prefix, keep_vars)  # saving weight and bias

        if getattr(self.weight, "quant_state", None) is not None:
            # Serialize the quant state next to the weight, e.g. "<prefix>weight.absmax".
            for k, v in self.weight.quant_state.as_dict(packed=True).items():
                destination[prefix + "weight." + k] = v if keep_vars else v.detach()

    def forward(self, x: torch.Tensor):
        # Recover the quant state if the parameter object got swapped out (e.g. FSDP).
        fix_4bit_weight_quant_state_from_module(self)

        # weights are cast automatically as Int8Params, but the bias has to be cast manually
        if self.bias is not None and self.bias.dtype != x.dtype:
            self.bias.data = self.bias.data.to(x.dtype)

        if not self.compute_type_is_set:
            self.set_compute_type(x)
            self.compute_type_is_set = True

        inp_dtype = x.dtype
        if self.compute_dtype is not None:
            x = x.to(self.compute_dtype)

        bias = None if self.bias is None else self.bias.to(self.compute_dtype)

        # Fused dequantize + matmul on the packed 4-bit weight; restore the input dtype.
        return bnb.matmul_4bit(x, self.weight.t(), bias=bias, quant_state=self.weight.quant_state).to(inp_dtype)
485
+
486
+
487
class LinearFP4(Linear4bit):
    """
    Implements the FP4 data type.
    """

    def __init__(
        self,
        input_features,
        output_features,
        bias=True,
        compute_dtype=None,
        compress_statistics=True,
        quant_storage=torch.uint8,
        device=None,
    ):
        """
        Args:
            input_features (`str`):
                Number of input features of the linear layer.
            output_features (`str`):
                Number of output features of the linear layer.
            bias (`bool`, defaults to `True`):
                Whether the linear class uses the bias term as well.
        """
        # Delegate to Linear4bit with the quant type pinned to "fp4".
        super().__init__(
            input_features,
            output_features,
            bias=bias,
            compute_dtype=compute_dtype,
            compress_statistics=compress_statistics,
            quant_type="fp4",
            quant_storage=quant_storage,
            device=device,
        )
521
+
522
+
523
class LinearNF4(Linear4bit):
    """Implements the NF4 data type.

    Constructs a quantization data type where each bin has equal area under a standard normal distribution N(0, 1) that
    is normalized into the range [-1, 1].

    For more information read the paper: QLoRA: Efficient Finetuning of Quantized LLMs (https://arxiv.org/abs/2305.14314)

    Implementation of the NF4 data type in bitsandbytes can be found in the `create_normal_map` function in
    the `functional.py` file: https://github.com/TimDettmers/bitsandbytes/blob/main/bitsandbytes/functional.py#L236.
    """

    def __init__(
        self,
        input_features,
        output_features,
        bias=True,
        compute_dtype=None,
        compress_statistics=True,
        quant_storage=torch.uint8,
        device=None,
    ):
        """
        Args:
            input_features (`str`):
                Number of input features of the linear layer.
            output_features (`str`):
                Number of output features of the linear layer.
            bias (`bool`, defaults to `True`):
                Whether the linear class uses the bias term as well.
        """
        # Delegate to Linear4bit with the quant type pinned to "nf4".
        super().__init__(
            input_features,
            output_features,
            bias=bias,
            compute_dtype=compute_dtype,
            compress_statistics=compress_statistics,
            quant_type="nf4",
            quant_storage=quant_storage,
            device=device,
        )
564
+
565
+
566
class Int8Params(torch.nn.Parameter):
    """Parameter wrapper for LLM.int8().

    Holds the int8 row-major weight (`CB`) and its per-row scale statistics
    (`SCB`) once quantized; quantization happens lazily on `.cuda()`/`.to()`.
    """

    def __new__(
        cls,
        data: Optional[torch.Tensor] = None,
        requires_grad=True,
        has_fp16_weights=False,
        CB: Optional[torch.Tensor] = None,
        SCB: Optional[torch.Tensor] = None,
    ):
        if data is None:
            data = torch.empty(0)
        obj = torch.Tensor._make_subclass(cls, data, requires_grad)
        obj.CB = CB    # int8 weight matrix (None until quantized)
        obj.SCB = SCB  # per-row quantization statistics (None until quantized)
        obj.has_fp16_weights = has_fp16_weights
        return obj

    def cuda(self, device):
        """Move to CUDA; quantizes to int8 unless fp16 master weights are kept."""
        if self.has_fp16_weights:
            return super().cuda(device)
        else:
            # We quantize the weight and store in 8bit row-major
            B = self.data.contiguous().half().cuda(device)
            CB, SCB, _ = bnb.functional.int8_vectorwise_quant(B)
            self.data = CB
            self.CB = CB
            self.SCB = SCB

            return self

    def __deepcopy__(self, memo):
        # adjust this if new arguments are added to the constructor
        new_instance = type(self).__new__(
            type(self),
            data=copy.deepcopy(self.data, memo),
            requires_grad=self.requires_grad,
            has_fp16_weights=self.has_fp16_weights,
            CB=copy.deepcopy(self.CB, memo),
            SCB=copy.deepcopy(self.SCB, memo),
        )
        return new_instance

    @overload
    def to(
        self: T,
        device: Optional[Union[int, device]] = ...,
        dtype: Optional[Union[dtype, str]] = ...,
        non_blocking: bool = ...,
    ) -> T: ...

    @overload
    def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T: ...

    @overload
    def to(self: T, tensor: Tensor, non_blocking: bool = ...) -> T: ...

    def to(self, *args, **kwargs):
        """Device/dtype move; the first CPU->CUDA hop triggers int8 quantization."""
        device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)

        if device is not None and device.type == "cuda" and self.data.device.type == "cpu":
            # First transfer to CUDA: quantize via cuda() above.
            return self.cuda(device)
        else:
            new_param = Int8Params(
                super().to(device=device, dtype=dtype, non_blocking=non_blocking),
                requires_grad=self.requires_grad,
                has_fp16_weights=self.has_fp16_weights,
            )
            # Carry the quantized buffers over to the new parameter object.
            new_param.CB = self.CB
            new_param.SCB = self.SCB

            return new_param
637
+
638
+
639
def maybe_rearrange_weight(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
    """Load-time pre-hook: convert legacy tiled int8 weight layouts back to row-major."""
    weight = state_dict.get(f"{prefix}weight")
    if weight is None:
        # if the state dict has no weights for this layer (e.g., LoRA finetuning), do nothing
        return

    weight_format = state_dict.pop(f"{prefix}weight_format", "row")
    if isinstance(weight_format, torch.Tensor):
        weight_format = weight_format.item()

    # Newer checkpoints store an integer format code; map it back to a format
    # name, rejecting anything we do not know about.
    if isinstance(weight_format, int):
        if weight_format not in INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING:
            raise ValueError(f"Expected supported weight format - got {weight_format}")
        weight_format = INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING[weight_format]

    if weight_format != "row":
        tile_indices = get_tile_inds(weight_format, weight.device)
        state_dict[f"{prefix}weight"] = undo_layout(weight, tile_indices)
659
+
660
+
661
class Embedding8bit(nn.Embedding):
    """
    This class implements [LLM.int8()](https://arxiv.org/abs/2208.07339) algorithm for embedding layer

    Quantization API is similar to Linear8bitLt:
    ```python
    import torch
    import torch.nn as nn

    from bitsandbytes.nn import Embedding8bit

    fp16_module = nn.Embedding(128, 64)
    int8_module = Embedding8bit(128, 64)

    int8_module.load_state_dict(fp16_module.state_dict())

    int8_module = int8_module.to(0) # Quantization happens here
    ```
    """

    def __init__(self, num_embeddings, embedding_dim, device=None, dtype=None):
        super().__init__(num_embeddings, embedding_dim, device=device, dtype=dtype)
        # Remember the original dtype so forward() can return it after dequantization.
        self.dtype = self.weight.data.dtype

        # Quantization happens lazily when the parameter is moved to CUDA.
        self.weight = Int8Params(self.weight.data, has_fp16_weights=False, requires_grad=False)

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        raise NotImplementedError("Saving Embedding8bit module is not implemented")

    def forward(self, input: Tensor) -> Tensor:
        if not hasattr(self.weight, "SCB"):
            raise RuntimeError("Embedding layer is not quantized. Please call .cuda() or .to(device) first.")

        rows = self.weight.data       # int8 codes, one row per embedding
        row_stats = self.weight.SCB   # per-row scale statistics

        assert rows.shape == (self.num_embeddings, self.embedding_dim)
        assert row_stats.shape == (self.num_embeddings,)

        # Gather the int8 rows and their scales, then dequantize:
        # value = code * (row_stat / 127).
        compressed_output = F.embedding(input, rows)
        compressed_output_stats = F.embedding(input, row_stats.view(self.num_embeddings, 1))

        output = compressed_output * (compressed_output_stats / 127.0)

        return output.to(self.dtype)
706
+
707
+
708
class Embedding4bit(nn.Embedding):
    """
    This is the base class similar to Linear4bit. It implements the 4-bit quantization algorithm presented in
    [QLoRA](https://arxiv.org/abs/2305.14314) for embeddings.

    Quantization API is similar to Linear4bit:
    ```python
    import torch
    import torch.nn as nn

    from bitsandbytes.nn import Embedding4bit

    fp16_module = nn.Embedding(128, 64)
    quantized_module = Embedding4bit(128, 64)

    quantized_module.load_state_dict(fp16_module.state_dict())

    quantized_module = quantized_module.to(0) # Quantization happens here
    ```
    """

    def __init__(
        self,
        num_embeddings,
        embedding_dim,
        dtype=None,
        quant_type="fp4",
        quant_storage=torch.uint8,
        device=None,
    ):
        super().__init__(num_embeddings, embedding_dim, device=device, dtype=dtype)
        # Remember the original dtype so forward() can return it after dequantization.
        self.dtype = self.weight.data.dtype

        # NOTE(review): compress_statistics is passed as None rather than a
        # bool here — TODO confirm this is intentional.
        self.weight = Params4bit(
            self.weight.data,
            requires_grad=False,
            compress_statistics=None,
            quant_type=quant_type,
            quant_storage=quant_storage,
            module=self,
        )

        blocksize = self.weight.blocksize

        # The fast lookup path requires whole blocks per embedding row.
        if embedding_dim % blocksize != 0:
            warnings.warn(
                f"Embedding size {embedding_dim} is not divisible by block size {blocksize}. "
                "This will lead to slow inference.",
            )

    def _forward_with_partial_dequantize(self, input: Tensor):
        """Gather packed 4-bit rows and dequantize only the selected embeddings."""
        assert self.embedding_dim % self.weight.quant_state.blocksize == 0

        # Packed storage viewed as one uint8 column (two 4-bit codes per byte).
        w_4bit_uint8 = self.weight.data.view(torch.uint8).view(self.num_embeddings * self.embedding_dim // 2, 1)

        # Gather the packed bytes for the requested indices.
        output_4bit = torch.nn.functional.embedding(
            weight=w_4bit_uint8.view(self.num_embeddings, self.embedding_dim // 2),
            input=input,
        ).view(-1, 1)
        assert output_4bit.shape == (input.numel() * self.embedding_dim // 2, 1)

        blocks_per_emb = self.embedding_dim // self.weight.blocksize

        # Gather the matching per-block absmax statistics the same way.
        absmax = self.weight.quant_state.absmax
        assert absmax.shape == (self.num_embeddings * blocks_per_emb,)

        output_absmax = torch.nn.functional.embedding(
            weight=absmax.view(self.num_embeddings, blocks_per_emb),
            input=input,
        ).view(
            -1,
        )
        assert output_absmax.shape == (input.numel() * blocks_per_emb,)

        # Build a quant state describing only the gathered slice, then dequantize it.
        output_quant_state = copy.deepcopy(self.weight.quant_state)
        output_quant_state.absmax = output_absmax
        output_quant_state.shape = torch.Size((*input.shape, self.embedding_dim))

        output = bnb.functional.dequantize_4bit(output_4bit, output_quant_state)
        assert output.shape == (*input.shape, self.embedding_dim)

        return output.to(self.dtype)

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        raise NotImplementedError("Saving Embedding4bit module is not implemented")

    def forward(self, input: Tensor) -> Tensor:
        # Recover the quant state if the parameter object got swapped out.
        fix_4bit_weight_quant_state_from_module(self)

        if self.embedding_dim % self.weight.quant_state.blocksize == 0:
            # Fast path: dequantize only the rows that were actually selected.
            return self._forward_with_partial_dequantize(input)

        # Slow path: dequantize the full table, then a normal embedding lookup.
        dequantized_weight = bnb.functional.dequantize_4bit(self.weight.data, self.weight.quant_state)

        return torch.nn.functional.embedding(
            weight=dequantized_weight,
            input=input,
        ).to(self.dtype)
806
+
807
+
808
class EmbeddingFP4(Embedding4bit):
    """Embedding4bit specialization pinned to the FP4 quant type."""

    def __init__(
        self,
        num_embeddings,
        embedding_dim,
        dtype=None,
        quant_storage=torch.uint8,
        device=None,
    ):
        options = dict(
            dtype=dtype,
            quant_type="fp4",
            quant_storage=quant_storage,
            device=device,
        )
        super().__init__(num_embeddings, embedding_dim, **options)
825
+
826
+
827
class EmbeddingNF4(Embedding4bit):
    """Embedding4bit specialization pinned to the NF4 quant type."""

    def __init__(
        self,
        num_embeddings,
        embedding_dim,
        dtype=None,
        quant_storage=torch.uint8,
        device=None,
    ):
        options = dict(
            dtype=dtype,
            quant_type="nf4",
            quant_storage=quant_storage,
            device=device,
        )
        super().__init__(num_embeddings, embedding_dim, **options)
844
+
845
+
846
class Linear8bitLt(nn.Linear):
    """
    This class is the base module for the [LLM.int8()](https://arxiv.org/abs/2208.07339) algorithm.
    To read more about it, have a look at the paper.

    In order to quantize a linear layer one should first load the original fp16 / bf16 weights into
    the Linear8bitLt module, then call `int8_module.to("cuda")` to quantize the fp16 weights.

    Example:

    ```python
    import torch
    import torch.nn as nn

    import bitsandbytes as bnb
    from bnb.nn import Linear8bitLt

    fp16_model = nn.Sequential(
        nn.Linear(64, 64),
        nn.Linear(64, 64)
    )

    int8_model = nn.Sequential(
        Linear8bitLt(64, 64, has_fp16_weights=False),
        Linear8bitLt(64, 64, has_fp16_weights=False)
    )

    int8_model.load_state_dict(fp16_model.state_dict())
    int8_model = int8_model.to(0) # Quantization happens here
    ```
    """

    def __init__(
        self,
        input_features: int,
        output_features: int,
        bias=True,
        has_fp16_weights=True,
        threshold=0.0,
        index=None,
        device=None,
    ):
        """
        Initialize Linear8bitLt class.

        Args:
            input_features (`int`):
                Number of input features of the linear layer.
            output_features (`int`):
                Number of output features of the linear layer.
            bias (`bool`, defaults to `True`):
                Whether the linear class uses the bias term as well.
            has_fp16_weights (`bool`, defaults to `True`):
                Whether to keep full-precision weights (no int8 quantization on
                the CUDA transfer).
            threshold (`float`, defaults to `0.0`):
                Outlier threshold for the mixed-precision decomposition; values
                > 0 (with int8 weights) also enable the outlier pool.
            index (*optional*):
                Stored on the layer as-is; not used by this module's own code.
        """
        super().__init__(input_features, output_features, bias, device)
        self.state = bnb.MatmulLtState()
        self.index = index

        self.state.threshold = threshold
        self.state.has_fp16_weights = has_fp16_weights

        if threshold > 0.0 and not has_fp16_weights:
            self.state.use_pool = True

        self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights)
        # Legacy checkpoints may store tiled int8 layouts; rearrange them on load.
        self._register_load_state_dict_pre_hook(maybe_rearrange_weight)

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        super()._save_to_state_dict(destination, prefix, keep_vars)

        # we only need to save SCB as extra data, because CB for quantized weights is already stored in weight.data
        scb_name = "SCB"

        # case 1: .cuda was called, SCB is in self.weight
        param_from_weight = getattr(self.weight, scb_name)
        # case 2: self.init_8bit_state was called, SCB is in self.state
        param_from_state = getattr(self.state, scb_name)

        key_name = prefix + f"{scb_name}"

        # We now only save in row-major. This format information is stored for backwards compatibility.
        format_name = prefix + "weight_format"

        if not self.state.has_fp16_weights:
            if param_from_weight is not None:
                destination[key_name] = param_from_weight if keep_vars else param_from_weight.detach()
                destination[format_name] = torch.tensor(0, dtype=torch.uint8)
            elif param_from_state is not None:
                destination[key_name] = param_from_state if keep_vars else param_from_state.detach()
                destination[format_name] = torch.tensor(0, dtype=torch.uint8)

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        super()._load_from_state_dict(
            state_dict,
            prefix,
            local_metadata,
            strict,
            missing_keys,
            unexpected_keys,
            error_msgs,
        )
        # The base class flags our extra "SCB" entry as unexpected; claim it here.
        # Copy first because we mutate unexpected_keys while iterating.
        unexpected_copy = list(unexpected_keys)

        for key in unexpected_copy:
            input_name = key[len(prefix) :]
            if input_name == "SCB":
                if self.weight.SCB is None:
                    # buffers not yet initialized, can't access them directly without quantizing first
                    raise RuntimeError(
                        "Loading a quantized checkpoint into non-quantized Linear8bitLt is "
                        "not supported. Please call module.cuda() before module.load_state_dict()",
                    )

                input_param = state_dict[key]
                self.weight.SCB.copy_(input_param)

                if self.state.SCB is not None:
                    self.state.SCB = self.weight.SCB

                unexpected_keys.remove(key)

    def init_8bit_state(self):
        """Hand the quantized buffers over from the weight to the matmul state."""
        self.state.CB = self.weight.CB
        self.state.SCB = self.weight.SCB
        self.weight.CB = None
        self.weight.SCB = None

    def forward(self, x: torch.Tensor):
        self.state.is_training = self.training
        # First forward after quantization: move CB/SCB into the matmul state.
        if self.weight.CB is not None:
            self.init_8bit_state()

        # weights are cast automatically as Int8Params, but the bias has to be cast manually
        if self.bias is not None and self.bias.dtype != x.dtype:
            self.bias.data = self.bias.data.to(x.dtype)

        out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state)

        if not self.state.has_fp16_weights and self.state.CB is not None:
            # Re-point weight.data at the int8 buffer now held by the state.
            self.weight.data = self.state.CB

        return out
996
+
997
+
998
class OutlierAwareLinear(nn.Linear):
    """Linear layer that quantizes its weight once outlier dimensions are known.

    Subclasses must implement `quantize_weight` and `forward_with_outliers`;
    outlier dimensions are discovered via the global OutlierTracer on the
    first forward pass.
    """

    def __init__(self, input_features, output_features, bias=True, device=None):
        super().__init__(input_features, output_features, bias, device)
        self.outlier_dim = None  # filled lazily from OutlierTracer on first forward
        self.is_quantized = False

    def forward_with_outliers(self, x, outlier_idx):
        raise NotImplementedError("Please override the `forward_with_outliers(self, x, outlier_idx)` function")

    def quantize_weight(self, w, outlier_idx):
        raise NotImplementedError("Please override the `quantize_weights(self, w, outlier_idx)` function")

    def forward(self, x):
        # Lazily discover the outlier dimensions from the tracer.
        if self.outlier_dim is None:
            tracer = OutlierTracer.get_instance()
            if not tracer.is_initialized():
                print("Please use OutlierTracer.initialize(model) before using the OutlierAwareLinear layer")
            outlier_idx = tracer.get_outliers(self.weight)
            self.outlier_dim = outlier_idx

        # One-time in-place quantization of the weight.
        if not self.is_quantized:
            w = self.quantize_weight(self.weight, self.outlier_dim)
            self.weight.data.copy_(w)
            self.is_quantized = True

        # BUG FIX: previously the method ended after quantization and implicitly
        # returned None; delegate to the subclass hook to produce the output.
        return self.forward_with_outliers(x, self.outlier_dim)
1023
+
1024
+
1025
+ class SwitchBackLinearBnb(nn.Linear):
1026
    def __init__(
        self,
        input_features,
        output_features,
        bias=True,
        has_fp16_weights=True,
        memory_efficient_backward=False,
        threshold=0.0,
        index=None,
        device=None,
    ):
        """Initialize the SwitchBack bnb linear layer.

        Mirrors Linear8bitLt's configuration: `threshold` > 0 with int8
        weights enables the outlier pool; `has_fp16_weights` keeps a
        full-precision weight copy; `memory_efficient_backward` is recorded
        on the matmul state.
        """
        super().__init__(input_features, output_features, bias, device)
        self.state = bnb.MatmulLtState()
        self.index = index

        self.state.threshold = threshold
        self.state.has_fp16_weights = has_fp16_weights
        self.state.memory_efficient_backward = memory_efficient_backward
        # Outlier pooling only applies with a threshold and pure-int8 weights.
        if threshold > 0.0 and not has_fp16_weights:
            self.state.use_pool = True

        # Quantization happens lazily when the parameter is moved to CUDA.
        self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights)
1048
+
1049
+ def init_8bit_state(self):
1050
+ self.state.CB = self.weight.CB
1051
+ self.state.SCB = self.weight.SCB
1052
+ self.weight.CB = None
1053
+ self.weight.SCB = None
1054
+
1055
+ def forward(self, x):
1056
+ self.state.is_training = self.training
1057
+
1058
+ if self.weight.CB is not None:
1059
+ self.init_8bit_state()
1060
+
1061
+ out = bnb.matmul_mixed(x.half(), self.weight.half(), bias=None, state=self.state) + self.bias
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/nn/triton_based_modules.py ADDED
@@ -0,0 +1,264 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+
6
+ from bitsandbytes.triton.dequantize_rowwise import dequantize_rowwise
7
+ from bitsandbytes.triton.int8_matmul_mixed_dequantize import (
8
+ int8_matmul_mixed_dequantize,
9
+ )
10
+ from bitsandbytes.triton.int8_matmul_rowwise_dequantize import (
11
+ int8_matmul_rowwise_dequantize,
12
+ )
13
+ from bitsandbytes.triton.quantize_columnwise_and_transpose import (
14
+ quantize_columnwise_and_transpose,
15
+ )
16
+ from bitsandbytes.triton.quantize_global import (
17
+ quantize_global,
18
+ quantize_global_transpose,
19
+ )
20
+ from bitsandbytes.triton.quantize_rowwise import quantize_rowwise
21
+ from bitsandbytes.triton.triton_utils import is_triton_available
22
+
23
+
24
class _switchback_global(torch.autograd.Function):
    """SwitchBack matmul: row-wise int8 input x globally-quantized int8 weight."""

    @staticmethod
    def forward(ctx, X_3D, W, bias):
        # reshape input to [N * L, D]
        X = X_3D.view(-1, X_3D.size(-1))

        # rowwise quantize for X, global quantize for W
        X_int8, state_X = quantize_rowwise(X)
        W_int8, state_W = quantize_global(W)

        # save for backward.
        # NOTE(review): this assigns a plain attribute rather than calling
        # ctx.save_for_backward(...), bypassing autograd's saved-tensor
        # management — TODO confirm this is intentional.
        ctx.save_for_backward = X, W

        # matmult, fused dequant and add bias
        # call "mixed" because we are mixing rowwise quantized and global quantized
        return int8_matmul_mixed_dequantize(X_int8, W_int8.t(), state_X, state_W, bias).view(*X_3D.size()[:-1], -1)

    @staticmethod
    def backward(ctx, G_3D):
        # reshape input to [N_out * L, D]
        G = G_3D.reshape(-1, G_3D.size(-1))

        grad_X = grad_W = grad_bias = None

        X, W = ctx.save_for_backward
        if ctx.needs_input_grad[0]:
            # rowwise quantize for G, global quantize for W
            # for W, we also fuse the transpose operation because only A @ B^T is supported
            # so we transpose once then call .t() in the matmul
            G_int8, state_G = quantize_rowwise(G)
            W_int8, state_W = quantize_global_transpose(W)
            grad_X = int8_matmul_mixed_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(
                *G_3D.size()[:-1],
                -1,
            )
        if ctx.needs_input_grad[1]:
            # backward pass uses standard weight grad
            grad_W = torch.matmul(G.t(), X.to(G.dtype))
        if ctx.needs_input_grad[2]:
            grad_bias = G.sum(dim=0)

        return grad_X, grad_W, grad_bias
66
+
67
+
68
class _switchback_vectorrize(torch.autograd.Function):
    """SwitchBack matmul: row-wise int8 quantization for both input and weight."""

    @staticmethod
    def forward(ctx, X_3D, W, bias):
        # reshape input to [N * L, D]
        X = X_3D.view(-1, X_3D.size(-1))

        # NOTE(review): attribute assignment instead of calling
        # ctx.save_for_backward(...) — TODO confirm intentional.
        ctx.save_for_backward = X, W
        # rowwise quantize for X
        # columnwise quantize for W (first rowwise, transpose later)
        X_int8, state_X = quantize_rowwise(X)
        W_int8, state_W = quantize_rowwise(W)

        # matmult, fused dequant and add bias
        # call kernel which expects rowwise quantized X and W
        return int8_matmul_rowwise_dequantize(X_int8, W_int8.t(), state_X, state_W, bias).view(*X_3D.size()[:-1], -1)

    @staticmethod
    def backward(ctx, G_3D):
        X, W = ctx.save_for_backward

        G = G_3D.reshape(-1, G_3D.size(-1))

        grad_X = grad_W = grad_bias = None

        if ctx.needs_input_grad[0]:
            # rowwise quantize for G, columnwise quantize for W and fused transpose
            # we call .t() for weight later because only A @ B^T is supported
            G_int8, state_G = quantize_rowwise(G)
            W_int8, state_W = quantize_columnwise_and_transpose(W)
            grad_X = int8_matmul_rowwise_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(
                *G_3D.size()[:-1],
                -1,
            )
        if ctx.needs_input_grad[1]:
            # backward pass uses standard weight grad
            grad_W = torch.matmul(G.t(), X.to(G.dtype))
        if ctx.needs_input_grad[2]:
            grad_bias = G.sum(dim=0)

        return grad_X, grad_W, grad_bias
108
+
109
+
110
class _switchback_global_mem_efficient(torch.autograd.Function):
    """Memory-efficient SwitchBack: saves the int8 activations (not fp) for backward
    and dequantizes them again when the weight gradient is needed."""

    @staticmethod
    def forward(ctx, X_3D, W, bias):
        # reshape input to [N * L, D]
        X = X_3D.view(-1, X_3D.size(-1))
        X_3D_sz = X_3D.size()

        # rowwise quantize for X, global quantize for W
        X_int8, state_X = quantize_rowwise(X)
        # Free the full-precision activation early; only int8 is kept.
        del X
        W_int8, state_W = quantize_global(W)

        # save for backward.
        # NOTE(review): attribute assignment instead of calling
        # ctx.save_for_backward(...) — TODO confirm intentional.
        ctx.save_for_backward = X_int8, state_X, W_int8, state_W

        # matmult, fused dequant and add bias
        # call "mixed" because we are mixing rowwise quantized and global quantized
        return int8_matmul_mixed_dequantize(X_int8, W_int8.t(), state_X, state_W, bias).view(*X_3D_sz[:-1], -1)

    @staticmethod
    def backward(ctx, G_3D):
        # reshape input to [N_out * L, D]
        G = G_3D.reshape(-1, G_3D.size(-1))
        G_3D_sz = G_3D.size()

        grad_X = grad_W = grad_bias = None

        X_int8, state_X, W_int8, state_W = ctx.save_for_backward
        if ctx.needs_input_grad[1]:
            # Recover the full-precision activation from its int8 form.
            real_X = dequantize_rowwise(X_int8, state_X)
            del X_int8
            grad_W = torch.matmul(G.t(), real_X.to(G.dtype))
            del real_X
        if ctx.needs_input_grad[2]:
            grad_bias = G.sum(dim=0)
        if ctx.needs_input_grad[0]:
            G_int8, state_G = quantize_rowwise(G)
            del G
            # Materialize the transpose once, then .t() in the matmul (A @ B^T only).
            W_int8 = W_int8.t().contiguous()
            grad_X = int8_matmul_mixed_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(*G_3D_sz[:-1], -1)

        return grad_X, grad_W, grad_bias
152
+
153
+
154
class SwitchBackLinear(nn.Linear):
    """Linear layer whose forward pass runs through int8 SwitchBack matmuls (triton).

    Args:
        in_features / out_features / bias / device / dtype: as in nn.Linear.
        vector_wise_quantization: use vector-wise (rowwise) weight quantization
            instead of the default global quantization.
        mem_efficient: use the memory-efficient global variant (incompatible
            with vector-wise quantization).

    Raises:
        ImportError: if triton is not available.
        ValueError: if mem_efficient is combined with vector_wise_quantization.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        device=None,
        dtype=None,
        vector_wise_quantization: bool = False,
        mem_efficient: bool = False,
    ):
        super().__init__(in_features, out_features, bias, device, dtype)

        if not is_triton_available():
            raise ImportError("""Could not import triton. Please install triton to use SwitchBackLinear.
                              Alternatively, you can use bnb.nn.SwitchBackLinearBnb, but it will be slower""")

        # By default, we use the global quantization.
        self.vector_wise_quantization = vector_wise_quantization
        if self.vector_wise_quantization:
            self._fn = _switchback_vectorrize
            if mem_efficient:
                # FIX: previously this printed a message and called exit(1),
                # terminating the host process from library code. Raise so
                # callers can handle the misconfiguration instead.
                raise ValueError("mem efficient is not supported for vector-wise quantization.")
        else:
            if mem_efficient:
                self._fn = _switchback_global_mem_efficient
            else:
                self._fn = _switchback_global

    def prepare_for_eval(self):
        """Pre-quantize the weights for inference, replacing self.weight with buffers.

        If we just want to do eval, we can pre-quantize the weights instead of
        doing it on the forward pass.
        Note this is experimental and not tested thoroughly.
        Note this needs to be explicitly called with something like
            def cond_prepare(m):
                if hasattr(m, "prepare_for_eval"):
                    m.prepare_for_eval()
            model.apply(cond_prepare)
        """
        print("=> preparing for eval.")
        if self.vector_wise_quantization:
            W_int8, state_W = quantize_rowwise(self.weight)
        else:
            W_int8, state_W = quantize_global(self.weight)

        # store quantized weight + scale state as buffers so they move with the module
        self.register_buffer("W_int8", W_int8)
        self.register_buffer("state_W", state_W)

        del self.weight

    def forward(self, x):
        if self.training:
            return self._fn.apply(x, self.weight, self.bias)
        else:
            # If it hasn't been "prepared for eval", run the standard forward pass.
            if not hasattr(self, "W_int8"):
                return self._fn.apply(x, self.weight, self.bias)

            # Otherwise, use pre-computed weights.
            X = x.view(-1, x.size(-1))
            X_int8, state_X = quantize_rowwise(X)

            if self.vector_wise_quantization:
                return int8_matmul_rowwise_dequantize(X_int8, self.W_int8.t(), state_X, self.state_W, self.bias).view(
                    *x.size()[:-1],
                    -1,
                )
            else:
                return int8_matmul_mixed_dequantize(X_int8, self.W_int8.t(), state_X, self.state_W, self.bias).view(
                    *x.size()[:-1],
                    -1,
                )
225
+
226
+
227
# Convenience constructors: SwitchBackLinear pre-configured for each
# quantization strategy (global, memory-efficient global, vector-wise).
SwitchBackLinearGlobal = partial(SwitchBackLinear, vector_wise_quantization=False)
SwitchBackLinearGlobalMemEfficient = partial(SwitchBackLinear, vector_wise_quantization=False, mem_efficient=True)
SwitchBackLinearVectorwise = partial(SwitchBackLinear, vector_wise_quantization=True)
230
+
231
+
232
+ # This is just the standard linear function.
233
class StandardLinearFunction(torch.autograd.Function):
    """Plain linear transform (x @ W^T + b) written as an explicit autograd Function.

    Serves as the full-precision baseline for the quantized variants above.
    """

    @staticmethod
    def forward(ctx, input, weight, bias=None):
        # Save the 2D-flattened input; backward works on flattened tensors.
        flat = input.view(-1, input.size(-1))
        ctx.save_for_backward(flat, weight, bias)

        result = input.matmul(weight.t())
        if bias is not None:
            result += bias.unsqueeze(0).expand_as(result)
        return result.view(*input.size()[:-1], -1)

    @staticmethod
    def backward(ctx, grad_output_3D):
        flat_input, weight, bias = ctx.saved_tensors

        # Collapse leading dims so both grads are simple 2D matmuls.
        grad_out = grad_output_3D.reshape(-1, grad_output_3D.size(-1))

        grad_input = grad_weight = grad_bias = None
        if ctx.needs_input_grad[0]:
            grad_input = grad_out.matmul(weight.to(grad_out.dtype)).view(*grad_output_3D.size()[:-1], -1)
        if ctx.needs_input_grad[1]:
            grad_weight = grad_out.t().matmul(flat_input.to(grad_out.dtype))
        if bias is not None and ctx.needs_input_grad[2]:
            grad_bias = grad_out.sum(0)

        return grad_input, grad_weight, grad_bias
260
+
261
+
262
class StandardLinear(nn.Linear):
    # Drop-in nn.Linear whose forward is routed through StandardLinearFunction;
    # useful as a full-precision baseline against the SwitchBack variants.
    def forward(self, x):
        return StandardLinearFunction.apply(x, self.weight, self.bias)
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.23 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/lion.cpython-310.pyc ADDED
Binary file (10.5 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from . import nn
2
+ from .autograd._functions import (
3
+ matmul_fp8_global,
4
+ matmul_fp8_mixed,
5
+ switchback_bnb,
6
+ )
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (320 Bytes). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/autograd/__init__.py ADDED
File without changes
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/autograd/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (189 Bytes). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/autograd/__pycache__/_functions.cpython-310.pyc ADDED
Binary file (8.43 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/autograd/_functions.py ADDED
@@ -0,0 +1,396 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import reduce # Required in Python 3
2
+ import operator
3
+ from typing import Optional
4
+ import warnings
5
+
6
+ import torch
7
+
8
+ from bitsandbytes.autograd._functions import GlobalOutlierPooler, MatmulLtState
9
+ import bitsandbytes.functional as F
10
+
11
+
12
# math.prod not compatible with python < 3.8
def prod(iterable):
    """Multiply all elements of *iterable* together; returns 1 for an empty iterable."""
    result = 1
    for factor in iterable:
        result *= factor
    return result
15
+
16
+
17
class MatMulFP8Mixed(torch.autograd.Function):
    """Matmul with simulated FP8 precision: quantize/dequantize round-trips on
    the operands before a regular full-precision matmul.

    "Mixed": the weight gradient (grad_B) is computed from the unquantized
    activations and gradient, while grad_A uses the fp8-simulated gradient.
    """

    # forward is the same, but we added the fallback for pre-turing GPUs
    # backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")

    @staticmethod
    def forward(ctx, A, B, out=None, fw_code=None, bw_code=None, bsz=1024, bsz2=1024):
        # default of pytorch behavior if inputs are empty
        ctx.is_empty = False
        if prod(A.shape) == 0:
            ctx.is_empty = True
            ctx.A = A
            ctx.B = B

            B_shape = B.shape
            if A.shape[-1] == B_shape[0]:
                return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)
            else:
                return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)

        # 1. Dequantize
        # 2. MatmulnN
        # A: blockwise quantize/dequantize round-trip (blocksize bsz) to simulate fp8 loss
        cA, state = F.quantize_blockwise(A, code=fw_code, blocksize=bsz)
        fp8A = F.dequantize_blockwise(cA, state, blocksize=bsz).to(A.dtype)

        # B: global (non-blockwise) quantize/dequantize round-trip
        cB, state = F.quantize(B.float(), code=fw_code)
        fp8B = F.dequantize(cB, state).to(B.dtype)

        output = torch.matmul(fp8A, fp8B)

        # output is half

        # 3. Save state
        ctx.fw_code = fw_code
        ctx.bw_code = bw_code
        ctx.bsz = bsz
        ctx.bsz2 = bsz2
        ctx.dtype_A, ctx.dtype_B = A.dtype, B.dtype

        if any(ctx.needs_input_grad[:2]):
            # NOTE: we send back A, and re-quant.
            ctx.tensors = (A, fp8B)
        else:
            ctx.tensors = (None, None)

        return output

    @staticmethod
    def backward(ctx, grad_output):
        if ctx.is_empty:
            return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, None, None, None, None

        req_gradA, req_gradB, _, _, _, _, _ = ctx.needs_input_grad
        A, B = ctx.tensors

        grad_A, grad_B = None, None

        # fp8-simulate the incoming gradient via a blockwise round-trip
        # TODO: Fix blocksize to be output_dim
        cgrad_out, state = F.quantize_blockwise(grad_output, code=ctx.bw_code, blocksize=ctx.bsz2)
        fp8out = F.dequantize_blockwise(cgrad_out, state, blocksize=ctx.bsz2).to(grad_output.dtype)

        # cgrad_output_2, state_2 = F.quantize(grad_output.float(), code=ctx.bw_code)
        # fp8out_2 = F.dequantize(cgrad_output_2, state_2).to(grad_output.dtype)

        # grad_output_reshape = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()
        # fp8grad_transpose, stategrad_transpose = F.vectorwise_quant(grad_output_reshape, dim=0, quant_type='vector')
        # fp8out_transpose = (fp8grad_transpose / 7) * stategrad_transpose
        # fp8out_transpose = fp8out_transpose.view(grad_output.shape[0], grad_output.shape[1], grad_output.shape[2])

        # not supported by PyTorch. TODO: create work-around
        if req_gradA:
            grad_A = torch.matmul(fp8out, B.t().to(fp8out.dtype)).to(A.dtype)

        if req_gradB:
            if len(A.shape) == 3:
                At = A.transpose(2, 1).contiguous()
            else:
                At = A.transpose(1, 0).contiguous()
            # cA, state = F.quantize(At.float(), code=ctx.fw_code)
            # fp8At = F.dequantize(cA, state).to(A.dtype)
            # "mixed": weight grad stays full precision (raw At and grad_output)
            grad_B = torch.matmul(At.to(grad_output.dtype), grad_output).to(B.dtype)

        return grad_A, grad_B, None, None, None, None, None
99
+
100
+
101
class MatMulFP8Global(torch.autograd.Function):
    """Matmul with simulated FP8 precision using global (non-blockwise) quantization.

    Differs from MatMulFP8Mixed in that both forward operands use global
    quantization, and the weight gradient is computed from fp8-simulated
    activations and gradient.
    """

    # forward is the same, but we added the fallback for pre-turing GPUs
    # backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")

    @staticmethod
    def forward(ctx, A, B, out=None, fw_code=None, bw_code=None, bsz=1024, bsz2=1024):
        # default of pytorch behavior if inputs are empty
        ctx.is_empty = False
        if prod(A.shape) == 0:
            ctx.is_empty = True
            ctx.A = A
            ctx.B = B

            B_shape = B.shape
            if A.shape[-1] == B_shape[0]:
                return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)
            else:
                return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)

        # 1. Dequantize
        # 2. MatmulnN
        # both operands: global quantize/dequantize round-trip to simulate fp8 loss
        cA, state = F.quantize(A.float(), code=fw_code)
        fp8A = F.dequantize(cA, state).to(A.dtype)

        cB, state = F.quantize(B.float(), code=fw_code)
        fp8B = F.dequantize(cB, state).to(B.dtype)

        output = torch.matmul(fp8A, fp8B)

        # output is half

        # 3. Save state
        ctx.fw_code = fw_code
        ctx.bw_code = bw_code
        ctx.bsz = bsz
        ctx.bsz2 = bsz2
        ctx.dtype_A, ctx.dtype_B = A.dtype, B.dtype

        if any(ctx.needs_input_grad[:2]):
            # NOTE: we send back A, and re-quant.
            ctx.tensors = (A, fp8B)
        else:
            ctx.tensors = (None, None)

        return output

    @staticmethod
    def backward(ctx, grad_output):
        if ctx.is_empty:
            return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, None, None, None, None

        req_gradA, req_gradB, _, _, _, _, _ = ctx.needs_input_grad
        A, B = ctx.tensors

        grad_A, grad_B = None, None

        # fp8-simulate the incoming gradient (global round-trip)
        # TODO: Fix blocksize to be output_dim
        cgrad_out, state = F.quantize(grad_output.float(), code=ctx.bw_code)
        fp8out = F.dequantize(cgrad_out, state).to(grad_output.dtype)

        # cgrad_output_2, state_2 = F.quantize(grad_output.float(), code=ctx.bw_code)
        # fp8out_2 = F.dequantize(cgrad_output_2, state_2).to(grad_output.dtype)

        # grad_output_reshape = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()
        # fp8grad_transpose, stategrad_transpose = F.vectorwise_quant(grad_output_reshape, dim=0, quant_type='vector')
        # fp8out_transpose = (fp8grad_transpose / 7) * stategrad_transpose
        # fp8out_transpose = fp8out_transpose.view(grad_output.shape[0], grad_output.shape[1], grad_output.shape[2])

        # not supported by PyTorch. TODO: create work-around
        if req_gradA:
            grad_A = torch.matmul(fp8out, B.t().to(fp8out.dtype)).to(A.dtype)

        if req_gradB:
            if len(A.shape) == 3:
                At = A.transpose(2, 1).contiguous()
            else:
                At = A.transpose(1, 0).contiguous()
            # "global": weight grad also uses fp8-simulated activations
            cA, state = F.quantize(At.float(), code=ctx.fw_code)
            fp8At = F.dequantize(cA, state).to(A.dtype)
            grad_B = torch.matmul(fp8At.to(fp8out.dtype), fp8out).to(B.dtype)

        return grad_A, grad_B, None, None, None, None, None
183
+
184
+
185
class SwitchBackBnb(torch.autograd.Function):
    """SwitchBack linear built on the bitsandbytes int8 kernels (no triton).

    Forward: int8 double-quantize A and (optionally cached) B, run the int8
    matmul with fused dequant + bias, and add a full-precision correction for
    outlier columns. Backward: full-precision weight grad, dequantized-CB
    input grad.
    """

    @staticmethod
    def forward(ctx, A, B, out=None, bias=None, state: Optional[MatmulLtState] = None):
        state = state or MatmulLtState()

        # default to pytorch behavior if inputs are empty
        ctx.is_empty = False
        if prod(A.shape) == 0:
            ctx.is_empty = True
            ctx.A = A
            ctx.B = B
            ctx.bias = bias
            if A.shape[-1] == B.shape[0]:
                return torch.empty(A.shape[:-1] + B.shape[1:], dtype=A.dtype, device=A.device)
            else:
                return torch.empty(A.shape[:-1] + B.shape[:1], dtype=A.dtype, device=A.device)

        # 1. Quantize A
        # 2. Quantize B
        # 3. Matmul
        # 4. Mixed-precision decomposition matmul
        # 5. Save state
        input_shape = A.shape
        if state.outlier_pool is None:
            state.outlier_pool = GlobalOutlierPooler.get_instance()

        # Cast A to fp16
        if A.dtype != torch.float16:
            warnings.warn(f"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization")

        # 1. Quantize A (rowwise + columnwise int8, plus outlier column indices)
        if len(A.shape) == 3:
            A = A.view(-1, A.shape[-1]).contiguous()
        CA, CAt, SCA, SCAt, outlier_cols = F.int8_double_quant(A.to(torch.float16), threshold=state.threshold)

        if state.threshold > 0.0 and outlier_cols is not None:
            if state.has_fp16_weights:
                # zero outlier columns in the int8 rep; keep them in fp16 for step 4
                idx = outlier_cols
                CA[:, idx] = 0
                subA = A[:, idx]
                state.subB = B[:, idx].t().contiguous()
                state.idx = idx
            else:
                # NOTE(review): subA is not set on this path until the
                # int8-weight outlier branch below assigns it — confirm all
                # reachable paths define subA before step 4.
                if state.SB is None:
                    state.SB = (state.CB.shape, "row")
        else:
            if not state.has_fp16_weights and state.SB is None:
                state.SB = (state.CB.shape, "row")
            subA = None

        # 2. Quantize B (only re-quantized while training with fp16 weights)
        if state.has_fp16_weights:
            # print('B shape', B.shape)
            has_grad = True if (getattr(B, "grad", None) is not None) else False
            is_transposed = not B.is_contiguous() and B.shape[0] == B.stride(1)
            if is_transposed:
                B = B.contiguous()

            if (state.is_training and not has_grad) or state.SB is None:
                state.reset_grads()
                (
                    state.CB,
                    state.CBt,
                    state.SCB,
                    state.SCBt,
                    _,
                ) = F.int8_double_quant(B.to(torch.float16))
                state.SB = (state.CB.shape, "row")
        else:
            has_grad = False

        if outlier_cols is not None and not state.has_fp16_weights:
            # extract outliers from the cached int8 weight and dequantize them
            state.idx = outlier_cols
            outliers = state.CB[:, state.idx.long()].clone()
            state.subB = (outliers * state.SCB.view(-1, 1) / 127.0).t().contiguous().to(A.dtype)
            CA[:, state.idx.long()] = 0

            subA = A[:, state.idx.long()]

        shapeB = state.SB[0]

        if len(input_shape) == 3:
            output_shape = (input_shape[0], input_shape[1], shapeB[0])
        else:
            output_shape = (input_shape[0], shapeB[0])

        # 3. Matmul
        out32 = F.int8_linear_matmul(CA, state.CB)
        # we apply the fused bias here

        if bias is None or bias.dtype == torch.float16:
            # fp16 bias can be fused into the dequant kernel
            output = F.int8_mm_dequant(out32, SCA, state.SCB, bias=bias).to(A.dtype)
        else:  # apply bias separately
            output = F.int8_mm_dequant(out32, SCA, state.SCB, bias=None).to(A.dtype)
            output.add_(bias)

        # 4. Mixed-precision decomposition matmul (outlier columns in fp16)
        if outlier_cols is not None and subA is not None:
            output += torch.matmul(subA, state.subB)

        # 5. Save state
        ctx.state = state

        ctx.grad_shape = input_shape
        ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype

        if any(ctx.needs_input_grad[:2]):
            ctx.tensors = (CAt, subA, A)
            ctx.tensor_states = (SCAt, state.idx)
        else:
            ctx.tensors = [None, None, None]
            ctx.tensor_states = (None, None)
            ctx.save_for_backward(None, None)

        clone_func = torch.clone if len(output_shape) == 3 else lambda x: x
        return clone_func(output.view(output_shape))

    @staticmethod
    def backward(ctx, grad_output):
        if ctx.is_empty:
            bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)
            return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None

        req_gradA, req_gradB, _, req_gradBias, _ = ctx.needs_input_grad
        CAt, subA, A = ctx.tensors
        SCAt, idx = ctx.tensor_states
        state = ctx.state
        grad_A = grad_B = grad_bias = None

        if req_gradBias:
            # compute grad_bias first before changing grad_output dtype
            grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)

        # Cast grad_output to fp16
        if len(grad_output.shape) == 3:
            grad_output = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()

        Cgrad, Cgradt, SCgrad, SCgradt, outlier_cols = F.int8_double_quant(grad_output.to(torch.float16))

        if req_gradB:
            # weight grad stays full precision ("switchback")
            # print('back A shape', A.shape)
            # print('grad output t shape', grad_output.t().shape)
            grad_B = torch.matmul(grad_output.t(), A)

        if req_gradA:
            if state.CB is not None:
                # dequantize the cached int8 weight (scale = SCB / 127) for the input grad
                CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))
                grad_A = torch.matmul(grad_output, CB).view(ctx.grad_shape).to(ctx.dtype_A)
            else:
                raise Exception("State must contain either CBt or CB matrix for backward")

        return grad_A, grad_B, None, grad_bias, None
338
+
339
+
340
def get_block_sizes(input_matrix, weight_matrix):
    """Pick blockwise-quantization block sizes (bsz, bsz2) from layer dimensions.

    For each of (input_features, output_features), scan the size ladder and
    return the first rung whose successor is exceeded by the feature count.
    """
    in_features = input_matrix.shape[-1]
    if weight_matrix.shape[1] == in_features:
        out_features = weight_matrix.shape[0]
    else:
        out_features = weight_matrix.shape[1]

    ladder = [4096, 2048, 1024, 512, 256, 128, 64, 0]

    def _choose(features, fallback=1024):
        # mirrors the original scan exactly (including the look-ahead indexing)
        for rung, size in enumerate(ladder):
            if features > ladder[rung + 1]:
                return size
        return fallback

    return _choose(in_features), _choose(out_features)
355
+
356
+
357
def matmul_fp8_global(
    A: torch.Tensor,
    B: torch.Tensor,
    fw_code: torch.Tensor,
    bw_code: torch.Tensor,
    out: Optional[torch.Tensor] = None,
    bsz: int = -1,
    bsz2: int = -1,
):
    """Run MatMulFP8Global, inferring block sizes from A/B when either is unset (-1)."""
    if -1 in (bsz, bsz2):
        bsz, bsz2 = get_block_sizes(A, B)
    return MatMulFP8Global.apply(A, B, out, fw_code, bw_code, bsz, bsz2)
369
+
370
+
371
def matmul_fp8_mixed(
    A: torch.Tensor,
    B: torch.Tensor,
    fw_code: torch.Tensor,
    bw_code: torch.Tensor,
    out: Optional[torch.Tensor] = None,
    bsz: int = -1,
    bsz2: int = -1,
):
    """Run MatMulFP8Mixed, inferring block sizes from A/B when either is unset (-1)."""
    if -1 in (bsz, bsz2):
        bsz, bsz2 = get_block_sizes(A, B)
    return MatMulFP8Mixed.apply(A, B, out, fw_code, bw_code, bsz, bsz2)
383
+
384
+
385
def switchback_bnb(
    A: torch.Tensor,
    B: torch.Tensor,
    out: Optional[torch.Tensor] = None,
    state: Optional[MatmulLtState] = None,
    threshold=0.0,
    bias=None,
):
    """Entry point for the bitsandbytes-backed SwitchBack matmul.

    A positive *threshold* enables outlier handling by overriding the
    threshold carried on *state*.
    """
    state = state or MatmulLtState()
    if threshold > 0.0:
        state.threshold = threshold
    return SwitchBackBnb.apply(A, B, out, bias, state)
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/nn/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .modules import LinearFP8Global, LinearFP8Mixed
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/nn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (255 Bytes). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/nn/__pycache__/modules.cpython-310.pyc ADDED
Binary file (2 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/research/nn/modules.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TypeVar
2
+
3
+ import torch
4
+ from torch import nn
5
+
6
+ import bitsandbytes as bnb
7
+
8
+ T = TypeVar("T", bound="torch.nn.Module")
9
+
10
+
11
class LinearFP8Mixed(nn.Linear):
    """nn.Linear whose forward uses the simulated-FP8 "mixed" matmul."""

    def __init__(self, input_features, output_features, bias=True):
        super().__init__(input_features, output_features, bias)
        # quantization maps are created lazily on first forward (they need the device)
        self.bw_code = None
        self.fw_code = None
        # pick blockwise block sizes from the layer dimensions (same ladder as
        # get_block_sizes in research/autograd/_functions.py)
        array = [4096, 2048, 1024, 512, 256, 128, 64, 0]
        for i, k in enumerate(array):
            if input_features > array[i + 1]:
                self.bsz = k
                break
        for i, k in enumerate(array):
            if output_features > array[i + 1]:
                self.bsz2 = k
                break

    def forward(self, x: torch.Tensor):
        if self.fw_code is None:
            # fp8 maps (signed, exponent_bits, mantissa_bits, total_bits):
            # presumably E5M2 for the backward path, E4M3 for forward — confirm
            self.bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(x.device)
            self.fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(x.device)

        out = bnb.research.matmul_fp8_mixed(
            x,
            self.weight.t(),
            fw_code=self.fw_code,
            bw_code=self.bw_code,
            bsz=self.bsz,
            bsz2=self.bsz2,
        )
        if self.bias is not None:
            out += self.bias

        return out
43
+
44
+
45
class LinearFP8Global(nn.Linear):
    """nn.Linear whose forward uses the simulated-FP8 "global" matmul."""

    def __init__(self, input_features, output_features, bias=True):
        super().__init__(input_features, output_features, bias)
        # quantization maps are created lazily on first forward (they need the device)
        self.bw_code = None
        self.fw_code = None
        # pick blockwise block sizes from the layer dimensions
        array = [4096, 2048, 1024, 512, 256, 128, 64, 0]
        for i, k in enumerate(array):
            if input_features > array[i + 1]:
                self.bsz = k
                break
        for i, k in enumerate(array):
            if output_features > array[i + 1]:
                self.bsz2 = k
                break

    def forward(self, x: torch.Tensor):
        if self.fw_code is None:
            self.bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(x.device)
            self.fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(x.device)

        # NOTE(review): this calls top-level bnb.matmul_fp8_global while
        # LinearFP8Mixed calls bnb.research.matmul_fp8_mixed — confirm the
        # top-level alias exists in this bitsandbytes version.
        out = bnb.matmul_fp8_global(
            x,
            self.weight.t(),
            fw_code=self.fw_code,
            bw_code=self.bw_code,
            bsz=self.bsz,
            bsz2=self.bsz2,
        )
        if self.bias is not None:
            out += self.bias

        return out
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__init__.py ADDED
File without changes
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (178 Bytes). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/dequantize_rowwise.cpython-310.pyc ADDED
Binary file (1.83 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/int8_matmul_mixed_dequantize.cpython-310.pyc ADDED
Binary file (4.79 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/int8_matmul_rowwise_dequantize.cpython-310.pyc ADDED
Binary file (4.8 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_columnwise_and_transpose.cpython-310.pyc ADDED
Binary file (2.14 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_global.cpython-310.pyc ADDED
Binary file (3.3 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_rowwise.cpython-310.pyc ADDED
Binary file (1.92 kB). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/triton_utils.cpython-310.pyc ADDED
Binary file (356 Bytes). View file
 
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/dequantize_rowwise.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+
3
+ import torch
4
+
5
+ from bitsandbytes.triton.triton_utils import is_triton_available
6
+
7
if not is_triton_available():

    def dequantize_rowwise(x: torch.Tensor, state_x: torch.Tensor):
        # Stub when triton is missing: callers get None instead of an import crash.
        return None
else:
    import triton
    import triton.language as tl

    # rowwise quantize

    # TODO: autotune this better.
    @triton.autotune(
        configs=[
            triton.Config({}, num_stages=1, num_warps=8),
            triton.Config({}, num_stages=2, num_warps=8),
            triton.Config({}, num_stages=4, num_warps=8),
            triton.Config({}, num_stages=8, num_warps=8),
            triton.Config({}, num_stages=1),
            triton.Config({}, num_stages=2),
            triton.Config({}, num_stages=4),
            triton.Config({}, num_stages=8),
            triton.Config({}, num_warps=1),
            triton.Config({}, num_warps=2),
            triton.Config({}, num_warps=4),
            triton.Config({}, num_warps=8),
        ],
        key=["n_elements"],
    )
    @triton.jit
    def _dequantize_rowwise(
        x_ptr,
        state_x,
        output_ptr,
        inv_127,
        n_elements,
        BLOCK_SIZE: tl.constexpr,
        P2: tl.constexpr,
    ):
        # One program per row: scale the int8 row by its per-row factor / 127.
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        # P2 is the next power of two >= row length; mask out the padding lanes
        arange = tl.arange(0, P2)
        offsets = block_start + arange
        row_mask = arange < BLOCK_SIZE
        x = tl.load(x_ptr + offsets, mask=row_mask)
        # per-row scale (presumably the row max-abs saved by quantize_rowwise — confirm)
        max_val = tl.load(state_x + pid)
        output = max_val * x * inv_127
        tl.store(output_ptr + offsets, output, mask=row_mask)

    def dequantize_rowwise(x: torch.Tensor, state_x: torch.Tensor):
        """Dequantize a rowwise-quantized int8 matrix back to float16 on CUDA."""
        output = torch.empty(*x.shape, device=x.device, dtype=torch.float16)

        P2 = int(2 ** (math.ceil(math.log2(x.shape[1]))))

        assert x.is_cuda and output.is_cuda
        n_elements = output.numel()
        # one kernel instance per row
        grid = lambda meta: (x.shape[0],)
        _dequantize_rowwise[grid](x, state_x, output, 1.0 / 127, n_elements, BLOCK_SIZE=x.shape[1], P2=P2)
        return output
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/int8_matmul_mixed_dequantize.py ADDED
@@ -0,0 +1,205 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ from bitsandbytes.triton.triton_utils import is_triton_available
4
+
5
+ if not is_triton_available():
6
+
7
    def int8_matmul_mixed_dequantize(a, b, state_x, state_w, bias):
        # Stub when triton is missing: callers get None instead of an import crash.
        return None
9
+ else:
10
+ import triton
11
+ import triton.language as tl
12
+ from triton.ops.matmul_perf_model import early_config_prune, estimate_matmul_time
13
+
14
+ # This is a matmul kernel based on triton.ops.matmul
15
+ # It is modified to support rowwise quantized input and global quantized weight
16
+ # It's purpose is fused matmul then dequantize
17
+ # It does support bias.
18
+
19
    def init_to_zero(name):
        # Pre-hook factory: zero the named kernel argument (the output C) before
        # launch — required for SPLIT_K > 1 configs, which accumulate partial
        # results into C with tl.atomic_add.
        return lambda nargs: nargs[name].zero_()
21
+
22
    def get_configs_io_bound():
        """Build the autotune config sweep for IO-bound problem sizes,
        including split-K variants (which need C zeroed via pre_hook)."""
        configs = []
        for num_stages in [2, 3, 4, 5, 6]:
            for block_m in [16, 32]:
                for block_k in [32, 64]:
                    for block_n in [32, 64, 128, 256]:
                        num_warps = 2 if block_n <= 64 else 4
                        configs.append(
                            triton.Config(
                                {"BLOCK_M": block_m, "BLOCK_N": block_n, "BLOCK_K": block_k, "SPLIT_K": 1},
                                num_stages=num_stages,
                                num_warps=num_warps,
                            ),
                        )
                        # split_k
                        for split_k in [2, 4, 8, 16]:
                            configs.append(
                                triton.Config(
                                    {"BLOCK_M": block_m, "BLOCK_N": block_n, "BLOCK_K": block_k, "SPLIT_K": split_k},
                                    num_stages=num_stages,
                                    num_warps=num_warps,
                                    pre_hook=init_to_zero("C"),
                                ),
                            )
        return configs
47
+
48
    @triton.autotune(
        configs=[
            # basic configs for compute-bound matmuls
            triton.Config({"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8),
            triton.Config({"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8),
            triton.Config({"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=5, num_warps=2),
            # good for int8
            triton.Config({"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=3, num_warps=8),
            triton.Config({"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=3, num_warps=8),
            triton.Config({"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=5, num_warps=2),
            *get_configs_io_bound(),
        ],
        key=["M", "N", "K"],
        prune_configs_by={"early_config_prune": early_config_prune, "perf_model": estimate_matmul_time, "top_k": 10},
    )
    @triton.heuristics(
        {
            # EVEN_K lets the main loop skip masking when K divides evenly
            "EVEN_K": lambda args: args["K"] % (args["BLOCK_K"] * args["SPLIT_K"]) == 0,
        },
    )
    @triton.jit
    def _int8_matmul_mixed_dequantize(
        A,
        B,
        C,
        bias,
        state_x_ptr,
        state_w_ptr,
        M,
        N,
        K,
        divfactor: tl.constexpr,
        has_bias: tl.constexpr,
        stride_am,
        stride_ak,
        stride_bk,
        stride_bn,
        stride_cm,
        stride_cn,
        BLOCK_M: tl.constexpr,
        BLOCK_N: tl.constexpr,
        BLOCK_K: tl.constexpr,
        GROUP_M: tl.constexpr,
        SPLIT_K: tl.constexpr,
        EVEN_K: tl.constexpr,
        ACC_TYPE: tl.constexpr,
    ):
        # Int8 matmul with fused dequantization (rowwise-scaled A, globally
        # scaled B) and optional fused bias; based on triton.ops.matmul.
        # matrix multiplication
        pid = tl.program_id(0)
        pid_z = tl.program_id(1)
        grid_m = tl.cdiv(M, BLOCK_M)
        grid_n = tl.cdiv(N, BLOCK_N)
        # re-order program ID for better L2 performance
        width = GROUP_M * grid_n
        group_id = pid // width
        group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
        pid_m = group_id * GROUP_M + (pid % group_size)
        pid_n = (pid % width) // (group_size)
        # do matrix multiplication
        rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
        rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
        ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
        rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
        rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)
        # pointers
        A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
        B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)

        # rematerialize rm and rn to save registers
        rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
        rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)

        # single global scale for the weight; per-row scales for the activations
        w_factor = tl.load(state_w_ptr)
        x_factor = tl.load(state_x_ptr + ram)[:, None]

        # acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
        acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.int32)
        for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)):
            if EVEN_K:
                a = tl.load(A)
                b = tl.load(B)
            else:
                k_remaining = K - k * (BLOCK_K * SPLIT_K)
                a = tl.load(A, mask=rk[None, :] < k_remaining, other=0.0)
                b = tl.load(B, mask=rk[:, None] < k_remaining, other=0.0)
            acc += tl.dot(a, b)
            A += BLOCK_K * SPLIT_K * stride_ak
            B += BLOCK_K * SPLIT_K * stride_bk

        # fused dequantize: rescale the int32 accumulator (divfactor = 1/127^2)
        acc = w_factor * (x_factor * (acc * divfactor))
        acc = acc.to(C.dtype.element_ty)

        # conditionally add bias
        if has_bias:
            bias = tl.load(bias + rn).to(C.dtype.element_ty)
            acc = acc + bias[None, :]

        C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
        mask = (rm < M)[:, None] & (rn < N)[None, :]
        # handles write-back with reduction-splitting
        if SPLIT_K == 1:
            tl.store(C, acc, mask=mask)
        else:
            tl.atomic_add(C, acc, mask=mask)
164
+
165
    def int8_matmul_mixed_dequantize(a, b, state_x, state_w, bias):
        """Fused C = dequant(a @ b) + bias for int8 operands.

        a: [M, K] rowwise-quantized activations; state_x: per-row scale factors.
        b: [K, N] globally quantized weights; state_w: scalar scale factor.
        bias: optional [N] tensor fused into the kernel. Returns float16 [M, N].
        """
        device = a.device
        # combined dequant factor: both operands were scaled into the int8 range
        divfactor = 1.0 / (127.0 * 127.0)
        has_bias = 0 if bias is None else 1
        # handle non-contiguous inputs if necessary
        if a.stride(0) > 1 and a.stride(1) > 1:
            a = a.contiguous()
        if b.stride(0) > 1 and b.stride(1) > 1:
            b = b.contiguous()
        # checks constraints
        assert a.shape[1] == b.shape[0], "incompatible dimensions"
        M, K = a.shape
        _, N = b.shape
        # allocates output
        c = torch.empty((M, N), device=device, dtype=torch.float16)
        # accumulator types (NOTE: the kernel currently hard-codes tl.int32 and
        # ignores this parameter)
        ACC_TYPE = tl.float32  # if a.dtype in [torch.float16, torch.bfloat16, torch.float32] else tl.int32
        # launch int8_matmul_mixed_dequantize kernel
        grid = lambda META: (triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]), META["SPLIT_K"])
        _int8_matmul_mixed_dequantize[grid](
            a,
            b,
            c,
            bias,
            state_x,
            state_w,
            M,
            N,
            K,
            divfactor,
            has_bias,
            a.stride(0),
            a.stride(1),
            b.stride(0),
            b.stride(1),
            c.stride(0),
            c.stride(1),
            GROUP_M=8,
            ACC_TYPE=ACC_TYPE,
        )
        return c
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/int8_matmul_rowwise_dequantize.py ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ from bitsandbytes.triton.triton_utils import is_triton_available
4
+
5
+ if not is_triton_available():
6
+
7
def int8_matmul_rowwise_dequantize(a, b, state_x, state_w, bias):
    # Triton unavailable: keep the import surface intact with a no-op stub.
    # Callers are expected to check triton availability before relying on the result.
    return None
9
+ else:
10
+ import triton
11
+ import triton.language as tl
12
+ from triton.ops.matmul_perf_model import early_config_prune, estimate_matmul_time
13
+
14
+ # This is a matmul kernel based on triton.ops.matmul
15
+ # It is modified to support rowwise quantized input and columnwise quantized weight
16
+ # It's purpose is fused matmul then dequantize
17
+ # It does support bias.
18
+
19
def init_to_zero(name):
    """Return an autotune pre-hook that zeroes the kernel argument called *name*.

    Used by split-K configs, which accumulate into C with atomics and therefore
    need the output buffer cleared before launch.
    """

    def _zero_arg(nargs):
        return nargs[name].zero_()

    return _zero_arg
21
+
22
def get_configs_io_bound():
    """Enumerate Triton configs for IO-bound matmul shapes (small BLOCK_M).

    For every (num_stages, BLOCK_M, BLOCK_K, BLOCK_N) combination this yields
    one SPLIT_K=1 config plus split-K variants (2/4/8/16); the split-K configs
    carry a pre-hook that zeroes C because the kernel reduces with atomics.
    """
    configs = []
    for num_stages in (2, 3, 4, 5, 6):
        for block_m in (16, 32):
            for block_k in (32, 64):
                for block_n in (32, 64, 128, 256):
                    num_warps = 2 if block_n <= 64 else 4
                    tile = {"BLOCK_M": block_m, "BLOCK_N": block_n, "BLOCK_K": block_k}
                    configs.append(
                        triton.Config(
                            dict(tile, SPLIT_K=1),
                            num_stages=num_stages,
                            num_warps=num_warps,
                        )
                    )
                    # split-K variants: C must be zeroed before the atomic reduction
                    for split_k in (2, 4, 8, 16):
                        configs.append(
                            triton.Config(
                                dict(tile, SPLIT_K=split_k),
                                num_stages=num_stages,
                                num_warps=num_warps,
                                pre_hook=init_to_zero("C"),
                            )
                        )
    return configs
47
+
48
@triton.autotune(
    configs=[
        # basic configs for compute-bound matmuls
        triton.Config({"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8),
        triton.Config({"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8),
        triton.Config({"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=5, num_warps=2),
        # good for int8
        triton.Config({"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=3, num_warps=8),
        triton.Config({"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=3, num_warps=8),
        triton.Config({"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=5, num_warps=2),
        *get_configs_io_bound(),
    ],
    key=["M", "N", "K"],
    prune_configs_by={"early_config_prune": early_config_prune, "perf_model": estimate_matmul_time, "top_k": 10},
)
@triton.heuristics(
    {
        # True when K divides evenly into BLOCK_K * SPLIT_K chunks, so the
        # inner loop needs no tail masking.
        "EVEN_K": lambda args: args["K"] % (args["BLOCK_K"] * args["SPLIT_K"]) == 0,
    },
)
@triton.jit
def _int8_matmul_rowwise_dequantize(
    A,               # left operand, (M, K) — presumably int8 (see wrapper); TODO confirm dtype at call sites
    B,               # right operand, (K, N)
    C,               # output, (M, N); its element type drives the final cast
    bias,            # optional bias vector of length N (only read when has_bias)
    state_x_ptr,     # per-row quantization scales for A (length M)
    state_w_ptr,     # per-column quantization scales for B (length N)
    M,
    N,
    K,
    divfactor,       # scalar 1 / (127 * 127) supplied by the wrapper
    has_bias: tl.constexpr,
    stride_am,
    stride_ak,
    stride_bk,
    stride_bn,
    stride_cm,
    stride_cn,
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
    BLOCK_K: tl.constexpr,
    GROUP_M: tl.constexpr,
    SPLIT_K: tl.constexpr,
    EVEN_K: tl.constexpr,
    ACC_TYPE: tl.constexpr,
):
    # matrix multiplication: each program computes one BLOCK_M x BLOCK_N tile
    # of C for one split-K slice (pid_z indexes the K split).
    pid = tl.program_id(0)
    pid_z = tl.program_id(1)
    grid_m = tl.cdiv(M, BLOCK_M)
    grid_n = tl.cdiv(N, BLOCK_N)
    # re-order program ID for better L2 performance
    width = GROUP_M * grid_n
    group_id = pid // width
    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
    pid_m = group_id * GROUP_M + (pid % group_size)
    pid_n = (pid % width) // (group_size)
    # do matrix multiplication: row/col/K index ranges for this tile
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    # wrap indices mod M/N and declare contiguity so loads can be vectorized
    ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
    rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
    rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)
    # pointers to the first A and B tiles for this program
    A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
    B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)

    # rematerialize rm and rn to save registers
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)

    # per-column (w) and per-row (x) dequantization scale factors
    w_factor = tl.load(state_w_ptr + rbn)[None, :]
    x_factor = tl.load(state_x_ptr + ram)[:, None]

    # acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.int32)
    for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)):
        if EVEN_K:
            a = tl.load(A)
            b = tl.load(B)
        else:
            # mask the K tail; padded elements contribute 0 to the dot product
            k_remaining = K - k * (BLOCK_K * SPLIT_K)
            a = tl.load(A, mask=rk[None, :] < k_remaining, other=0.0)
            b = tl.load(B, mask=rk[:, None] < k_remaining, other=0.0)
        acc += tl.dot(a, b)
        A += BLOCK_K * SPLIT_K * stride_ak
        B += BLOCK_K * SPLIT_K * stride_bk

    # dequantize: scale the int32 accumulator by both factors and 1/127^2,
    # then cast to the output element type (fp16 at the call site)
    acc = w_factor * (x_factor * (acc * divfactor))
    acc = acc.to(C.dtype.element_ty)

    if has_bias:
        bias = tl.load(bias + rn).to(C.dtype.element_ty)
        acc = acc + bias[None, :]

    C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
    mask = (rm < M)[:, None] & (rn < N)[None, :]
    # handles write-back with reduction-splitting: split-K programs each hold a
    # partial sum, so they must accumulate into C atomically (C pre-zeroed by
    # the autotune pre-hook)
    if SPLIT_K == 1:
        tl.store(C, acc, mask=mask)
    else:
        tl.atomic_add(C, acc, mask=mask)
163
+
164
def int8_matmul_rowwise_dequantize(a, b, state_x, state_w, bias):
    """Int8 matmul with fused rowwise dequantization (and optional bias).

    Computes dequant(a @ b) into a new fp16 tensor, scaling by the per-row
    ``state_x`` factors, the per-column ``state_w`` factors and 1/127^2.
    """
    # undo the two int8 quantization scales
    divfactor = 1.0 / (127.0 * 127.0)
    has_bias = 0 if bias is None else 1
    device = a.device

    # the kernel needs at least one unit stride per operand
    a = a.contiguous() if a.stride(0) > 1 and a.stride(1) > 1 else a
    b = b.contiguous() if b.stride(0) > 1 and b.stride(1) > 1 else b

    assert a.shape[1] == b.shape[0], "incompatible dimensions"
    M, K = a.shape
    _, N = b.shape

    # fp16 output buffer
    c = torch.empty((M, N), device=device, dtype=torch.float16)
    ACC_TYPE = tl.float32  # if a.dtype in [torch.float16, torch.bfloat16, torch.float32] else tl.int32

    # 2D grid: (output tiles, SPLIT_K slices)
    grid = lambda META: (triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]), META["SPLIT_K"])
    _int8_matmul_rowwise_dequantize[grid](
        a, b, c, bias,
        state_x, state_w,
        M, N, K,
        divfactor, has_bias,
        a.stride(0), a.stride(1),
        b.stride(0), b.stride(1),
        c.stride(0), c.stride(1),
        GROUP_M=8,
        ACC_TYPE=ACC_TYPE,
    )
    return c
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/quantize_columnwise_and_transpose.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+
3
+ import torch
4
+
5
+ from bitsandbytes.triton.triton_utils import is_triton_available
6
+
7
+ if not is_triton_available():
8
+
9
def quantize_columnwise_and_transpose(x: torch.Tensor):
    # Triton unavailable: no-op stub so the module still imports; real
    # implementation below requires a working triton install.
    return None
11
+ else:
12
+ import triton
13
+ import triton.language as tl
14
+
15
+ # This kernel does fused columnwise quantization and transpose.
16
+
17
+ # TODO: autotune this better.
18
# TODO: autotune this better.
@triton.autotune(
    configs=[
        triton.Config({}, num_stages=1),
        triton.Config({}, num_stages=2),
        triton.Config({}, num_stages=4),
        triton.Config({}, num_stages=8),
        triton.Config({}, num_stages=16),
        triton.Config({}, num_stages=1, num_warps=8),
        triton.Config({}, num_stages=2, num_warps=8),
        triton.Config({}, num_stages=4, num_warps=8),
        triton.Config({}, num_stages=8, num_warps=8),
        triton.Config({}, num_stages=16, num_warps=8),
        triton.Config({}, num_warps=1),
        triton.Config({}, num_warps=2),
        triton.Config({}, num_warps=4),
        triton.Config({}, num_warps=8),
    ],
    key=["n_elements"],
)
@triton.jit
def _quantize_columnwise_and_transpose(
    x_ptr,          # source matrix, logically (M, N), row-major indexing below
    output_ptr,     # int8 destination laid out transposed, (N, M)
    output_maxs,    # per-column absmax, one value per program (length N)
    n_elements,
    M: tl.constexpr,
    N: tl.constexpr,
    BLOCK_SIZE: tl.constexpr,
    P2: tl.constexpr,   # next power of two >= M (tl.arange requires a pow-2 extent)
):
    # One program handles one column of x: it loads the column (stride N),
    # computes its absmax, quantizes to int8, and stores it as a row of output.
    pid = tl.program_id(axis=0)
    block_start = pid
    p2_arange = tl.arange(0, P2)
    p2_arange_mask = p2_arange < M   # mask off the pow-2 padding beyond M
    arange = p2_arange * N
    offsets = block_start + arange   # column `pid`: elements x[0..M-1, pid]
    x = tl.load(x_ptr + offsets, mask=p2_arange_mask)
    abs_x = tl.abs(x)
    # padded lanes are forced to 0 so they cannot win the max
    max_val = tl.max(tl.where(p2_arange_mask, abs_x, 0), axis=0)
    # round-to-nearest int8 quantization against the column absmax
    output = tl.libdevice.llrint(127.0 * (x / max_val))

    # write the quantized column as row `pid` of the transposed output
    new_start = pid * M
    new_offsets = new_start + p2_arange
    tl.store(output_ptr + new_offsets, output, mask=p2_arange_mask)
    tl.store(output_maxs + pid, max_val)
63
+
64
def quantize_columnwise_and_transpose(x: torch.Tensor):
    """Column-wise int8 quantization fused with a transpose.

    Returns ``(q, maxs)`` where ``q`` is int8 of shape (N, M) for an (M, N)
    input and ``maxs`` holds the per-column absmax values as fp16.
    """
    M, N = x.shape
    transposed_q = torch.empty(N, M, device=x.device, dtype=torch.int8)
    col_maxs = torch.empty(x.shape[1], device=x.device, dtype=torch.float16)

    # next power of two >= M — the kernel's tl.arange needs a pow-2 extent
    P2 = int(2 ** (math.ceil(math.log2(M))))

    assert x.is_cuda and transposed_q.is_cuda
    n_elements = transposed_q.numel()

    def grid(meta):
        return (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)

    _quantize_columnwise_and_transpose[grid](x, transposed_q, col_maxs, n_elements, M, N, BLOCK_SIZE=M, P2=P2)
    return transposed_q, col_maxs
evalkit_eagle/lib/python3.10/site-packages/bitsandbytes/triton/quantize_global.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ from bitsandbytes.triton.triton_utils import is_triton_available
4
+
5
+ if not is_triton_available():
6
+
7
def quantize_global_transpose(input):
    # Triton unavailable: no-op stub keeps the module importable.
    return None
9
+
10
def quantize_global(x: torch.Tensor):
    # Triton unavailable: no-op stub keeps the module importable.
    return None
12
+ else:
13
+ import triton
14
+ import triton.language as tl
15
+
16
+ # global quantize
17
@triton.autotune(
    configs=[
        triton.Config({"BLOCK_SIZE": 1024}, num_warps=4),
        triton.Config({"BLOCK_SIZE": 2048}, num_stages=1),
    ],
    key=["n_elements"],
)
@triton.jit
def _quantize_global(
    x_ptr,            # source tensor, treated as a flat buffer
    absmax_inv_ptr,   # scalar 1 / max(|x|) computed by the wrapper
    output_ptr,       # int8 destination, same flat layout as x
    n_elements,
    BLOCK_SIZE: tl.constexpr,
):
    # Each program quantizes one BLOCK_SIZE chunk of the flattened tensor.
    pid = tl.program_id(axis=0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements   # guard the final partial chunk
    x = tl.load(x_ptr + offsets, mask=mask)
    absmax_inv = tl.load(absmax_inv_ptr)
    # round-to-nearest quantization against the global absmax
    output = tl.libdevice.llrint(127.0 * (x * absmax_inv))
    tl.store(output_ptr + offsets, output, mask=mask)
40
+
41
def quantize_global(x: torch.Tensor):
    """Globally (per-tensor) quantize *x* to int8.

    Returns ``(output, absmax)``: the int8 tensor with x's shape and the
    1-element absmax tensor needed to dequantize it.
    """
    absmax = x.abs().max().unsqueeze(0)
    absmax_inv = 1.0 / absmax
    # Allocate on x's own device. The original hard-coded device="cuda",
    # which lands on the current default GPU and mismatches x when x lives
    # on another CUDA device; every other wrapper in this package allocates
    # on the input's device.
    output = torch.empty(*x.shape, device=x.device, dtype=torch.int8)
    assert x.is_cuda and output.is_cuda
    n_elements = output.numel()
    grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
    _quantize_global[grid](x, absmax_inv, output, n_elements)
    return output, absmax
50
+
51
+ # global quantize and transpose
52
@triton.autotune(
    configs=[
        triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "GROUP_M": 8}, num_warps=4),
        # NOTE(review): the original list contained this identical config twice,
        # which only made autotuning benchmark the same config a second time;
        # the duplicate was removed. Add genuinely different tilings here to
        # widen the search space.
    ],
    key=["M", "N"],
)
@triton.jit
def _quantize_global_transpose(
    A,                # source matrix, (M, N)
    absmax_inv_ptr,   # scalar 1 / max(|A|) computed by the wrapper
    B,                # int8 destination with transposed layout, (N, M)
    stride_am,
    stride_an,
    stride_bn,
    stride_bm,
    M,
    N,
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
    GROUP_M: tl.constexpr,
):
    # Each program quantizes one BLOCK_M x BLOCK_N tile of A and writes it,
    # transposed via B's strides, into B.
    pid = tl.program_id(0)
    grid_m = (M + BLOCK_M - 1) // BLOCK_M
    grid_n = (N + BLOCK_N - 1) // BLOCK_N

    # grouped program ordering for better L2 reuse (same scheme as the matmul kernels)
    width = GROUP_M * grid_n
    group_id = pid // width
    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
    pid_m = group_id * GROUP_M + (pid % group_size)
    pid_n = (pid % width) // group_size

    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    A = A + (rm[:, None] * stride_am + rn[None, :] * stride_an)
    mask = (rm < M)[:, None] & (rn < N)[None, :]
    a = tl.load(A, mask=mask)
    absmax_inv = tl.load(absmax_inv_ptr)

    # rematerialize to save registers
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    B = B + (rm[:, None] * stride_bm + rn[None, :] * stride_bn)
    mask = (rm < M)[:, None] & (rn < N)[None, :]

    # round-to-nearest int8 quantization against the global absmax
    output = tl.libdevice.llrint(127.0 * (a * absmax_inv))

    tl.store(B, output, mask=mask)
+
102
def quantize_global_transpose(input):
    """Globally quantize *input* to int8 while transposing it.

    Returns ``(out, absmax)`` where ``out`` is int8 of shape (N, M) for an
    (M, N) input and ``absmax`` is the 1-element scale tensor.
    """
    absmax = input.abs().max().unsqueeze(0)
    absmax_inv = 1.0 / absmax
    M, N = input.shape
    # Allocate on the input's own device. The original hard-coded
    # device="cuda", which places the output on the current default GPU even
    # when the input lives on a different CUDA device; the other wrappers in
    # this package consistently allocate on the input's device.
    out = torch.empty(N, M, device=input.device, dtype=torch.int8)

    assert out.size(0) == N and out.size(1) == M
    # the kernel addresses both tensors through strides, but requires at
    # least one unit stride on each
    assert input.stride(0) == 1 or input.stride(1) == 1
    assert out.stride(0) == 1 or out.stride(1) == 1

    grid = lambda META: (triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]),)
    _quantize_global_transpose[grid](
        input,
        absmax_inv,
        out,
        input.stride(0),
        input.stride(1),
        out.stride(0),
        out.stride(1),
        M,
        N,
    )
    return out, absmax