ZTWHHH commited on
Commit
05d505a
·
verified ·
1 Parent(s): 70c471b

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. llava_next/lib/python3.10/site-packages/bitsandbytes/diagnostics/__init__.py +0 -0
  3. llava_next/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/__init__.cpython-310.pyc +0 -0
  4. llava_next/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/cuda.cpython-310.pyc +0 -0
  5. llava_next/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/main.cpython-310.pyc +0 -0
  6. llava_next/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/utils.cpython-310.pyc +0 -0
  7. llava_next/lib/python3.10/site-packages/bitsandbytes/diagnostics/cuda.py +176 -0
  8. llava_next/lib/python3.10/site-packages/bitsandbytes/diagnostics/main.py +85 -0
  9. llava_next/lib/python3.10/site-packages/bitsandbytes/diagnostics/utils.py +12 -0
  10. llava_next/lib/python3.10/site-packages/bitsandbytes/nn/__init__.py +26 -0
  11. llava_next/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/__init__.cpython-310.pyc +0 -0
  12. llava_next/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/modules.cpython-310.pyc +0 -0
  13. llava_next/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/triton_based_modules.cpython-310.pyc +0 -0
  14. llava_next/lib/python3.10/site-packages/bitsandbytes/nn/modules.py +1083 -0
  15. llava_next/lib/python3.10/site-packages/bitsandbytes/nn/triton_based_modules.py +264 -0
  16. llava_next/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/__init__.cpython-310.pyc +0 -0
  17. llava_next/lib/python3.10/site-packages/bitsandbytes/research/__init__.py +6 -0
  18. llava_next/lib/python3.10/site-packages/bitsandbytes/research/autograd/__init__.py +0 -0
  19. llava_next/lib/python3.10/site-packages/bitsandbytes/research/autograd/__pycache__/__init__.cpython-310.pyc +0 -0
  20. llava_next/lib/python3.10/site-packages/bitsandbytes/research/autograd/__pycache__/_functions.cpython-310.pyc +0 -0
  21. llava_next/lib/python3.10/site-packages/bitsandbytes/research/autograd/_functions.py +421 -0
  22. llava_next/lib/python3.10/site-packages/bitsandbytes/research/nn/__init__.py +1 -0
  23. llava_next/lib/python3.10/site-packages/bitsandbytes/research/nn/modules.py +76 -0
  24. llava_next/lib/python3.10/site-packages/bitsandbytes/triton/__init__.py +0 -0
  25. llava_next/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/__init__.cpython-310.pyc +0 -0
  26. llava_next/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/dequantize_rowwise.cpython-310.pyc +0 -0
  27. llava_next/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/int8_matmul_mixed_dequantize.cpython-310.pyc +0 -0
  28. llava_next/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/int8_matmul_rowwise_dequantize.cpython-310.pyc +0 -0
  29. llava_next/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_columnwise_and_transpose.cpython-310.pyc +0 -0
  30. llava_next/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_global.cpython-310.pyc +0 -0
  31. llava_next/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_rowwise.cpython-310.pyc +0 -0
  32. llava_next/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/triton_utils.cpython-310.pyc +0 -0
  33. llava_next/lib/python3.10/site-packages/bitsandbytes/triton/dequantize_rowwise.py +64 -0
  34. llava_next/lib/python3.10/site-packages/bitsandbytes/triton/int8_matmul_mixed_dequantize.py +205 -0
  35. llava_next/lib/python3.10/site-packages/bitsandbytes/triton/int8_matmul_rowwise_dequantize.py +206 -0
  36. llava_next/lib/python3.10/site-packages/bitsandbytes/triton/quantize_columnwise_and_transpose.py +75 -0
  37. llava_next/lib/python3.10/site-packages/bitsandbytes/triton/quantize_global.py +124 -0
  38. llava_next/lib/python3.10/site-packages/bitsandbytes/triton/quantize_rowwise.py +67 -0
  39. llava_next/lib/python3.10/site-packages/bitsandbytes/triton/triton_utils.py +5 -0
  40. parrot/lib/python3.10/site-packages/transformers/__pycache__/convert_graph_to_onnx.cpython-310.pyc +0 -0
  41. parrot/lib/python3.10/site-packages/transformers/__pycache__/file_utils.cpython-310.pyc +0 -0
  42. parrot/lib/python3.10/site-packages/transformers/__pycache__/modeling_flax_pytorch_utils.cpython-310.pyc +0 -0
  43. parrot/lib/python3.10/site-packages/transformers/__pycache__/time_series_utils.cpython-310.pyc +0 -0
  44. parrot/lib/python3.10/site-packages/transformers/__pycache__/tokenization_utils.cpython-310.pyc +0 -0
  45. parrot/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc +0 -0
  46. parrot/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc +0 -0
  47. parrot/lib/python3.10/site-packages/transformers/commands/__pycache__/env.cpython-310.pyc +0 -0
  48. parrot/lib/python3.10/site-packages/transformers/commands/__pycache__/lfs.cpython-310.pyc +0 -0
  49. parrot/lib/python3.10/site-packages/transformers/commands/__pycache__/pt_to_tf.cpython-310.pyc +0 -0
  50. parrot/lib/python3.10/site-packages/transformers/commands/__pycache__/run.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -329,3 +329,4 @@ llava_next/lib/python3.10/site-packages/torchvision/_C.so filter=lfs diff=lfs me
329
  parrot/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
330
  parrot/lib/python3.10/site-packages/decord/libdecord.so filter=lfs diff=lfs merge=lfs -text
331
  llava_next/lib/python3.10/site-packages/pandas/tests/indexing/__pycache__/test_loc.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
329
  parrot/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
330
  parrot/lib/python3.10/site-packages/decord/libdecord.so filter=lfs diff=lfs merge=lfs -text
331
  llava_next/lib/python3.10/site-packages/pandas/tests/indexing/__pycache__/test_loc.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
332
+ parrot/lib/python3.10/site-packages/transformers/models/seamless_m4t_v2/__pycache__/modeling_seamless_m4t_v2.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
llava_next/lib/python3.10/site-packages/bitsandbytes/diagnostics/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (180 Bytes). View file
 
llava_next/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/cuda.cpython-310.pyc ADDED
Binary file (5.56 kB). View file
 
llava_next/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/main.cpython-310.pyc ADDED
Binary file (2.75 kB). View file
 
llava_next/lib/python3.10/site-packages/bitsandbytes/diagnostics/__pycache__/utils.cpython-310.pyc ADDED
Binary file (641 Bytes). View file
 
llava_next/lib/python3.10/site-packages/bitsandbytes/diagnostics/cuda.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import os
3
+ from pathlib import Path
4
+ from typing import Dict, Iterable, Iterator
5
+
6
+ import torch
7
+
8
+ from bitsandbytes.cextension import get_cuda_bnb_library_path
9
+ from bitsandbytes.consts import NONPYTORCH_DOC_URL
10
+ from bitsandbytes.cuda_specs import CUDASpecs
11
+ from bitsandbytes.diagnostics.utils import print_dedented
12
+
13
# Environment variables that are scanned first when hunting for the CUDA runtime.
CUDART_PATH_PREFERRED_ENVVARS = ("CONDA_PREFIX", "LD_LIBRARY_PATH")

# Environment variables that are known never to point at CUDA libraries;
# scanning them would only produce noise (or, worse, side effects).
CUDART_PATH_IGNORED_ENVVARS = {
    "DBUS_SESSION_BUS_ADDRESS",  # hardware related
    "GOOGLE_VM_CONFIG_LOCK_FILE",  # GCP: requires elevated permissions, causing problems in VMs and Jupyter notebooks
    "HOME",  # Linux shell default
    "LESSCLOSE",
    "LESSOPEN",  # related to the `less` command
    "MAIL",  # something related to emails
    "OLDPWD",
    "PATH",  # this is for finding binaries, not libraries
    "PWD",  # PWD: this is how the shell keeps track of the current working dir
    "SHELL",  # binary for currently invoked shell
    "SSH_AUTH_SOCK",  # SSH stuff, therefore unrelated
    "SSH_TTY",
    "TMUX",  # Terminal Multiplexer
    "XDG_DATA_DIRS",  # XDG: Desktop environment stuff
    "XDG_GREETER_DATA_DIR",  # XDG: Desktop environment stuff
    "XDG_RUNTIME_DIR",
    "_",  # current Python interpreter
}
34
+
35
# Filename patterns that identify a CUDA runtime library on disk.
CUDA_RUNTIME_LIB_PATTERNS = (
    "cudart64*.dll",  # Windows
    "libcudart*.so*",  # libcudart.so, libcudart.so.11.0, libcudart.so.12.0, libcudart.so.12.1, libcudart.so.12.2 etc.
    "nvcuda*.dll",  # Windows
)

logger = logging.getLogger(__name__)


def find_cuda_libraries_in_path_list(paths_list_candidate: str) -> Iterable[Path]:
    """Yield CUDA runtime library files found in a PATH-like string.

    Args:
        paths_list_candidate: directories separated by `os.pathsep`
            (e.g. the value of ``LD_LIBRARY_PATH``).

    Yields:
        `Path` objects of regular files matching ``CUDA_RUNTIME_LIB_PATTERNS``.
        Empty entries, entries without a path separator, and directories that
        cannot be accessed are silently skipped.
    """
    for dir_string in paths_list_candidate.split(os.pathsep):
        if not dir_string:
            continue
        if os.sep not in dir_string:
            # A bare word without a separator cannot be a directory path.
            continue
        try:
            # `dir_path` (not `dir`) so the builtin is not shadowed.
            dir_path = Path(dir_string)
            try:
                if not dir_path.exists():
                    logger.warning(f"The directory listed in your path is found to be non-existent: {dir_path}")
                    continue
            except OSError:  # Assume an esoteric error trying to poke at the directory
                pass
            for lib_pattern in CUDA_RUNTIME_LIB_PATTERNS:
                for pth in dir_path.glob(lib_pattern):
                    if pth.is_file():
                        yield pth
        except OSError:  # PermissionError is a subclass of OSError; one catch suffices
            pass
64
+
65
+
66
def is_relevant_candidate_env_var(env_var: str, value: str) -> bool:
    """Return True when an environment variable plausibly points at CUDA libraries."""
    if env_var in CUDART_PATH_PREFERRED_ENVVARS:  # preferred locations always qualify
        return True
    if os.sep not in value:  # value does not look like a path
        return False
    if env_var in CUDART_PATH_IGNORED_ENVVARS:  # known-unrelated variable
        return False
    if "CONDA" in env_var:  # another conda envvar
        return False
    if "BASH_FUNC" in env_var:  # bash function exported via the environment
        return False
    return "\n" not in value  # multi-line values are likely scripts, not paths
77
+
78
+
79
def get_potentially_lib_path_containing_env_vars() -> Dict[str, str]:
    """Return the subset of ``os.environ`` that may point at CUDA libraries."""
    relevant = {}
    for name, value in os.environ.items():
        if is_relevant_candidate_env_var(name, value):
            relevant[name] = value
    return relevant
81
+
82
+
83
def find_cudart_libraries() -> Iterator[Path]:
    """Yield CUDA runtime libraries discoverable via environment variables.

    Search order of priority:
      1. active conda env (``CONDA_PREFIX``)
      2. ``LD_LIBRARY_PATH``
      3. all remaining candidate env vars, with known-unrelated and
         non-path-like values filtered out beforehand
    """
    env_candidates = get_potentially_lib_path_containing_env_vars()

    # Preferred locations are scanned first and removed from the pool so the
    # catch-all pass below does not scan them a second time.
    for preferred_var in CUDART_PATH_PREFERRED_ENVVARS:
        if preferred_var in env_candidates:
            yield from find_cuda_libraries_in_path_list(env_candidates.pop(preferred_var))

    for path_list in env_candidates.values():
        yield from find_cuda_libraries_in_path_list(path_list)
105
+
106
+
107
def print_cuda_diagnostics(cuda_specs: CUDASpecs) -> None:
    """Print diagnostics for the detected PyTorch CUDA setup.

    Reports the CUDA version and highest compute capability, and warns when
    the matching bitsandbytes native library is missing, when the CUDA version
    is too old for LLM.int8(), or when the GPU lacks cublaslt support.

    Args:
        cuda_specs: CUDA version / compute-capability info detected from PyTorch.
    """
    print(
        f"PyTorch settings found: CUDA_VERSION={cuda_specs.cuda_version_string}, "
        f"Highest Compute Capability: {cuda_specs.highest_compute_capability}.",
    )

    binary_path = get_cuda_bnb_library_path(cuda_specs)
    if not binary_path.exists():
        print_dedented(
            f"""
            Library not found: {binary_path}. Maybe you need to compile it from source?
            If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION`,
            for example, `make CUDA_VERSION=113`.

            The CUDA version for the compile might depend on your conda install, if using conda.
            Inspect CUDA version via `conda list | grep cuda`.
            """,
        )

    cuda_major, cuda_minor = cuda_specs.cuda_version_tuple
    if cuda_major < 11:
        # Fixed grammar of the original warning ("You will be only to use ...").
        print_dedented(
            """
            WARNING: CUDA versions lower than 11 are currently not supported for LLM.int8().
            You will only be able to use 8-bit optimizers and quantization routines!
            """,
        )

    print(f"To manually override the PyTorch CUDA version please see: {NONPYTORCH_DOC_URL}")

    # 7.5 is the minimum CC for cublaslt
    if not cuda_specs.has_cublaslt:
        print_dedented(
            """
            WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!
            If you run into issues with 8-bit matmul, you can try 4-bit quantization:
            https://huggingface.co/blog/4bit-transformers-bitsandbytes
            """,
        )

    # TODO:
    # (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible)
    # (2) Multiple CUDA versions installed
150
+
151
+
152
def print_cuda_runtime_diagnostics() -> None:
    """Scan the environment for CUDA runtime libraries and report duplicates."""
    cudart_paths = list(find_cudart_libraries())
    if not cudart_paths:
        print("CUDA SETUP: WARNING! CUDA runtime files not found in any environmental path.")
        return
    if len(cudart_paths) > 1:
        print_dedented(
            f"""
            Found duplicate CUDA runtime files (see below).

            We select the PyTorch default CUDA runtime, which is {torch.version.cuda},
            but this might mismatch with the CUDA version that is needed for bitsandbytes.
            To override this behavior set the `BNB_CUDA_VERSION=<version string, e.g. 122>` environmental variable.

            For example, if you want to use the CUDA version 122,
            BNB_CUDA_VERSION=122 python ...

            OR set the environmental variable in your .bashrc:
            export BNB_CUDA_VERSION=122

            In the case of a manual override, make sure you set LD_LIBRARY_PATH, e.g.
            export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.2,
            """,
        )
        for runtime_path in cudart_paths:
            print(f"* Found CUDA runtime at: {runtime_path}")
llava_next/lib/python3.10/site-packages/bitsandbytes/diagnostics/main.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import traceback
3
+
4
+ import torch
5
+
6
+ from bitsandbytes.consts import PACKAGE_GITHUB_URL
7
+ from bitsandbytes.cuda_specs import get_cuda_specs
8
+ from bitsandbytes.diagnostics.cuda import (
9
+ print_cuda_diagnostics,
10
+ print_cuda_runtime_diagnostics,
11
+ )
12
+ from bitsandbytes.diagnostics.utils import print_dedented, print_header
13
+
14
+
15
def sanity_check():
    """Minimal end-to-end check: one 8-bit Adam step must change the weights."""
    from bitsandbytes.cextension import lib

    if lib is None:
        print_dedented(
            """
            Couldn't load the bitsandbytes library, likely due to missing binaries.
            Please ensure bitsandbytes is properly installed.

            For source installations, compile the binaries with `cmake -DCOMPUTE_BACKEND=cuda -S .`.
            See the documentation for more details if needed.

            Trying a simple check anyway, but this will likely fail...
            """,
        )

    from bitsandbytes.optim import Adam

    weight = torch.nn.Parameter(torch.rand(10, 10).cuda())
    activation = torch.rand(10, 10).cuda()
    checksum_before = weight.data.sum().item()
    optimizer = Adam([weight])
    loss = (activation * weight).sum()
    loss.backward()
    optimizer.step()
    checksum_after = weight.data.sum().item()
    # The optimizer step must have mutated the parameter.
    assert checksum_before != checksum_after
43
+
44
+
45
def main():
    """Collect and print debug information, then run a quick CUDA sanity check.

    Prints a CPU-only warning on ImportError, a traceback on any other
    failure, and exits with status 1 unless the sanity check succeeds.
    """
    print_header("")
    print_header("BUG REPORT INFORMATION")
    print_header("")

    print_header("OTHER")
    cuda_specs = get_cuda_specs()
    print("CUDA specs:", cuda_specs)
    if not torch.cuda.is_available():
        print("Torch says CUDA is not available. Possible reasons:")
        print("1. CUDA driver not installed")
        print("2. CUDA not installed")
        print("3. You have multiple conflicting CUDA libraries")
    if cuda_specs:
        print_cuda_diagnostics(cuda_specs)
    print_cuda_runtime_diagnostics()
    print_header("")
    print_header("DEBUG INFO END")
    print_header("")
    print("Checking that the library is importable and CUDA is callable...")
    try:
        sanity_check()
        print("SUCCESS!")
        print("Installation was successful!")
        return
    except ImportError:
        print(
            f"WARNING: {__package__} is currently running as CPU-only!\n"
            "Therefore, 8-bit optimizers and GPU quantization are unavailable.\n\n"
            # Dropped the pointless `f` prefix here — the literal has no placeholder.
            "If you think that this is so erroneously,\nplease report an issue!",
        )
    except Exception:
        traceback.print_exc()
    print_dedented(
        f"""
        Above we output some debug information.
        Please provide this info when creating an issue via {PACKAGE_GITHUB_URL}/issues/new/choose
        WARNING: Please be sure to sanitize sensitive info from the output before posting it.
        """,
    )
    sys.exit(1)
llava_next/lib/python3.10/site-packages/bitsandbytes/diagnostics/utils.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import textwrap
2
+
3
# Default width of a printed header line, in characters.
HEADER_WIDTH = 60


def print_header(txt: str, width: int = HEADER_WIDTH, filler: str = "+") -> None:
    """Print `txt` centered within a `width`-wide line of `filler` characters."""
    label = f" {txt} " if txt else ""
    print(label.center(width, filler))
9
+
10
+
11
def print_dedented(text: str) -> None:
    """Dedent a (typically triple-quoted) message, strip outer blank lines, print it.

    The original split the dedented string on newlines and joined it straight
    back with newlines — an identity round-trip, removed here.
    """
    print(textwrap.dedent(text).strip())
llava_next/lib/python3.10/site-packages/bitsandbytes/nn/__init__.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ from .modules import (
6
+ Embedding,
7
+ Embedding4bit,
8
+ Embedding8bit,
9
+ EmbeddingFP4,
10
+ EmbeddingNF4,
11
+ Int8Params,
12
+ Linear4bit,
13
+ Linear8bitLt,
14
+ LinearFP4,
15
+ LinearNF4,
16
+ OutlierAwareLinear,
17
+ Params4bit,
18
+ StableEmbedding,
19
+ SwitchBackLinearBnb,
20
+ )
21
+ from .triton_based_modules import (
22
+ StandardLinear,
23
+ SwitchBackLinear,
24
+ SwitchBackLinearGlobal,
25
+ SwitchBackLinearVectorwise,
26
+ )
llava_next/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (673 Bytes). View file
 
llava_next/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/modules.cpython-310.pyc ADDED
Binary file (28.7 kB). View file
 
llava_next/lib/python3.10/site-packages/bitsandbytes/nn/__pycache__/triton_based_modules.cpython-310.pyc ADDED
Binary file (6.93 kB). View file
 
llava_next/lib/python3.10/site-packages/bitsandbytes/nn/modules.py ADDED
@@ -0,0 +1,1083 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ #
3
+ # This source code is licensed under the MIT license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+ import copy
6
+ from typing import Any, Dict, Optional, TypeVar, Union, overload
7
+ import warnings
8
+
9
+ import torch
10
+ from torch import Tensor, device, dtype, nn
11
+ import torch.nn.functional as F
12
+
13
+ import bitsandbytes as bnb
14
+ from bitsandbytes.autograd._functions import get_tile_inds, undo_layout
15
+ from bitsandbytes.functional import QuantState
16
+ from bitsandbytes.optim import GlobalOptimManager
17
+ from bitsandbytes.utils import (
18
+ INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING,
19
+ LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING,
20
+ OutlierTracer,
21
+ )
22
+
23
+ T = TypeVar("T", bound="torch.nn.Module")
24
+
25
+
26
class StableEmbedding(torch.nn.Embedding):
    """
    Embedding layer intended to stabilize NLP training: its weight is kept in
    32-bit optimizer state and its output passes through a LayerNorm that is
    always computed in full precision. Weights are initialized with Xavier
    uniform followed by layer normalization.

    Example:

    ```
    # Initialize StableEmbedding layer with vocabulary size 1000, embedding dimension 300
    embedding_layer = StableEmbedding(num_embeddings=1000, embedding_dim=300)

    # Reset embedding parameters
    embedding_layer.reset_parameters()

    # Perform a forward pass with input tensor
    input_tensor = torch.tensor([1, 2, 3])
    output_embedding = embedding_layer(input_tensor)
    ```

    Attributes:
        norm (`torch.nn.LayerNorm`): Layer normalization applied after the embedding.
    """

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: Optional[int] = None,
        max_norm: Optional[float] = None,
        norm_type: float = 2.0,
        scale_grad_by_freq: bool = False,
        sparse: bool = False,
        _weight: Optional[Tensor] = None,
        device=None,
        dtype=None,
    ) -> None:
        """
        Args:
            num_embeddings (`int`): Number of unique embeddings (vocabulary size).
            embedding_dim (`int`): Dimensionality of each embedding vector.
            padding_idx (`Optional[int]`): Pads the output with zeros at the given index.
            max_norm (`Optional[float]`): Renormalizes embeddings to a maximum L2 norm.
            norm_type (`float`, defaults to `2.0`): p-norm used with `max_norm`.
            scale_grad_by_freq (`bool`, defaults to `False`): Scale gradient by token frequency.
            sparse (`bool`, defaults to `False`): Use sparse gradients instead of dense ones.
            _weight (`Optional[Tensor]`): Pretrained embeddings.
        """
        super().__init__(
            num_embeddings,
            embedding_dim,
            padding_idx,
            max_norm,
            norm_type,
            scale_grad_by_freq,
            sparse,
            _weight,
            device,
            dtype,
        )
        self.norm = torch.nn.LayerNorm(embedding_dim, device=device)
        # Force 32-bit optimizer state for this weight regardless of the
        # globally configured optimizer bit width.
        GlobalOptimManager.get_instance().register_module_override(self, "weight", {"optim_bits": 32})

    def reset_parameters(self) -> None:
        torch.nn.init.xavier_uniform_(self.weight)
        self._fill_padding_idx_with_zero()

    # Redefinition of torch.nn.Embedding._fill_padding_idx_with_zero, kept for
    # compatibility with PyTorch < 1.9; must track upstream if it ever changes.
    def _fill_padding_idx_with_zero(self) -> None:
        if self.padding_idx is None:
            return
        with torch.no_grad():
            self.weight[self.padding_idx].fill_(0)

    def forward(self, input: Tensor) -> Tensor:
        embedded = F.embedding(
            input,
            self.weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )

        # always apply layer norm in full precision
        full_precision = embedded.to(torch.get_default_dtype())

        return self.norm(full_precision).to(self.weight.dtype)
130
+
131
+
132
class Embedding(torch.nn.Embedding):
    """
    Word-embedding lookup whose weight is registered for 32-bit optimizer state.
    """

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: Optional[int] = None,
        max_norm: Optional[float] = None,
        norm_type: float = 2.0,
        scale_grad_by_freq: bool = False,
        sparse: bool = False,
        _weight: Optional[Tensor] = None,
        device: Optional[device] = None,
    ) -> None:
        """
        Args:
            num_embeddings (`int`): Number of unique embeddings (vocabulary size).
            embedding_dim (`int`): Dimensionality of each embedding vector.
            padding_idx (`Optional[int]`): Pads the output with zeros at the given index.
            max_norm (`Optional[float]`): Renormalizes embeddings to a maximum L2 norm.
            norm_type (`float`, defaults to `2.0`): p-norm used with `max_norm`.
            scale_grad_by_freq (`bool`, defaults to `False`): Scale gradient by token frequency.
            sparse (`bool`, defaults to `False`): Use sparse gradients instead of dense ones.
            _weight (`Optional[Tensor]`): Pretrained embeddings.
        """
        super().__init__(
            num_embeddings,
            embedding_dim,
            padding_idx,
            max_norm,
            norm_type,
            scale_grad_by_freq,
            sparse,
            _weight,
            device=device,
        )
        # Force 32-bit optimizer state for this weight.
        GlobalOptimManager.get_instance().register_module_override(self, "weight", {"optim_bits": 32})

    def reset_parameters(self) -> None:
        torch.nn.init.xavier_uniform_(self.weight)
        self._fill_padding_idx_with_zero()

    # Redefinition of torch.nn.Embedding._fill_padding_idx_with_zero, kept for
    # compatibility with PyTorch < 1.9; must track upstream if it ever changes.
    def _fill_padding_idx_with_zero(self) -> None:
        if self.padding_idx is None:
            return
        with torch.no_grad():
            self.weight[self.padding_idx].fill_(0)

    def forward(self, input: Tensor) -> Tensor:
        return F.embedding(
            input,
            self.weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )
209
+
210
+
211
class Params4bit(torch.nn.Parameter):
    # Parameter subclass that carries 4-bit quantization metadata (QuantState,
    # blocksize, quant type/storage) alongside the packed data tensor. Moving
    # an unquantized instance to a CUDA device triggers quantization (see `to`).
    def __new__(
        cls,
        data: Optional[torch.Tensor] = None,
        requires_grad=False,  # quantized weights should be frozen by default
        quant_state: Optional[QuantState] = None,
        blocksize: int = 64,
        compress_statistics: bool = True,
        quant_type: str = "fp4",
        quant_storage: torch.dtype = torch.uint8,
        module: Optional["Linear4bit"] = None,
        bnb_quantized: bool = False,
    ) -> "Params4bit":
        if data is None:
            data = torch.empty(0)

        self = torch.Tensor._make_subclass(cls, data, requires_grad)
        self.blocksize = blocksize
        self.compress_statistics = compress_statistics
        self.quant_type = quant_type
        self.quant_state = quant_state
        self.quant_storage = quant_storage
        self.bnb_quantized = bnb_quantized
        self.data = data
        self.module = module
        return self

    def __getstate__(self):
        # Include the tensor payload and requires_grad explicitly — they live
        # outside __dict__ on a Tensor subclass.
        state = self.__dict__.copy()
        state["data"] = self.data
        state["requires_grad"] = self.requires_grad
        return state

    def __setstate__(self, state):
        self.requires_grad = state["requires_grad"]
        self.blocksize = state["blocksize"]
        self.compress_statistics = state["compress_statistics"]
        self.quant_type = state["quant_type"]
        self.quant_state = state["quant_state"]
        self.data = state["data"]
        self.quant_storage = state["quant_storage"]
        self.bnb_quantized = state["bnb_quantized"]
        self.module = state["module"]

    def __deepcopy__(self, memo):
        new_instance = type(self).__new__(type(self))
        state = self.__getstate__()
        new_instance.__setstate__(state)
        # Deep-copy the mutable pieces so the copy is fully independent.
        new_instance.quant_state = copy.deepcopy(state["quant_state"])
        new_instance.data = copy.deepcopy(state["data"])
        return new_instance

    def __copy__(self):
        new_instance = type(self).__new__(type(self))
        state = self.__getstate__()
        new_instance.__setstate__(state)
        return new_instance

    @classmethod
    def from_prequantized(
        cls,
        data: torch.Tensor,
        quantized_stats: Dict[str, Any],
        requires_grad: bool = False,
        device="cuda",
        module: Optional["Linear4bit"] = None,
        **kwargs,
    ) -> "Params4bit":
        # Alternate constructor for weights that are already quantized,
        # e.g. loaded from a serialized checkpoint.
        self = torch.Tensor._make_subclass(cls, data.to(device))
        self.requires_grad = requires_grad
        self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)
        self.blocksize = self.quant_state.blocksize
        self.compress_statistics = self.quant_state.nested
        self.quant_type = self.quant_state.quant_type
        self.bnb_quantized = True

        self.quant_storage = data.dtype
        self.module = module

        if self.module is not None:
            # Mirror the state on the owning module so it can be recovered
            # if the parameter is later swapped out (e.g. by FSDP).
            self.module.quant_state = self.quant_state

        return self

    def _quantize(self, device):
        # Quantize the floating-point weights to packed 4-bit on `device`.
        w = self.data.contiguous().to(device)
        w_4bit, quant_state = bnb.functional.quantize_4bit(
            w,
            blocksize=self.blocksize,
            compress_statistics=self.compress_statistics,
            quant_type=self.quant_type,
            quant_storage=self.quant_storage,
        )
        self.data = w_4bit
        self.quant_state = quant_state
        if self.module is not None:
            self.module.quant_state = quant_state
        self.bnb_quantized = True
        return self

    def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):
        return self.to(device="cuda" if device is None else device, non_blocking=non_blocking)

    @overload
    def to(
        self: T,
        device: Optional[Union[int, device]] = ...,
        dtype: Optional[Union[dtype, str]] = ...,
        non_blocking: bool = ...,
    ) -> T: ...

    @overload
    def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T: ...

    @overload
    def to(self: T, tensor: Tensor, non_blocking: bool = ...) -> T: ...

    def to(self, *args, **kwargs):
        device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)

        # First move onto a CUDA device quantizes in place instead of copying.
        if device is not None and device.type == "cuda" and not self.bnb_quantized:
            return self._quantize(device)
        else:
            if self.quant_state is not None:
                self.quant_state.to(device)

            new_param = Params4bit(
                super().to(device=device, dtype=dtype, non_blocking=non_blocking),
                requires_grad=self.requires_grad,
                quant_state=self.quant_state,
                blocksize=self.blocksize,
                compress_statistics=self.compress_statistics,
                quant_type=self.quant_type,
                quant_storage=self.quant_storage,
            )

            return new_param
348
+
349
+
350
+ def fix_4bit_weight_quant_state_from_module(module: Union["Embedding4bit", "Linear4bit"]):
351
+ if getattr(module.weight, "quant_state", None) is not None:
352
+ return
353
+
354
+ if getattr(module, "quant_state", None) is None:
355
+ warnings.warn(
356
+ "FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first.",
357
+ )
358
+
359
+ # the quant state got lost when the parameter got converted. This happens for example for fsdp
360
+ # since we registered the module, we can recover the state here
361
+ assert module.weight.shape[1] == 1
362
+ if not isinstance(module.weight, Params4bit):
363
+ module.weight = Params4bit(module.weight, quant_storage=module.quant_storage, bnb_quantized=True)
364
+ module.weight.quant_state = module.quant_state
365
+
366
+
367
+ class Linear4bit(nn.Linear):
368
+ """
369
+ This class is the base module for the 4-bit quantization algorithm presented in [QLoRA](https://arxiv.org/abs/2305.14314).
370
+ QLoRA 4-bit linear layers uses blockwise k-bit quantization under the hood, with the possibility of selecting various
371
+ compute datatypes such as FP4 and NF4.
372
+
373
+ In order to quantize a linear layer one should first load the original fp16 / bf16 weights into
374
+ the Linear4bit module, then call `quantized_module.to("cuda")` to quantize the fp16 / bf16 weights.
375
+
376
+ Example:
377
+
378
+ ```python
379
+ import torch
380
+ import torch.nn as nn
381
+
382
+ import bitsandbytes as bnb
383
+ from bnb.nn import Linear4bit
384
+
385
+ fp16_model = nn.Sequential(
386
+ nn.Linear(64, 64),
387
+ nn.Linear(64, 64)
388
+ )
389
+
390
+ quantized_model = nn.Sequential(
391
+ Linear4bit(64, 64),
392
+ Linear4bit(64, 64)
393
+ )
394
+
395
+ quantized_model.load_state_dict(fp16_model.state_dict())
396
+ quantized_model = quantized_model.to(0) # Quantization happens here
397
+ ```
398
+ """
399
+
400
+ def __init__(
401
+ self,
402
+ input_features,
403
+ output_features,
404
+ bias=True,
405
+ compute_dtype=None,
406
+ compress_statistics=True,
407
+ quant_type="fp4",
408
+ quant_storage=torch.uint8,
409
+ device=None,
410
+ ):
411
+ """
412
+ Initialize Linear4bit class.
413
+
414
+ Args:
415
+ input_features (`str`):
416
+ Number of input features of the linear layer.
417
+ output_features (`str`):
418
+ Number of output features of the linear layer.
419
+ bias (`bool`, defaults to `True`):
420
+ Whether the linear class uses the bias term as well.
421
+ """
422
+ super().__init__(input_features, output_features, bias, device)
423
+ self.weight = Params4bit(
424
+ self.weight.data,
425
+ requires_grad=False,
426
+ compress_statistics=compress_statistics,
427
+ quant_type=quant_type,
428
+ quant_storage=quant_storage,
429
+ module=self,
430
+ )
431
+ # self.persistent_buffers = [] # TODO consider as way to save quant state
432
+ self.compute_dtype = compute_dtype
433
+ self.compute_type_is_set = False
434
+ self.quant_state = None
435
+ self.quant_storage = quant_storage
436
+
437
+ def set_compute_type(self, x):
438
+ if x.dtype in [torch.float32, torch.bfloat16]:
439
+ # the input is in a dtype that is safe to compute in, we switch
440
+ # to this type for speed and stability
441
+ self.compute_dtype = x.dtype
442
+ elif x.dtype == torch.float16:
443
+ # we take the compoute dtype passed into the layer
444
+ if self.compute_dtype == torch.float32 and (x.numel() == x.shape[-1]):
445
+ # single batch inference with input torch.float16 and compute_dtype float32 -> slow inference when it could be fast
446
+ # warn the user about this
447
+ warnings.warn(
448
+ "Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.",
449
+ )
450
+ warnings.filterwarnings("ignore", message=".*inference.")
451
+ if self.compute_dtype == torch.float32 and (x.numel() != x.shape[-1]):
452
+ warnings.warn(
453
+ "Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference or training speed.",
454
+ )
455
+ warnings.filterwarnings("ignore", message=".*inference or training")
456
+
457
+ def _save_to_state_dict(self, destination, prefix, keep_vars):
458
+ """
459
+ save weight and bias,
460
+ then fill state_dict with components of quant_state
461
+ """
462
+ super()._save_to_state_dict(destination, prefix, keep_vars) # saving weight and bias
463
+
464
+ if getattr(self.weight, "quant_state", None) is not None:
465
+ for k, v in self.weight.quant_state.as_dict(packed=True).items():
466
+ destination[prefix + "weight." + k] = v if keep_vars else v.detach()
467
+
468
+ def forward(self, x: torch.Tensor):
469
+ fix_4bit_weight_quant_state_from_module(self)
470
+
471
+ # weights are cast automatically as Int8Params, but the bias has to be cast manually
472
+ if self.bias is not None and self.bias.dtype != x.dtype:
473
+ self.bias.data = self.bias.data.to(x.dtype)
474
+
475
+ if not self.compute_type_is_set:
476
+ self.set_compute_type(x)
477
+ self.compute_type_is_set = True
478
+
479
+ inp_dtype = x.dtype
480
+ if self.compute_dtype is not None:
481
+ x = x.to(self.compute_dtype)
482
+
483
+ bias = None if self.bias is None else self.bias.to(self.compute_dtype)
484
+ out = bnb.matmul_4bit(x, self.weight.t(), bias=bias, quant_state=self.weight.quant_state)
485
+
486
+ out = out.to(inp_dtype)
487
+
488
+ return out
489
+
490
+
491
+ class LinearFP4(Linear4bit):
492
+ """
493
+ Implements the FP4 data type.
494
+ """
495
+
496
+ def __init__(
497
+ self,
498
+ input_features,
499
+ output_features,
500
+ bias=True,
501
+ compute_dtype=None,
502
+ compress_statistics=True,
503
+ quant_storage=torch.uint8,
504
+ device=None,
505
+ ):
506
+ """
507
+ Args:
508
+ input_features (`str`):
509
+ Number of input features of the linear layer.
510
+ output_features (`str`):
511
+ Number of output features of the linear layer.
512
+ bias (`bool`, defaults to `True`):
513
+ Whether the linear class uses the bias term as well.
514
+ """
515
+ super().__init__(
516
+ input_features,
517
+ output_features,
518
+ bias,
519
+ compute_dtype,
520
+ compress_statistics,
521
+ "fp4",
522
+ quant_storage,
523
+ device,
524
+ )
525
+
526
+
527
+ class LinearNF4(Linear4bit):
528
+ """Implements the NF4 data type.
529
+
530
+ Constructs a quantization data type where each bin has equal area under a standard normal distribution N(0, 1) that
531
+ is normalized into the range [-1, 1].
532
+
533
+ For more information read the paper: QLoRA: Efficient Finetuning of Quantized LLMs (https://arxiv.org/abs/2305.14314)
534
+
535
+ Implementation of the NF4 data type in bitsandbytes can be found in the `create_normal_map` function in
536
+ the `functional.py` file: https://github.com/TimDettmers/bitsandbytes/blob/main/bitsandbytes/functional.py#L236.
537
+ """
538
+
539
+ def __init__(
540
+ self,
541
+ input_features,
542
+ output_features,
543
+ bias=True,
544
+ compute_dtype=None,
545
+ compress_statistics=True,
546
+ quant_storage=torch.uint8,
547
+ device=None,
548
+ ):
549
+ """
550
+ Args:
551
+ input_features (`str`):
552
+ Number of input features of the linear layer.
553
+ output_features (`str`):
554
+ Number of output features of the linear layer.
555
+ bias (`bool`, defaults to `True`):
556
+ Whether the linear class uses the bias term as well.
557
+ """
558
+ super().__init__(
559
+ input_features,
560
+ output_features,
561
+ bias,
562
+ compute_dtype,
563
+ compress_statistics,
564
+ "nf4",
565
+ quant_storage,
566
+ device,
567
+ )
568
+
569
+
570
+ class Int8Params(torch.nn.Parameter):
571
+ def __new__(
572
+ cls,
573
+ data=None,
574
+ requires_grad=True,
575
+ has_fp16_weights=False,
576
+ CB=None,
577
+ SCB=None,
578
+ ):
579
+ if data is None:
580
+ data = torch.empty(0)
581
+ obj = torch.Tensor._make_subclass(cls, data, requires_grad)
582
+ obj.CB = CB
583
+ obj.SCB = SCB
584
+ obj.has_fp16_weights = has_fp16_weights
585
+ return obj
586
+
587
+ def cuda(self, device):
588
+ if self.has_fp16_weights:
589
+ return super().cuda(device)
590
+ else:
591
+ # we store the 8-bit rows-major weight
592
+ # we convert this weight to the turning/ampere weight during the first inference pass
593
+ B = self.data.contiguous().half().cuda(device)
594
+ CB, CBt, SCB, SCBt, coo_tensorB = bnb.functional.double_quant(B)
595
+ del CBt
596
+ del SCBt
597
+ self.data = CB
598
+ self.CB = CB
599
+ self.SCB = SCB
600
+
601
+ return self
602
+
603
+ def __deepcopy__(self, memo):
604
+ # adjust this if new arguments are added to the constructor
605
+ new_instance = type(self).__new__(
606
+ type(self),
607
+ data=copy.deepcopy(self.data, memo),
608
+ requires_grad=self.requires_grad,
609
+ has_fp16_weights=self.has_fp16_weights,
610
+ CB=copy.deepcopy(self.CB, memo),
611
+ SCB=copy.deepcopy(self.SCB, memo),
612
+ )
613
+ return new_instance
614
+
615
+ @overload
616
+ def to(
617
+ self: T,
618
+ device: Optional[Union[int, device]] = ...,
619
+ dtype: Optional[Union[dtype, str]] = ...,
620
+ non_blocking: bool = ...,
621
+ ) -> T: ...
622
+
623
+ @overload
624
+ def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T: ...
625
+
626
+ @overload
627
+ def to(self: T, tensor: Tensor, non_blocking: bool = ...) -> T: ...
628
+
629
+ def to(self, *args, **kwargs):
630
+ device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)
631
+
632
+ if device is not None and device.type == "cuda" and self.data.device.type == "cpu":
633
+ return self.cuda(device)
634
+ else:
635
+ new_param = Int8Params(
636
+ super().to(device=device, dtype=dtype, non_blocking=non_blocking),
637
+ requires_grad=self.requires_grad,
638
+ has_fp16_weights=self.has_fp16_weights,
639
+ )
640
+ new_param.CB = self.CB
641
+ new_param.SCB = self.SCB
642
+
643
+ return new_param
644
+
645
+
646
+ def maybe_rearrange_weight(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
647
+ weight = state_dict.get(f"{prefix}weight")
648
+ if weight is None:
649
+ # if the state dict has no weights for this layer (e.g., LoRA finetuning), do nothing
650
+ return
651
+ weight_format = state_dict.pop(f"{prefix}weight_format", "row")
652
+
653
+ if isinstance(weight_format, torch.Tensor):
654
+ weight_format = weight_format.item()
655
+
656
+ # For new weights format storage type, we explicitly check
657
+ # if weights_format is on the mapping
658
+ if isinstance(weight_format, int) and weight_format not in INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING:
659
+ raise ValueError(f"Expected supported weight format - got {weight_format}")
660
+ elif isinstance(weight_format, int) and weight_format in INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING:
661
+ weight_format = INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING[weight_format]
662
+
663
+ if weight_format != "row":
664
+ tile_indices = get_tile_inds(weight_format, weight.device)
665
+ state_dict[f"{prefix}weight"] = undo_layout(weight, tile_indices)
666
+
667
+
668
+ class Embedding8bit(nn.Embedding):
669
+ """
670
+ This class implements [LLM.int8()](https://arxiv.org/abs/2208.07339) algorithm for embedding layer
671
+
672
+ Quantization API is similar to Linear8bitLt:
673
+ ```python
674
+ import torch
675
+ import torch.nn as nn
676
+
677
+ from bitsandbytes.nn import Embedding8bit
678
+
679
+ fp16_module = nn.Embedding(128, 64)
680
+ int8_module = Embedding8bit(128, 64)
681
+
682
+ int8_module.load_state_dict(fp16_module.state_dict())
683
+
684
+ int8_module = int8_module.to(0) # Quantization happens here
685
+ ```
686
+ """
687
+
688
+ def __init__(self, num_embeddings, embedding_dim, device=None, dtype=None):
689
+ super().__init__(num_embeddings, embedding_dim, device=device, dtype=dtype)
690
+ self.dtype = self.weight.data.dtype
691
+
692
+ self.weight = Int8Params(self.weight.data, has_fp16_weights=False, requires_grad=False)
693
+
694
+ def _save_to_state_dict(self, destination, prefix, keep_vars):
695
+ raise NotImplementedError("Saving Embedding8bit module is not implemented")
696
+
697
+ def forward(self, input: Tensor) -> Tensor:
698
+ if not hasattr(self.weight, "SCB"):
699
+ raise RuntimeError("Embedding layer is not quantized. Please call .cuda() or .to(device) first.")
700
+
701
+ rows = self.weight.data
702
+ row_stats = self.weight.SCB
703
+
704
+ assert rows.shape == (self.num_embeddings, self.embedding_dim)
705
+ assert row_stats.shape == (self.num_embeddings,)
706
+
707
+ compressed_output = F.embedding(input, rows)
708
+ compressed_output_stats = F.embedding(input, row_stats.view(self.num_embeddings, 1))
709
+
710
+ output = compressed_output * (compressed_output_stats / 127.0)
711
+
712
+ return output.to(self.dtype)
713
+
714
+
715
+ class Embedding4bit(nn.Embedding):
716
+ """
717
+ This is the base class similar to Linear4bit. It implements the 4-bit quantization algorithm presented in
718
+ [QLoRA](https://arxiv.org/abs/2305.14314) for embeddings.
719
+
720
+ Quantization API is similar to Linear4bit:
721
+ ```python
722
+ import torch
723
+ import torch.nn as nn
724
+
725
+ from bitsandbytes.nn import Embedding4bit
726
+
727
+ fp16_module = nn.Embedding(128, 64)
728
+ quantized_module = Embedding4bit(128, 64)
729
+
730
+ quantized_module.load_state_dict(fp16_module.state_dict())
731
+
732
+ quantized_module = quantized_module.to(0) # Quantization happens here
733
+ ```
734
+ """
735
+
736
+ def __init__(
737
+ self,
738
+ num_embeddings,
739
+ embedding_dim,
740
+ dtype=None,
741
+ quant_type="fp4",
742
+ quant_storage=torch.uint8,
743
+ device=None,
744
+ ):
745
+ super().__init__(num_embeddings, embedding_dim, device=device, dtype=dtype)
746
+ self.dtype = self.weight.data.dtype
747
+
748
+ self.weight = Params4bit(
749
+ self.weight.data,
750
+ requires_grad=False,
751
+ compress_statistics=None,
752
+ quant_type=quant_type,
753
+ quant_storage=quant_storage,
754
+ module=self,
755
+ )
756
+
757
+ blocksize = self.weight.blocksize
758
+
759
+ if embedding_dim % blocksize != 0:
760
+ warnings.warn(
761
+ f"Embedding size {embedding_dim} is not divisible by block size {blocksize}. "
762
+ "This will lead to slow inference.",
763
+ )
764
+
765
+ def _forward_with_partial_dequantize(self, input: Tensor):
766
+ assert self.embedding_dim % self.weight.quant_state.blocksize == 0
767
+
768
+ w_4bit_uint8 = self.weight.data.view(torch.uint8).view(self.num_embeddings * self.embedding_dim // 2, 1)
769
+
770
+ output_4bit = torch.nn.functional.embedding(
771
+ weight=w_4bit_uint8.view(self.num_embeddings, self.embedding_dim // 2),
772
+ input=input,
773
+ ).view(-1, 1)
774
+ assert output_4bit.shape == (input.numel() * self.embedding_dim // 2, 1)
775
+
776
+ blocks_per_emb = self.embedding_dim // self.weight.blocksize
777
+
778
+ absmax = self.weight.quant_state.absmax
779
+ assert absmax.shape == (self.num_embeddings * blocks_per_emb,)
780
+
781
+ output_absmax = torch.nn.functional.embedding(
782
+ weight=absmax.view(self.num_embeddings, blocks_per_emb),
783
+ input=input,
784
+ ).view(
785
+ -1,
786
+ )
787
+ assert output_absmax.shape == (input.numel() * blocks_per_emb,)
788
+
789
+ output_quant_state = copy.deepcopy(self.weight.quant_state)
790
+ output_quant_state.absmax = output_absmax
791
+ output_quant_state.shape = torch.Size((*input.shape, self.embedding_dim))
792
+
793
+ output = bnb.functional.dequantize_4bit(output_4bit, output_quant_state)
794
+ assert output.shape == (*input.shape, self.embedding_dim)
795
+
796
+ return output.to(self.dtype)
797
+
798
+ def _save_to_state_dict(self, destination, prefix, keep_vars):
799
+ raise NotImplementedError("Saving Embedding4bit module is not implemented")
800
+
801
+ def forward(self, input: Tensor) -> Tensor:
802
+ fix_4bit_weight_quant_state_from_module(self)
803
+
804
+ if self.embedding_dim % self.weight.quant_state.blocksize == 0:
805
+ return self._forward_with_partial_dequantize(input)
806
+
807
+ dequantized_weight = bnb.functional.dequantize_4bit(self.weight.data, self.weight.quant_state)
808
+
809
+ return torch.nn.functional.embedding(
810
+ weight=dequantized_weight,
811
+ input=input,
812
+ ).to(self.dtype)
813
+
814
+
815
+ class EmbeddingFP4(Embedding4bit):
816
+ def __init__(
817
+ self,
818
+ num_embeddings,
819
+ embedding_dim,
820
+ dtype=None,
821
+ quant_storage=torch.uint8,
822
+ device=None,
823
+ ):
824
+ super().__init__(
825
+ num_embeddings,
826
+ embedding_dim,
827
+ dtype=dtype,
828
+ quant_type="fp4",
829
+ quant_storage=quant_storage,
830
+ device=device,
831
+ )
832
+
833
+
834
+ class EmbeddingNF4(Embedding4bit):
835
+ def __init__(
836
+ self,
837
+ num_embeddings,
838
+ embedding_dim,
839
+ dtype=None,
840
+ quant_storage=torch.uint8,
841
+ device=None,
842
+ ):
843
+ super().__init__(
844
+ num_embeddings,
845
+ embedding_dim,
846
+ dtype=dtype,
847
+ quant_type="nf4",
848
+ quant_storage=quant_storage,
849
+ device=device,
850
+ )
851
+
852
+
853
+ class Linear8bitLt(nn.Linear):
854
+ """
855
+ This class is the base module for the [LLM.int8()](https://arxiv.org/abs/2208.07339) algorithm.
856
+ To read more about it, have a look at the paper.
857
+
858
+ In order to quantize a linear layer one should first load the original fp16 / bf16 weights into
859
+ the Linear8bitLt module, then call `int8_module.to("cuda")` to quantize the fp16 weights.
860
+
861
+ Example:
862
+
863
+ ```python
864
+ import torch
865
+ import torch.nn as nn
866
+
867
+ import bitsandbytes as bnb
868
+ from bnb.nn import Linear8bitLt
869
+
870
+ fp16_model = nn.Sequential(
871
+ nn.Linear(64, 64),
872
+ nn.Linear(64, 64)
873
+ )
874
+
875
+ int8_model = nn.Sequential(
876
+ Linear8bitLt(64, 64, has_fp16_weights=False),
877
+ Linear8bitLt(64, 64, has_fp16_weights=False)
878
+ )
879
+
880
+ int8_model.load_state_dict(fp16_model.state_dict())
881
+ int8_model = int8_model.to(0) # Quantization happens here
882
+ ```
883
+ """
884
+
885
+ def __init__(
886
+ self,
887
+ input_features: int,
888
+ output_features: int,
889
+ bias=True,
890
+ has_fp16_weights=True,
891
+ memory_efficient_backward=False,
892
+ threshold=0.0,
893
+ index=None,
894
+ device=None,
895
+ ):
896
+ """
897
+ Initialize Linear8bitLt class.
898
+
899
+ Args:
900
+ input_features (`int`):
901
+ Number of input features of the linear layer.
902
+ output_features (`int`):
903
+ Number of output features of the linear layer.
904
+ bias (`bool`, defaults to `True`):
905
+ Whether the linear class uses the bias term as well.
906
+ """
907
+ super().__init__(input_features, output_features, bias, device)
908
+ assert not memory_efficient_backward, "memory_efficient_backward is no longer required and the argument is deprecated in 0.37.0 and will be removed in 0.39.0"
909
+ self.state = bnb.MatmulLtState()
910
+ self.index = index
911
+
912
+ self.state.threshold = threshold
913
+ self.state.has_fp16_weights = has_fp16_weights
914
+ self.state.memory_efficient_backward = memory_efficient_backward
915
+ if threshold > 0.0 and not has_fp16_weights:
916
+ self.state.use_pool = True
917
+
918
+ self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights)
919
+ self._register_load_state_dict_pre_hook(maybe_rearrange_weight)
920
+
921
+ def _save_to_state_dict(self, destination, prefix, keep_vars):
922
+ super()._save_to_state_dict(destination, prefix, keep_vars)
923
+
924
+ # we only need to save SCB as extra data, because CB for quantized weights is already stored in weight.data
925
+ scb_name = "SCB"
926
+
927
+ # case 1: .cuda was called, SCB is in self.weight
928
+ param_from_weight = getattr(self.weight, scb_name)
929
+ # case 2: self.init_8bit_state was called, SCB is in self.state
930
+ param_from_state = getattr(self.state, scb_name)
931
+ # case 3: SCB is in self.state, weight layout reordered after first forward()
932
+ layout_reordered = self.state.CxB is not None
933
+
934
+ key_name = prefix + f"{scb_name}"
935
+ format_name = prefix + "weight_format"
936
+
937
+ if not self.state.has_fp16_weights:
938
+ if param_from_weight is not None:
939
+ destination[key_name] = param_from_weight if keep_vars else param_from_weight.detach()
940
+ destination[format_name] = torch.tensor(0, dtype=torch.uint8)
941
+ elif param_from_state is not None and not layout_reordered:
942
+ destination[key_name] = param_from_state if keep_vars else param_from_state.detach()
943
+ destination[format_name] = torch.tensor(0, dtype=torch.uint8)
944
+ elif param_from_state is not None:
945
+ destination[key_name] = param_from_state if keep_vars else param_from_state.detach()
946
+ weights_format = self.state.formatB
947
+ # At this point `weights_format` is an str
948
+ if weights_format not in LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING:
949
+ raise ValueError(f"Unrecognized weights format {weights_format}")
950
+
951
+ weights_format = LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING[weights_format]
952
+
953
+ destination[format_name] = torch.tensor(weights_format, dtype=torch.uint8)
954
+
955
+ def _load_from_state_dict(
956
+ self,
957
+ state_dict,
958
+ prefix,
959
+ local_metadata,
960
+ strict,
961
+ missing_keys,
962
+ unexpected_keys,
963
+ error_msgs,
964
+ ):
965
+ super()._load_from_state_dict(
966
+ state_dict,
967
+ prefix,
968
+ local_metadata,
969
+ strict,
970
+ missing_keys,
971
+ unexpected_keys,
972
+ error_msgs,
973
+ )
974
+ unexpected_copy = list(unexpected_keys)
975
+
976
+ for key in unexpected_copy:
977
+ input_name = key[len(prefix) :]
978
+ if input_name == "SCB":
979
+ if self.weight.SCB is None:
980
+ # buffers not yet initialized, can't access them directly without quantizing first
981
+ raise RuntimeError(
982
+ "Loading a quantized checkpoint into non-quantized Linear8bitLt is "
983
+ "not supported. Please call module.cuda() before module.load_state_dict()",
984
+ )
985
+
986
+ input_param = state_dict[key]
987
+ self.weight.SCB.copy_(input_param)
988
+
989
+ if self.state.SCB is not None:
990
+ self.state.SCB = self.weight.SCB
991
+
992
+ unexpected_keys.remove(key)
993
+
994
+ def init_8bit_state(self):
995
+ self.state.CB = self.weight.CB
996
+ self.state.SCB = self.weight.SCB
997
+ self.weight.CB = None
998
+ self.weight.SCB = None
999
+
1000
+ def forward(self, x: torch.Tensor):
1001
+ self.state.is_training = self.training
1002
+ if self.weight.CB is not None:
1003
+ self.init_8bit_state()
1004
+
1005
+ # weights are cast automatically as Int8Params, but the bias has to be cast manually
1006
+ if self.bias is not None and self.bias.dtype != x.dtype:
1007
+ self.bias.data = self.bias.data.to(x.dtype)
1008
+
1009
+ out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state)
1010
+
1011
+ if not self.state.has_fp16_weights:
1012
+ if self.state.CB is not None and self.state.CxB is not None:
1013
+ # we converted 8-bit row major to turing/ampere format in the first inference pass
1014
+ # we no longer need the row-major weight
1015
+ del self.state.CB
1016
+ self.weight.data = self.state.CxB
1017
+ return out
1018
+
1019
+
1020
+ class OutlierAwareLinear(nn.Linear):
1021
+ def __init__(self, input_features, output_features, bias=True, device=None):
1022
+ super().__init__(input_features, output_features, bias, device)
1023
+ self.outlier_dim = None
1024
+ self.is_quantized = False
1025
+
1026
+ def forward_with_outliers(self, x, outlier_idx):
1027
+ raise NotImplementedError("Please override the `forward_with_outliers(self, x, outlier_idx)` function")
1028
+
1029
+ def quantize_weight(self, w, outlier_idx):
1030
+ raise NotImplementedError("Please override the `quantize_weights(self, w, outlier_idx)` function")
1031
+
1032
+ def forward(self, x):
1033
+ if self.outlier_dim is None:
1034
+ tracer = OutlierTracer.get_instance()
1035
+ if not tracer.is_initialized():
1036
+ print("Please use OutlierTracer.initialize(model) before using the OutlierAwareLinear layer")
1037
+ outlier_idx = tracer.get_outliers(self.weight)
1038
+ # print(outlier_idx, tracer.get_hvalue(self.weight))
1039
+ self.outlier_dim = outlier_idx
1040
+
1041
+ if not self.is_quantized:
1042
+ w = self.quantize_weight(self.weight, self.outlier_dim)
1043
+ self.weight.data.copy_(w)
1044
+ self.is_quantized = True
1045
+
1046
+
1047
+ class SwitchBackLinearBnb(nn.Linear):
1048
+ def __init__(
1049
+ self,
1050
+ input_features,
1051
+ output_features,
1052
+ bias=True,
1053
+ has_fp16_weights=True,
1054
+ memory_efficient_backward=False,
1055
+ threshold=0.0,
1056
+ index=None,
1057
+ device=None,
1058
+ ):
1059
+ super().__init__(input_features, output_features, bias, device)
1060
+ self.state = bnb.MatmulLtState()
1061
+ self.index = index
1062
+
1063
+ self.state.threshold = threshold
1064
+ self.state.has_fp16_weights = has_fp16_weights
1065
+ self.state.memory_efficient_backward = memory_efficient_backward
1066
+ if threshold > 0.0 and not has_fp16_weights:
1067
+ self.state.use_pool = True
1068
+
1069
+ self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights)
1070
+
1071
+ def init_8bit_state(self):
1072
+ self.state.CB = self.weight.CB
1073
+ self.state.SCB = self.weight.SCB
1074
+ self.weight.CB = None
1075
+ self.weight.SCB = None
1076
+
1077
+ def forward(self, x):
1078
+ self.state.is_training = self.training
1079
+
1080
+ if self.weight.CB is not None:
1081
+ self.init_8bit_state()
1082
+
1083
+ out = bnb.matmul_mixed(x.half(), self.weight.half(), bias=None, state=self.state) + self.bias
llava_next/lib/python3.10/site-packages/bitsandbytes/nn/triton_based_modules.py ADDED
@@ -0,0 +1,264 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+
6
+ from bitsandbytes.triton.dequantize_rowwise import dequantize_rowwise
7
+ from bitsandbytes.triton.int8_matmul_mixed_dequantize import (
8
+ int8_matmul_mixed_dequantize,
9
+ )
10
+ from bitsandbytes.triton.int8_matmul_rowwise_dequantize import (
11
+ int8_matmul_rowwise_dequantize,
12
+ )
13
+ from bitsandbytes.triton.quantize_columnwise_and_transpose import (
14
+ quantize_columnwise_and_transpose,
15
+ )
16
+ from bitsandbytes.triton.quantize_global import (
17
+ quantize_global,
18
+ quantize_global_transpose,
19
+ )
20
+ from bitsandbytes.triton.quantize_rowwise import quantize_rowwise
21
+ from bitsandbytes.triton.triton_utils import is_triton_available
22
+
23
+
24
+ class _switchback_global(torch.autograd.Function):
25
+ @staticmethod
26
+ def forward(ctx, X_3D, W, bias):
27
+ # reshape input to [N * L, D]
28
+ X = X_3D.view(-1, X_3D.size(-1))
29
+
30
+ # rowwise quantize for X, global quantize for W
31
+ X_int8, state_X = quantize_rowwise(X)
32
+ W_int8, state_W = quantize_global(W)
33
+
34
+ # save for backward.
35
+ ctx.save_for_backward = X, W
36
+
37
+ # matmult, fused dequant and add bias
38
+ # call "mixed" because we are mixing rowwise quantized and global quantized
39
+ return int8_matmul_mixed_dequantize(X_int8, W_int8.t(), state_X, state_W, bias).view(*X_3D.size()[:-1], -1)
40
+
41
+ @staticmethod
42
+ def backward(ctx, G_3D):
43
+ # reshape input to [N_out * L, D]
44
+ G = G_3D.reshape(-1, G_3D.size(-1))
45
+
46
+ grad_X = grad_W = grad_bias = None
47
+
48
+ X, W = ctx.save_for_backward
49
+ if ctx.needs_input_grad[0]:
50
+ # rowwise quantize for G, global quantize for W
51
+ # for W, we also fuse the transpose operation because only A @ B^T is supported
52
+ # so we transpose once then call .t() in the matmul
53
+ G_int8, state_G = quantize_rowwise(G)
54
+ W_int8, state_W = quantize_global_transpose(W)
55
+ grad_X = int8_matmul_mixed_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(
56
+ *G_3D.size()[:-1],
57
+ -1,
58
+ )
59
+ if ctx.needs_input_grad[1]:
60
+ # backward pass uses standard weight grad
61
+ grad_W = torch.matmul(G.t(), X.to(G.dtype))
62
+ if ctx.needs_input_grad[2]:
63
+ grad_bias = G.sum(dim=0)
64
+
65
+ return grad_X, grad_W, grad_bias
66
+
67
+
68
+ class _switchback_vectorrize(torch.autograd.Function):
69
+ @staticmethod
70
+ def forward(ctx, X_3D, W, bias):
71
+ # reshape input to [N * L, D]
72
+ X = X_3D.view(-1, X_3D.size(-1))
73
+
74
+ ctx.save_for_backward = X, W
75
+ # rowwise quantize for X
76
+ # columnwise quantize for W (first rowwise, transpose later)
77
+ X_int8, state_X = quantize_rowwise(X)
78
+ W_int8, state_W = quantize_rowwise(W)
79
+
80
+ # matmult, fused dequant and add bias
81
+ # call kernel which expects rowwise quantized X and W
82
+ return int8_matmul_rowwise_dequantize(X_int8, W_int8.t(), state_X, state_W, bias).view(*X_3D.size()[:-1], -1)
83
+
84
+ @staticmethod
85
+ def backward(ctx, G_3D):
86
+ X, W = ctx.save_for_backward
87
+
88
+ G = G_3D.reshape(-1, G_3D.size(-1))
89
+
90
+ grad_X = grad_W = grad_bias = None
91
+
92
+ if ctx.needs_input_grad[0]:
93
+ # rowwise quantize for G, columnwise quantize for W and fused transpose
94
+ # we call .t() for weight later because only A @ B^T is supported
95
+ G_int8, state_G = quantize_rowwise(G)
96
+ W_int8, state_W = quantize_columnwise_and_transpose(W)
97
+ grad_X = int8_matmul_rowwise_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(
98
+ *G_3D.size()[:-1],
99
+ -1,
100
+ )
101
+ if ctx.needs_input_grad[1]:
102
+ # backward pass uses standard weight grad
103
+ grad_W = torch.matmul(G.t(), X.to(G.dtype))
104
+ if ctx.needs_input_grad[2]:
105
+ grad_bias = G.sum(dim=0)
106
+
107
+ return grad_X, grad_W, grad_bias
108
+
109
+
110
class _switchback_global_mem_efficient(torch.autograd.Function):
    """Memory-efficient SwitchBack linear: rowwise-int8 input x globally-int8 weight.

    Instead of keeping the fp16 input alive for backward, the int8 copy and its
    quantization state are kept and the input is dequantized on demand.
    """

    @staticmethod
    def forward(ctx, X_3D, W, bias):
        # reshape input to [N * L, D]
        X = X_3D.view(-1, X_3D.size(-1))
        X_3D_sz = X_3D.size()

        # rowwise quantize for X, global quantize for W
        X_int8, state_X = quantize_rowwise(X)
        del X
        W_int8, state_W = quantize_global(W)

        # save for backward.
        # NOTE(review): this ASSIGNS over ctx.save_for_backward instead of calling
        # it — it works, but bypasses autograd's saved-tensor version checks.
        # Presumably intentional here (tensors are freed eagerly); confirm.
        ctx.save_for_backward = X_int8, state_X, W_int8, state_W

        # matmult, fused dequant and add bias
        # call "mixed" because we are mixing rowwise quantized and global quantized
        return int8_matmul_mixed_dequantize(X_int8, W_int8.t(), state_X, state_W, bias).view(*X_3D_sz[:-1], -1)

    @staticmethod
    def backward(ctx, G_3D):
        # reshape input to [N_out * L, D]
        G = G_3D.reshape(-1, G_3D.size(-1))
        G_3D_sz = G_3D.size()

        grad_X = grad_W = grad_bias = None

        X_int8, state_X, W_int8, state_W = ctx.save_for_backward
        if ctx.needs_input_grad[1]:
            # Weight grad needs the (dequantized) input in full precision.
            real_X = dequantize_rowwise(X_int8, state_X)
            del X_int8
            grad_W = torch.matmul(G.t(), real_X.to(G.dtype))
            del real_X
        if ctx.needs_input_grad[2]:
            grad_bias = G.sum(dim=0)
        if ctx.needs_input_grad[0]:
            G_int8, state_G = quantize_rowwise(G)
            del G
            # Make the transposed weight contiguous, then .t() again because the
            # kernel computes A @ B^T only.
            W_int8 = W_int8.t().contiguous()
            grad_X = int8_matmul_mixed_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(*G_3D_sz[:-1], -1)

        return grad_X, grad_W, grad_bias
152
+
153
+
154
class SwitchBackLinear(nn.Linear):
    """Linear layer whose matmul runs in int8 via the SwitchBack scheme.

    The input is quantized rowwise; the weight is quantized globally
    (per-tensor, the default) or rowwise when ``vector_wise_quantization`` is
    True. Requires triton.

    Args:
        in_features / out_features / bias / device / dtype: as in ``nn.Linear``.
        vector_wise_quantization: quantize the weight rowwise instead of globally.
        mem_efficient: use the memory-efficient backward (global quantization only).

    Raises:
        ImportError: if triton is not installed.
        ValueError: if ``mem_efficient`` is combined with vector-wise quantization.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        device=None,
        dtype=None,
        vector_wise_quantization: bool = False,
        mem_efficient: bool = False,
    ):
        super().__init__(in_features, out_features, bias, device, dtype)

        if not is_triton_available():
            raise ImportError("""Could not import triton. Please install triton to use SwitchBackLinear.
                            Alternatively, you can use bnb.nn.SwitchBackLinearBnb, but it will be slower""")

        # By default, we use the global quantization.
        self.vector_wise_quantization = vector_wise_quantization
        if self.vector_wise_quantization:
            self._fn = _switchback_vectorrize
            if mem_efficient:
                # Fixed: previously this printed a message and called exit(1),
                # which kills the whole process; an unsupported configuration
                # should be reported as an exception instead.
                raise ValueError("mem efficient is not supported for vector-wise quantization.")
        else:
            if mem_efficient:
                self._fn = _switchback_global_mem_efficient
            else:
                self._fn = _switchback_global

    def prepare_for_eval(self):
        """Pre-quantize the weight for inference so forward skips quantization.

        Experimental and not tested thoroughly. Must be called explicitly, e.g.::

            def cond_prepare(m):
                if hasattr(m, "prepare_for_eval"):
                    m.prepare_for_eval()
            model.apply(cond_prepare)
        """
        print("=> preparing for eval.")
        if self.vector_wise_quantization:
            W_int8, state_W = quantize_rowwise(self.weight)
        else:
            W_int8, state_W = quantize_global(self.weight)

        self.register_buffer("W_int8", W_int8)
        self.register_buffer("state_W", state_W)

        # The fp16 weight is no longer needed once the int8 copy exists.
        del self.weight

    def forward(self, x):
        if self.training:
            return self._fn.apply(x, self.weight, self.bias)
        else:
            # If it hasn't been "prepared for eval", run the standard forward pass.
            if not hasattr(self, "W_int8"):
                return self._fn.apply(x, self.weight, self.bias)

            # Otherwise, use pre-computed weights.
            X = x.view(-1, x.size(-1))
            X_int8, state_X = quantize_rowwise(X)

            if self.vector_wise_quantization:
                return int8_matmul_rowwise_dequantize(X_int8, self.W_int8.t(), state_X, self.state_W, self.bias).view(
                    *x.size()[:-1],
                    -1,
                )
            else:
                return int8_matmul_mixed_dequantize(X_int8, self.W_int8.t(), state_X, self.state_W, self.bias).view(
                    *x.size()[:-1],
                    -1,
                )
225
+
226
+
227
# Convenience aliases: pre-configured SwitchBackLinear variants.
SwitchBackLinearGlobal = partial(SwitchBackLinear, vector_wise_quantization=False)
SwitchBackLinearGlobalMemEfficient = partial(SwitchBackLinear, vector_wise_quantization=False, mem_efficient=True)
SwitchBackLinearVectorwise = partial(SwitchBackLinear, vector_wise_quantization=True)
230
+
231
+
232
# This is just the standard linear function.
class StandardLinearFunction(torch.autograd.Function):
    """Plain linear transform ``x @ W^T + b`` as an explicit autograd Function.

    Serves as the exact-precision baseline against the quantized SwitchBack
    variants; supports inputs with arbitrary leading dimensions.
    """

    @staticmethod
    def forward(ctx, input, weight, bias=None):
        # Keep a flattened [rows, features] view for the backward pass.
        flat_in = input.view(-1, input.size(-1))
        ctx.save_for_backward(flat_in, weight, bias)

        result = input.matmul(weight.t())
        if bias is not None:
            result = result + bias.unsqueeze(0).expand_as(result)
        return result.view(*input.size()[:-1], -1)

    @staticmethod
    def backward(ctx, grad_output_3D):
        flat_in, weight, bias = ctx.saved_tensors

        # Collapse leading dims of the incoming gradient to match flat_in.
        grad_out = grad_output_3D.reshape(-1, grad_output_3D.size(-1))

        grad_input = grad_weight = grad_bias = None

        if ctx.needs_input_grad[0]:
            grad_input = grad_out.matmul(weight.to(grad_out.dtype)).view(*grad_output_3D.size()[:-1], -1)
        if ctx.needs_input_grad[1]:
            grad_weight = grad_out.t().matmul(flat_in.to(grad_out.dtype))
        if ctx.needs_input_grad[2] and bias is not None:
            grad_bias = grad_out.sum(0)

        return grad_input, grad_weight, grad_bias
260
+
261
+
262
class StandardLinear(nn.Linear):
    # nn.Linear routed through StandardLinearFunction (explicit autograd);
    # exact-precision counterpart to the SwitchBack layers above.
    def forward(self, x):
        return StandardLinearFunction.apply(x, self.weight, self.bias)
llava_next/lib/python3.10/site-packages/bitsandbytes/optim/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.23 kB). View file
 
llava_next/lib/python3.10/site-packages/bitsandbytes/research/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from . import nn
2
+ from .autograd._functions import (
3
+ matmul_fp8_global,
4
+ matmul_fp8_mixed,
5
+ switchback_bnb,
6
+ )
llava_next/lib/python3.10/site-packages/bitsandbytes/research/autograd/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/bitsandbytes/research/autograd/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes). View file
 
llava_next/lib/python3.10/site-packages/bitsandbytes/research/autograd/__pycache__/_functions.cpython-310.pyc ADDED
Binary file (8.89 kB). View file
 
llava_next/lib/python3.10/site-packages/bitsandbytes/research/autograd/_functions.py ADDED
@@ -0,0 +1,421 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import reduce # Required in Python 3
2
+ import operator
3
+ from typing import Optional
4
+ import warnings
5
+
6
+ import torch
7
+
8
+ from bitsandbytes.autograd._functions import GlobalOutlierPooler, MatmulLtState
9
+ import bitsandbytes.functional as F
10
+
11
+
12
# math.prod not compatible with python < 3.8
def prod(iterable):
    """Return the product of all elements of *iterable* (1 for empty input)."""
    result = 1
    for value in iterable:
        result = result * value
    return result
15
+
16
+
17
class MatMulFP8Mixed(torch.autograd.Function):
    # forward is the same, but we added the fallback for pre-turing GPUs
    # backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
    #
    # Simulated-FP8 matmul: inputs are quantized to an 8-bit code book and
    # immediately dequantized, so the matmul itself runs in the original dtype.
    # "Mixed": A is quantized blockwise, B per-tensor; the weight gradient is
    # computed from the unquantized grad_output.

    @staticmethod
    def forward(ctx, A, B, out=None, fw_code=None, bw_code=None, bsz=1024, bsz2=1024):
        # fw_code/bw_code: quantization code books for forward/backward;
        # bsz/bsz2: blockwise-quantization block sizes for forward/backward.
        # default of pytorch behavior if inputs are empty
        ctx.is_empty = False
        if prod(A.shape) == 0:
            ctx.is_empty = True
            ctx.A = A
            ctx.B = B

            B_shape = B.shape
            if A.shape[-1] == B_shape[0]:
                return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)
            else:
                return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)

        # 1. Dequantize
        # 2. MatmulnN
        cA, state = F.quantize_blockwise(A, code=fw_code, blocksize=bsz)
        fp8A = F.dequantize_blockwise(cA, state, blocksize=bsz).to(A.dtype)

        cB, state = F.quantize(B.float(), code=fw_code)
        fp8B = F.dequantize(cB, state).to(B.dtype)

        output = torch.matmul(fp8A, fp8B)

        # output is half

        # 3. Save state
        ctx.fw_code = fw_code
        ctx.bw_code = bw_code
        ctx.bsz = bsz
        ctx.bsz2 = bsz2
        ctx.dtype_A, ctx.dtype_B = A.dtype, B.dtype

        if any(ctx.needs_input_grad[:2]):
            # NOTE: we send back A, and re-quant.
            ctx.tensors = (A, fp8B)
        else:
            ctx.tensors = (None, None)

        return output

    @staticmethod
    def backward(ctx, grad_output):
        # Returns grads for (A, B) plus None for the five non-tensor args.
        if ctx.is_empty:
            return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, None, None, None, None

        req_gradA, req_gradB, _, _, _, _, _ = ctx.needs_input_grad
        A, B = ctx.tensors

        grad_A, grad_B = None, None

        # Quantize/dequantize the incoming gradient with the backward code book.
        # TODO: Fix blocksize to be output_dim
        cgrad_out, state = F.quantize_blockwise(grad_output, code=ctx.bw_code, blocksize=ctx.bsz2)
        fp8out = F.dequantize_blockwise(cgrad_out, state, blocksize=ctx.bsz2).to(grad_output.dtype)

        # cgrad_output_2, state_2 = F.quantize(grad_output.float(), code=ctx.bw_code)
        # fp8out_2 = F.dequantize(cgrad_output_2, state_2).to(grad_output.dtype)

        # grad_output_reshape = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()
        # fp8grad_transpose, stategrad_transpose = F.vectorwise_quant(grad_output_reshape, dim=0, quant_type='vector')
        # fp8out_transpose = (fp8grad_transpose / 7) * stategrad_transpose
        # fp8out_transpose = fp8out_transpose.view(grad_output.shape[0], grad_output.shape[1], grad_output.shape[2])

        # not supported by PyTorch. TODO: create work-around
        if req_gradA:
            grad_A = torch.matmul(fp8out, B.t().to(fp8out.dtype)).to(A.dtype)

        if req_gradB:
            if len(A.shape) == 3:
                At = A.transpose(2, 1).contiguous()
            else:
                At = A.transpose(1, 0).contiguous()
            # cA, state = F.quantize(At.float(), code=ctx.fw_code)
            # fp8At = F.dequantize(cA, state).to(A.dtype)
            # Unlike MatMulFP8Global, the weight grad here uses the *unquantized*
            # grad_output and A (the quantized path above is left commented out).
            grad_B = torch.matmul(At.to(grad_output.dtype), grad_output).to(B.dtype)

        return grad_A, grad_B, None, None, None, None, None
99
+
100
+
101
class MatMulFP8Global(torch.autograd.Function):
    # forward is the same, but we added the fallback for pre-turing GPUs
    # backward is mostly the same, but adds one extra clause (see "elif state.CxB is not None")
    #
    # Simulated-FP8 matmul with per-tensor ("global") quantization for both
    # operands; backward also quantizes A and grad_output before the matmuls.

    @staticmethod
    def forward(ctx, A, B, out=None, fw_code=None, bw_code=None, bsz=1024, bsz2=1024):
        # fw_code/bw_code: quantization code books for forward/backward;
        # bsz/bsz2 are stored for API symmetry with MatMulFP8Mixed but the
        # per-tensor quantize path here does not use a block size.
        # default of pytorch behavior if inputs are empty
        ctx.is_empty = False
        if prod(A.shape) == 0:
            ctx.is_empty = True
            ctx.A = A
            ctx.B = B

            B_shape = B.shape
            if A.shape[-1] == B_shape[0]:
                return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)
            else:
                return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)

        # 1. Dequantize
        # 2. MatmulnN
        cA, state = F.quantize(A.float(), code=fw_code)
        fp8A = F.dequantize(cA, state).to(A.dtype)

        cB, state = F.quantize(B.float(), code=fw_code)
        fp8B = F.dequantize(cB, state).to(B.dtype)

        output = torch.matmul(fp8A, fp8B)

        # output is half

        # 3. Save state
        ctx.fw_code = fw_code
        ctx.bw_code = bw_code
        ctx.bsz = bsz
        ctx.bsz2 = bsz2
        ctx.dtype_A, ctx.dtype_B = A.dtype, B.dtype

        if any(ctx.needs_input_grad[:2]):
            # NOTE: we send back A, and re-quant.
            ctx.tensors = (A, fp8B)
        else:
            ctx.tensors = (None, None)

        return output

    @staticmethod
    def backward(ctx, grad_output):
        # Returns grads for (A, B) plus None for the five non-tensor args.
        if ctx.is_empty:
            return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, None, None, None, None

        req_gradA, req_gradB, _, _, _, _, _ = ctx.needs_input_grad
        A, B = ctx.tensors

        grad_A, grad_B = None, None

        # TODO: Fix blocksize to be output_dim
        cgrad_out, state = F.quantize(grad_output.float(), code=ctx.bw_code)
        fp8out = F.dequantize(cgrad_out, state).to(grad_output.dtype)

        # cgrad_output_2, state_2 = F.quantize(grad_output.float(), code=ctx.bw_code)
        # fp8out_2 = F.dequantize(cgrad_output_2, state_2).to(grad_output.dtype)

        # grad_output_reshape = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()
        # fp8grad_transpose, stategrad_transpose = F.vectorwise_quant(grad_output_reshape, dim=0, quant_type='vector')
        # fp8out_transpose = (fp8grad_transpose / 7) * stategrad_transpose
        # fp8out_transpose = fp8out_transpose.view(grad_output.shape[0], grad_output.shape[1], grad_output.shape[2])

        # not supported by PyTorch. TODO: create work-around
        if req_gradA:
            grad_A = torch.matmul(fp8out, B.t().to(fp8out.dtype)).to(A.dtype)

        if req_gradB:
            if len(A.shape) == 3:
                At = A.transpose(2, 1).contiguous()
            else:
                At = A.transpose(1, 0).contiguous()
            # Re-quantize the saved input with the *forward* code book before
            # forming the weight gradient (unlike the Mixed variant).
            cA, state = F.quantize(At.float(), code=ctx.fw_code)
            fp8At = F.dequantize(cA, state).to(A.dtype)
            grad_B = torch.matmul(fp8At.to(fp8out.dtype), fp8out).to(B.dtype)

        return grad_A, grad_B, None, None, None, None, None
183
+
184
+
185
class SwitchBackBnb(torch.autograd.Function):
    """Int8 linear with mixed-precision outlier decomposition (MatMul8bitLt-style),
    but with a plain fp16 weight-gradient in backward (the SwitchBack scheme)."""

    @staticmethod
    # TODO: the B008 on the line below is a likely bug; the current implementation will
    # have each SwitchBackBnb instance share a single MatmulLtState instance!!!
    def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState()):  # noqa: B008
        # default to pytorch behavior if inputs are empty
        ctx.is_empty = False
        if prod(A.shape) == 0:
            ctx.is_empty = True
            ctx.A = A
            ctx.B = B
            ctx.bias = bias
            if A.shape[-1] == B.shape[0]:
                return torch.empty(A.shape[:-1] + B.shape[1:], dtype=A.dtype, device=A.device)
            else:
                return torch.empty(A.shape[:-1] + B.shape[:1], dtype=A.dtype, device=A.device)

        # 1. Quantize A
        # 2. Quantize B
        # 3. Matmul
        # 4. Mixed-precision decomposition matmul
        # 5. Save state
        formatB = state.formatB
        input_shape = A.shape
        if state.outlier_pool is None:
            state.outlier_pool = GlobalOutlierPooler.get_instance()

        # Cast A to fp16
        if A.dtype != torch.float16:
            warnings.warn(f"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization")

        # 1. Quantize A (int8 row/col-wise, with outlier extraction above threshold)
        if len(A.shape) == 3:
            A = A.view(-1, A.shape[-1]).contiguous()
        CA, CAt, SCA, SCAt, coo_tensorA = F.double_quant(A.to(torch.float16), threshold=state.threshold)

        if state.threshold > 0.0 and coo_tensorA is not None:
            if state.has_fp16_weights:
                # Zero outlier columns in the int8 copies; keep them in fp16
                # (subA / state.subB) for the decomposition matmul below.
                idx = torch.unique(coo_tensorA.colidx).long()
                CA[:, idx] = 0
                CAt[:, idx] = 0
                subA = A[:, idx]
                state.subB = B[:, idx].t().contiguous()
                state.idx = idx
            else:
                if state.CxB is None:
                    # B in in 8-bit row-major, we can transform it back to 16-bit to extract outlier dimensions
                    # we also need to convert it to the turing/ampere format
                    state.CxB, state.SB = F.transform(state.CB, to_order=formatB)
        else:
            # print('A shape', A.shape)
            if not state.has_fp16_weights and state.CxB is None:
                state.CxB, state.SB = F.transform(state.CB, to_order=formatB)
            subA = None

        # 2. Quantize B (only when holding fp16 weights; otherwise B is pre-quantized in state)
        if state.has_fp16_weights:
            # print('B shape', B.shape)
            has_grad = True if (getattr(B, "grad", None) is not None) else False
            is_transposed = not B.is_contiguous() and B.shape[0] == B.stride(1)
            if is_transposed:
                B = B.contiguous()

            if (state.is_training and not has_grad) or state.CxB is None:
                state.reset_grads()
                (
                    CB,
                    state.CBt,
                    state.SCB,
                    state.SCBt,
                    coo_tensorB,
                ) = F.double_quant(B.to(torch.float16))
                state.CxB, state.SB = F.transform(CB, to_order=formatB)
        else:
            has_grad = False

        if coo_tensorA is not None and not state.has_fp16_weights:
            # extract outliers

            outlier_idx = torch.unique(coo_tensorA.colidx)
            state.idx = outlier_idx
            # state.outlier_pool.add_outliers(outlier_idx, A.shape[-1])
            # if state.use_pool and state.outlier_pool.model_dim == A.shape[-1]:
            #     # do not use pool for 2nd FFN layer
            #     state.idx = state.outlier_pool.get_current_outlier_idx().to(A.device)
            # else:
            #     state.idx = outlier_idx
            outliers = F.extract_outliers(state.CxB, state.SB, state.idx.int())
            state.subB = (outliers * state.SCB.view(-1, 1) / 127.0).t().contiguous().to(A.dtype)
            CA[:, state.idx.long()] = 0
            CAt[:, state.idx.long()] = 0
            subA = A[:, state.idx.long()]

        shapeB = state.SB[0]

        if len(input_shape) == 3:
            output_shape = (input_shape[0], input_shape[1], shapeB[0])
        else:
            output_shape = (input_shape[0], shapeB[0])

        # 3. Matmul (int8 GEMM in col32/turing-ampere layouts, dequantized on output)
        C32A, SA = F.transform(CA, "col32")
        out32, Sout32 = F.igemmlt(C32A, state.CxB, SA, state.SB)
        # we apply the fused bias here

        if bias is None or bias.dtype == torch.float16:
            output = F.mm_dequant(out32, Sout32, SCA, state.SCB, bias=bias)
            output = output.to(A.dtype)
        else:  # apply bias separately
            output = F.mm_dequant(out32, Sout32, SCA, state.SCB, bias=None)
            output = output.to(A.dtype).add_(bias)

        # 4. Mixed-precision decomposition matmul (fp16 outlier columns)
        if coo_tensorA is not None and subA is not None:
            output += torch.matmul(subA, state.subB)

        # 5. Save state
        ctx.state = state

        ctx.formatB = formatB
        ctx.grad_shape = input_shape
        ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype

        if any(ctx.needs_input_grad[:2]):
            # NOTE(review): tensors are stashed directly on ctx rather than via
            # save_for_backward — presumably to skip version checks; confirm.
            ctx.tensors = (CAt, subA, A)
            ctx.tensor_states = (SCAt, state.idx)
        else:
            ctx.tensors = [None, None, None]
            ctx.tensor_states = (None, None)
            ctx.save_for_backward(None, None)

        clone_func = torch.clone if len(output_shape) == 3 else lambda x: x
        return clone_func(output.view(output_shape))

    @staticmethod
    def backward(ctx, grad_output):
        # Returns grads for (A, B, out, bias, state); out/state get None.
        if ctx.is_empty:
            bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)
            return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None
        req_gradA, req_gradB, _, req_gradBias, _ = ctx.needs_input_grad
        CAt, subA, A = ctx.tensors
        SCAt, idx = ctx.tensor_states
        formatB = ctx.formatB
        state = ctx.state
        grad_A = grad_B = grad_bias = None

        if req_gradBias:
            # compute grad_bias first before changing grad_output dtype
            grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)

        # Cast grad_output to fp16
        if len(grad_output.shape) == 3:
            grad_output = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()

        Cgrad, Cgradt, SCgrad, SCgradt, coo_tensor = F.double_quant(grad_output.to(torch.float16))

        if req_gradB:
            # print('back A shape', A.shape)
            # print('grad output t shape', grad_output.t().shape)
            # SwitchBack: plain fp16 weight gradient (no int8 path).
            grad_B = torch.matmul(grad_output.t(), A)

        if req_gradA:
            if state.CBt is not None:
                # int8 path: grad_A = dequant(int8(grad_output) @ int8(B^T))
                C32grad, Sgrad = F.transform(Cgrad, "col32")
                if state.CxBt is None:
                    state.CxBt, state.SBt = F.transform(state.CBt, to_order=formatB, transpose=True)
                # print('back B shape', state.CxBt.shape)
                # print('back grad shape', C32grad.shape)
                gradA32, SgradA32 = F.igemmlt(C32grad, state.CxBt, Sgrad, state.SBt)
                grad_A = F.mm_dequant(gradA32, SgradA32, SCgrad, state.SCBt).view(ctx.grad_shape).to(ctx.dtype_A)

            elif state.CB is not None:
                # fp16 fallback: dequantize CB and do a normal matmul.
                CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))
                grad_A = torch.matmul(grad_output, CB).view(ctx.grad_shape).to(ctx.dtype_A)
            else:
                raise Exception("State must contain either CBt or CB matrix for backward")

        return grad_A, grad_B, None, grad_bias, None
363
+
364
+
365
def get_block_sizes(input_matrix, weight_matrix):
    """Derive quantization block sizes (bsz, bsz2) from the layer dimensions.

    Args:
        input_matrix: activation tensor; its last dim is the input feature count.
        weight_matrix: weight tensor; whichever of its two dims is NOT the input
            feature count is taken as the output feature count.

    Returns:
        (bsz, bsz2): forward/backward block sizes — the largest power-of-two
        bucket (4096 down to 64) strictly containing the respective feature
        count, or 1024 when the dimension is 0 (degenerate input).
    """
    input_features = input_matrix.shape[-1]
    output_features = weight_matrix.shape[0] if weight_matrix.shape[1] == input_features else weight_matrix.shape[1]

    def _block_size(features):
        # Fixed: the original indexed array[i + 1] and raised IndexError for
        # features <= 0; pairing thresholds with zip avoids running off the end.
        thresholds = [4096, 2048, 1024, 512, 256, 128, 64, 0]
        for size, lower in zip(thresholds, thresholds[1:]):
            if features > lower:
                return size
        return 1024  # original default for the unmatched case

    return _block_size(input_features), _block_size(output_features)
380
+
381
+
382
def matmul_fp8_global(
    A: torch.Tensor,
    B: torch.Tensor,
    fw_code: torch.Tensor,
    bw_code: torch.Tensor,
    out: Optional[torch.Tensor] = None,
    bsz: int = -1,
    bsz2: int = -1,
):
    """Simulated-FP8 matmul with per-tensor quantization (MatMulFP8Global).

    A block size of -1 means "derive from the operand shapes".
    """
    needs_auto_sizes = (bsz == -1) or (bsz2 == -1)
    if needs_auto_sizes:
        bsz, bsz2 = get_block_sizes(A, B)
    return MatMulFP8Global.apply(A, B, out, fw_code, bw_code, bsz, bsz2)
394
+
395
+
396
def matmul_fp8_mixed(
    A: torch.Tensor,
    B: torch.Tensor,
    fw_code: torch.Tensor,
    bw_code: torch.Tensor,
    out: Optional[torch.Tensor] = None,
    bsz: int = -1,
    bsz2: int = -1,
):
    """Simulated-FP8 matmul with blockwise-quantized A (MatMulFP8Mixed).

    A block size of -1 means "derive from the operand shapes".
    """
    needs_auto_sizes = (bsz == -1) or (bsz2 == -1)
    if needs_auto_sizes:
        bsz, bsz2 = get_block_sizes(A, B)
    return MatMulFP8Mixed.apply(A, B, out, fw_code, bw_code, bsz, bsz2)
408
+
409
+
410
def switchback_bnb(
    A: torch.Tensor,
    B: torch.Tensor,
    out: Optional[torch.Tensor] = None,
    state: Optional[MatmulLtState] = None,
    threshold=0.0,
    bias=None,
):
    """Functional entry point for SwitchBackBnb.

    Creates a fresh MatmulLtState when none is supplied; a positive threshold
    enables outlier decomposition.
    """
    state = state or MatmulLtState()
    if threshold > 0.0:
        state.threshold = threshold
    return SwitchBackBnb.apply(A, B, out, bias, state)
llava_next/lib/python3.10/site-packages/bitsandbytes/research/nn/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .modules import LinearFP8Global, LinearFP8Mixed
llava_next/lib/python3.10/site-packages/bitsandbytes/research/nn/modules.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TypeVar
2
+
3
+ import torch
4
+ from torch import nn
5
+
6
+ import bitsandbytes as bnb
7
+
8
+ T = TypeVar("T", bound="torch.nn.Module")
9
+
10
+
11
class LinearFP8Mixed(nn.Linear):
    """nn.Linear whose matmul goes through simulated FP8 (matmul_fp8_mixed).

    Quantization code books are created lazily on the first forward call so
    they end up on the input's device.
    """

    def __init__(self, input_features, output_features, bias=True):
        super().__init__(input_features, output_features, bias)
        self.bw_code = None
        self.fw_code = None
        # Pick the largest power-of-two block-size bucket for each dimension
        # (same table as get_block_sizes in research.autograd._functions).
        # NOTE(review): array[i + 1] would raise IndexError for a 0-sized
        # dimension — presumably feature counts are always >= 1; confirm.
        array = [4096, 2048, 1024, 512, 256, 128, 64, 0]
        for i, k in enumerate(array):
            if input_features > array[i + 1]:
                self.bsz = k
                break
        for i, k in enumerate(array):
            if output_features > array[i + 1]:
                self.bsz2 = k
                break

    def forward(self, x: torch.Tensor):
        if self.fw_code is None:
            # (signed, 5 exp, 2 mantissa) map for gradients and (signed, 4 exp,
            # 3 mantissa) for activations — presumably e5m2/e4m3; confirm
            # against bnb.functional.create_fp8_map's signature.
            self.bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(x.device)
            self.fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(x.device)

        out = bnb.research.matmul_fp8_mixed(
            x,
            self.weight.t(),
            fw_code=self.fw_code,
            bw_code=self.bw_code,
            bsz=self.bsz,
            bsz2=self.bsz2,
        )
        if self.bias is not None:
            out += self.bias

        return out
+
44
+
45
class LinearFP8Global(nn.Linear):
    """nn.Linear whose matmul goes through simulated FP8 with per-tensor
    (global) quantization (matmul_fp8_global).

    Quantization code books are created lazily on the first forward call so
    they end up on the input's device.
    """

    def __init__(self, input_features, output_features, bias=True):
        super().__init__(input_features, output_features, bias)
        self.bw_code = None
        self.fw_code = None
        # Pick the largest power-of-two block-size bucket for each dimension
        # (same table as get_block_sizes in research.autograd._functions).
        array = [4096, 2048, 1024, 512, 256, 128, 64, 0]
        for i, k in enumerate(array):
            if input_features > array[i + 1]:
                self.bsz = k
                break
        for i, k in enumerate(array):
            if output_features > array[i + 1]:
                self.bsz2 = k
                break

    def forward(self, x: torch.Tensor):
        if self.fw_code is None:
            self.bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(x.device)
            self.fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(x.device)

        # Fixed: was `bnb.matmul_fp8_global`, but matmul_fp8_global is exported
        # from bitsandbytes.research (see research/__init__.py), matching the
        # sibling LinearFP8Mixed which calls bnb.research.matmul_fp8_mixed.
        out = bnb.research.matmul_fp8_global(
            x,
            self.weight.t(),
            fw_code=self.fw_code,
            bw_code=self.bw_code,
            bsz=self.bsz,
            bsz2=self.bsz2,
        )
        if self.bias is not None:
            out += self.bias

        return out
llava_next/lib/python3.10/site-packages/bitsandbytes/triton/__init__.py ADDED
File without changes
llava_next/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (175 Bytes). View file
 
llava_next/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/dequantize_rowwise.cpython-310.pyc ADDED
Binary file (1.82 kB). View file
 
llava_next/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/int8_matmul_mixed_dequantize.cpython-310.pyc ADDED
Binary file (4.79 kB). View file
 
llava_next/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/int8_matmul_rowwise_dequantize.cpython-310.pyc ADDED
Binary file (4.8 kB). View file
 
llava_next/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_columnwise_and_transpose.cpython-310.pyc ADDED
Binary file (2.14 kB). View file
 
llava_next/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_global.cpython-310.pyc ADDED
Binary file (3.3 kB). View file
 
llava_next/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/quantize_rowwise.cpython-310.pyc ADDED
Binary file (1.92 kB). View file
 
llava_next/lib/python3.10/site-packages/bitsandbytes/triton/__pycache__/triton_utils.cpython-310.pyc ADDED
Binary file (353 Bytes). View file
 
llava_next/lib/python3.10/site-packages/bitsandbytes/triton/dequantize_rowwise.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+
3
+ import torch
4
+
5
+ from bitsandbytes.triton.triton_utils import is_triton_available
6
+
7
if not is_triton_available():

    def dequantize_rowwise(x: torch.Tensor, state_x: torch.Tensor):
        # Stub used when triton is missing; callers must not rely on the result.
        return None
else:
    import triton
    import triton.language as tl

    # rowwise quantize

    # TODO: autotune this better.
    @triton.autotune(
        configs=[
            triton.Config({}, num_stages=1, num_warps=8),
            triton.Config({}, num_stages=2, num_warps=8),
            triton.Config({}, num_stages=4, num_warps=8),
            triton.Config({}, num_stages=8, num_warps=8),
            triton.Config({}, num_stages=1),
            triton.Config({}, num_stages=2),
            triton.Config({}, num_stages=4),
            triton.Config({}, num_stages=8),
            triton.Config({}, num_warps=1),
            triton.Config({}, num_warps=2),
            triton.Config({}, num_warps=4),
            triton.Config({}, num_warps=8),
        ],
        key=["n_elements"],
    )
    @triton.jit
    def _dequantize_rowwise(
        x_ptr,
        state_x,
        output_ptr,
        inv_127,
        n_elements,
        BLOCK_SIZE: tl.constexpr,
        P2: tl.constexpr,
    ):
        # One program per row: BLOCK_SIZE is the row length, P2 the next
        # power of two >= BLOCK_SIZE (tl.arange needs a power-of-two extent).
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        arange = tl.arange(0, P2)
        offsets = block_start + arange
        row_mask = arange < BLOCK_SIZE
        x = tl.load(x_ptr + offsets, mask=row_mask)
        # Scale int8 values by the row's stored absmax / 127.
        max_val = tl.load(state_x + pid)
        output = max_val * x * inv_127
        tl.store(output_ptr + offsets, output, mask=row_mask)

    def dequantize_rowwise(x: torch.Tensor, state_x: torch.Tensor):
        """Dequantize a rowwise-quantized int8 matrix back to fp16.

        x: (rows, cols) int8 CUDA tensor; state_x: per-row absmax values.
        """
        output = torch.empty(*x.shape, device=x.device, dtype=torch.float16)

        P2 = int(2 ** (math.ceil(math.log2(x.shape[1]))))

        assert x.is_cuda and output.is_cuda
        n_elements = output.numel()
        grid = lambda meta: (x.shape[0],)
        _dequantize_rowwise[grid](x, state_x, output, 1.0 / 127, n_elements, BLOCK_SIZE=x.shape[1], P2=P2)
        return output
llava_next/lib/python3.10/site-packages/bitsandbytes/triton/int8_matmul_mixed_dequantize.py ADDED
@@ -0,0 +1,205 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ from bitsandbytes.triton.triton_utils import is_triton_available
4
+
5
if not is_triton_available():

    def int8_matmul_mixed_dequantize(a, b, state_x, state_w, bias):
        # Stub used when triton is missing; callers must not rely on the result.
        return None
else:
    import triton
    import triton.language as tl
    from triton.ops.matmul_perf_model import early_config_prune, estimate_matmul_time

    # This is a matmul kernel based on triton.ops.matmul
    # It is modified to support rowwise quantized input and global quantized weight
    # It's purpose is fused matmul then dequantize
    # It does support bias.

    def init_to_zero(name):
        # Autotune pre-hook: zero the named kernel arg so split-K atomic adds
        # accumulate into a clean output buffer.
        return lambda nargs: nargs[name].zero_()

    def get_configs_io_bound():
        # Sweep of small-tile configs for IO-bound shapes, with split-K variants.
        configs = []
        for num_stages in [2, 3, 4, 5, 6]:
            for block_m in [16, 32]:
                for block_k in [32, 64]:
                    for block_n in [32, 64, 128, 256]:
                        num_warps = 2 if block_n <= 64 else 4
                        configs.append(
                            triton.Config(
                                {"BLOCK_M": block_m, "BLOCK_N": block_n, "BLOCK_K": block_k, "SPLIT_K": 1},
                                num_stages=num_stages,
                                num_warps=num_warps,
                            ),
                        )
                        # split_k
                        for split_k in [2, 4, 8, 16]:
                            configs.append(
                                triton.Config(
                                    {"BLOCK_M": block_m, "BLOCK_N": block_n, "BLOCK_K": block_k, "SPLIT_K": split_k},
                                    num_stages=num_stages,
                                    num_warps=num_warps,
                                    pre_hook=init_to_zero("C"),
                                ),
                            )
        return configs

    @triton.autotune(
        configs=[
            # basic configs for compute-bound matmuls
            triton.Config({"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8),
            triton.Config({"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8),
            triton.Config({"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=5, num_warps=2),
            # good for int8
            triton.Config({"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=3, num_warps=8),
            triton.Config({"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=3, num_warps=8),
            triton.Config({"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4),
            triton.Config({"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=5, num_warps=2),
            *get_configs_io_bound(),
        ],
        key=["M", "N", "K"],
        prune_configs_by={"early_config_prune": early_config_prune, "perf_model": estimate_matmul_time, "top_k": 10},
    )
    @triton.heuristics(
        {
            "EVEN_K": lambda args: args["K"] % (args["BLOCK_K"] * args["SPLIT_K"]) == 0,
        },
    )
    @triton.jit
    def _int8_matmul_mixed_dequantize(
        A,
        B,
        C,
        bias,
        state_x_ptr,
        state_w_ptr,
        M,
        N,
        K,
        divfactor: tl.constexpr,
        has_bias: tl.constexpr,
        stride_am,
        stride_ak,
        stride_bk,
        stride_bn,
        stride_cm,
        stride_cn,
        BLOCK_M: tl.constexpr,
        BLOCK_N: tl.constexpr,
        BLOCK_K: tl.constexpr,
        GROUP_M: tl.constexpr,
        SPLIT_K: tl.constexpr,
        EVEN_K: tl.constexpr,
        ACC_TYPE: tl.constexpr,
    ):
        # matrix multiplication
        pid = tl.program_id(0)
        pid_z = tl.program_id(1)
        grid_m = tl.cdiv(M, BLOCK_M)
        grid_n = tl.cdiv(N, BLOCK_N)
        # re-order program ID for better L2 performance
        width = GROUP_M * grid_n
        group_id = pid // width
        group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
        pid_m = group_id * GROUP_M + (pid % group_size)
        pid_n = (pid % width) // (group_size)
        # do matrix multiplication
        rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
        rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
        ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
        rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
        rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)
        # pointers
        A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
        B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)

        # rematerialize rm and rn to save registers
        rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
        rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)

        # Dequantization scales: one global factor for the weight, one
        # factor per output row for the rowwise-quantized input.
        w_factor = tl.load(state_w_ptr)
        x_factor = tl.load(state_x_ptr + ram)[:, None]

        # acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
        acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.int32)
        for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)):
            if EVEN_K:
                a = tl.load(A)
                b = tl.load(B)
            else:
                k_remaining = K - k * (BLOCK_K * SPLIT_K)
                a = tl.load(A, mask=rk[None, :] < k_remaining, other=0.0)
                b = tl.load(B, mask=rk[:, None] < k_remaining, other=0.0)
            acc += tl.dot(a, b)
            A += BLOCK_K * SPLIT_K * stride_ak
            B += BLOCK_K * SPLIT_K * stride_bk

        # Fused dequantize: divfactor is 1/(127*127).
        acc = w_factor * (x_factor * (acc * divfactor))
        acc = acc.to(C.dtype.element_ty)

        # conditionally add bias
        if has_bias:
            bias = tl.load(bias + rn).to(C.dtype.element_ty)
            acc = acc + bias[None, :]

        C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
        mask = (rm < M)[:, None] & (rn < N)[None, :]
        # handles write-back with reduction-splitting
        if SPLIT_K == 1:
            tl.store(C, acc, mask=mask)
        else:
            tl.atomic_add(C, acc, mask=mask)

    def int8_matmul_mixed_dequantize(a, b, state_x, state_w, bias):
        """Fused int8 matmul + dequantize: rowwise-quantized ``a`` (scales in
        ``state_x``) times globally-quantized ``b`` (scale in ``state_w``),
        returning fp16, with an optional fused bias."""
        device = a.device
        divfactor = 1.0 / (127.0 * 127.0)
        has_bias = 0 if bias is None else 1
        # handle non-contiguous inputs if necessary
        if a.stride(0) > 1 and a.stride(1) > 1:
            a = a.contiguous()
        if b.stride(0) > 1 and b.stride(1) > 1:
            b = b.contiguous()
        # checks constraints
        assert a.shape[1] == b.shape[0], "incompatible dimensions"
        M, K = a.shape
        _, N = b.shape
        # allocates output
        c = torch.empty((M, N), device=device, dtype=torch.float16)
        # accumulator types
        ACC_TYPE = tl.float32  # if a.dtype in [torch.float16, torch.bfloat16, torch.float32] else tl.int32
        # launch int8_matmul_mixed_dequantize kernel
        grid = lambda META: (triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]), META["SPLIT_K"])
        _int8_matmul_mixed_dequantize[grid](
            a,
            b,
            c,
            bias,
            state_x,
            state_w,
            M,
            N,
            K,
            divfactor,
            has_bias,
            a.stride(0),
            a.stride(1),
            b.stride(0),
            b.stride(1),
            c.stride(0),
            c.stride(1),
            GROUP_M=8,
            ACC_TYPE=ACC_TYPE,
        )
        return c
llava_next/lib/python3.10/site-packages/bitsandbytes/triton/int8_matmul_rowwise_dequantize.py ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch

from bitsandbytes.triton.triton_utils import is_triton_available

if not is_triton_available():
    # Triton is unavailable: keep the import surface stable with a stub
    # that accepts the same arguments and returns None.
    def int8_matmul_rowwise_dequantize(a, b, state_x, state_w, bias):
        return None
else:
    import triton
    import triton.language as tl
    from triton.ops.matmul_perf_model import early_config_prune, estimate_matmul_time

    # A matmul kernel derived from triton.ops.matmul, modified to consume a
    # rowwise-quantized input and a columnwise-quantized weight. Its purpose
    # is a fused matmul followed by dequantization; bias is supported.
19
def init_to_zero(name):
    """Return an autotune pre-hook that zeroes the kernel argument *name*."""

    def _hook(nargs):
        # SPLIT_K kernels accumulate into C with atomics, so C must start at 0.
        return nargs[name].zero_()

    return _hook
21
+
22
def get_configs_io_bound():
    """Enumerate io-bound autotune configs (small BLOCK_M, SPLIT_K variants)."""
    configs = []
    for stages in [2, 3, 4, 5, 6]:
        for bm in [16, 32]:
            for bk in [32, 64]:
                for bn in [32, 64, 128, 256]:
                    warps = 2 if bn <= 64 else 4
                    configs.append(
                        triton.Config(
                            {"BLOCK_M": bm, "BLOCK_N": bn, "BLOCK_K": bk, "SPLIT_K": 1},
                            num_stages=stages,
                            num_warps=warps,
                        ),
                    )
                    # SPLIT_K > 1 reduces via atomic adds, so C is pre-zeroed.
                    for sk in [2, 4, 8, 16]:
                        configs.append(
                            triton.Config(
                                {"BLOCK_M": bm, "BLOCK_N": bn, "BLOCK_K": bk, "SPLIT_K": sk},
                                num_stages=stages,
                                num_warps=warps,
                                pre_hook=init_to_zero("C"),
                            ),
                        )
    return configs
47
+
48
@triton.autotune(
    configs=[
        # basic configs for compute-bound matmuls
        triton.Config({"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8),
        triton.Config({"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8),
        triton.Config({"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=5, num_warps=2),
        # good for int8 (larger BLOCK_K amortizes the int8 loads)
        triton.Config({"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=3, num_warps=8),
        triton.Config({"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=3, num_warps=8),
        triton.Config({"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=5, num_warps=2),
        *get_configs_io_bound(),
    ],
    key=["M", "N", "K"],
    prune_configs_by={"early_config_prune": early_config_prune, "perf_model": estimate_matmul_time, "top_k": 10},
)
@triton.heuristics(
    {
        # EVEN_K lets the K-loop skip bounds masking entirely.
        "EVEN_K": lambda args: args["K"] % (args["BLOCK_K"] * args["SPLIT_K"]) == 0,
    },
)
@triton.jit
def _int8_matmul_rowwise_dequantize(
    A,  # int8 input, (M, K)
    B,  # int8 weight, (K, N)
    C,  # output, (M, N); dtype taken from C.dtype.element_ty below
    bias,  # optional 1-D bias of length N (only read when has_bias)
    state_x_ptr,  # per-row scales of A, length M
    state_w_ptr,  # per-column scales of B, length N
    M,
    N,
    K,
    divfactor,  # 1 / 127**2 dequantization factor
    has_bias: tl.constexpr,
    stride_am,
    stride_ak,
    stride_bk,
    stride_bn,
    stride_cm,
    stride_cn,
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
    BLOCK_K: tl.constexpr,
    GROUP_M: tl.constexpr,
    SPLIT_K: tl.constexpr,
    EVEN_K: tl.constexpr,
    ACC_TYPE: tl.constexpr,
):
    # matrix multiplication
    pid = tl.program_id(0)
    pid_z = tl.program_id(1)
    grid_m = tl.cdiv(M, BLOCK_M)
    grid_n = tl.cdiv(N, BLOCK_N)
    # re-order program ID for better L2 performance (grouped launch order)
    width = GROUP_M * grid_n
    group_id = pid // width
    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
    pid_m = group_id * GROUP_M + (pid % group_size)
    pid_n = (pid % width) // (group_size)
    # do matrix multiplication
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    # hints: wrapped indices are contiguous/aligned, enables vectorized loads
    ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
    rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
    rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)
    # pointers to this program's (BLOCK_M x BLOCK_K) / (BLOCK_K x BLOCK_N) tiles
    A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
    B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)

    # rematerialize rm and rn to save registers
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)

    # dequantization scales: per-column of B, per-row of A
    w_factor = tl.load(state_w_ptr + rbn)[None, :]
    x_factor = tl.load(state_x_ptr + ram)[:, None]

    # acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
    # NOTE: accumulation is forced to int32 (exact int8 dot products);
    # ACC_TYPE is kept in the signature for compatibility with the launcher.
    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.int32)
    for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)):
        if EVEN_K:
            a = tl.load(A)
            b = tl.load(B)
        else:
            # mask the ragged tail of the K dimension
            k_remaining = K - k * (BLOCK_K * SPLIT_K)
            a = tl.load(A, mask=rk[None, :] < k_remaining, other=0.0)
            b = tl.load(B, mask=rk[:, None] < k_remaining, other=0.0)
        acc += tl.dot(a, b)
        A += BLOCK_K * SPLIT_K * stride_ak
        B += BLOCK_K * SPLIT_K * stride_bk

    # dequantize: scale the int32 product by row/column factors and 1/127^2
    acc = w_factor * (x_factor * (acc * divfactor))
    acc = acc.to(C.dtype.element_ty)

    if has_bias:
        bias = tl.load(bias + rn).to(C.dtype.element_ty)
        acc = acc + bias[None, :]

    C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
    mask = (rm < M)[:, None] & (rn < N)[None, :]
    # handles write-back with reduction-splitting
    if SPLIT_K == 1:
        tl.store(C, acc, mask=mask)
    else:
        tl.atomic_add(C, acc, mask=mask)
163
+
164
def int8_matmul_rowwise_dequantize(a, b, state_x, state_w, bias):
    """Fused int8 matmul with rowwise dequantization, returning fp16.

    ``a`` (M, K) and ``b`` (K, N) are int8; ``state_x`` holds per-row scales
    of ``a`` and ``state_w`` per-column scales of ``b``. The kernel rescales
    the int32 product by ``state_x[:, None] * state_w[None, :] / 127**2`` and
    adds ``bias`` when it is not None.
    """
    divfactor = 1.0 / (127.0 * 127.0)

    has_bias = 0 if bias is None else 1

    device = a.device

    # handle non-contiguous inputs if necessary: the kernel needs at least
    # one unit-stride axis per operand.
    if a.stride(0) > 1 and a.stride(1) > 1:
        a = a.contiguous()
    if b.stride(0) > 1 and b.stride(1) > 1:
        b = b.contiguous()

    assert a.shape[1] == b.shape[0], "incompatible dimensions"
    M, K = a.shape
    _, N = b.shape

    # fp16 output buffer
    c = torch.empty((M, N), device=device, dtype=torch.float16)

    # float accumulator type handed to the kernel
    ACC_TYPE = tl.float32

    def grid(META):
        return (triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]), META["SPLIT_K"])

    _int8_matmul_rowwise_dequantize[grid](
        a, b, c, bias,
        state_x, state_w,
        M, N, K,
        divfactor, has_bias,
        a.stride(0), a.stride(1),
        b.stride(0), b.stride(1),
        c.stride(0), c.stride(1),
        GROUP_M=8,
        ACC_TYPE=ACC_TYPE,
    )
    return c
llava_next/lib/python3.10/site-packages/bitsandbytes/triton/quantize_columnwise_and_transpose.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import math

import torch

from bitsandbytes.triton.triton_utils import is_triton_available

if not is_triton_available():
    # Triton missing: stub keeps the import surface stable; callers get None.
    def quantize_columnwise_and_transpose(x: torch.Tensor):
        return None
else:
    import triton
    import triton.language as tl

    # This kernel does fused columnwise quantization and transpose.

    # TODO: autotune this better.
    @triton.autotune(
        configs=[
            triton.Config({}, num_stages=1),
            triton.Config({}, num_stages=2),
            triton.Config({}, num_stages=4),
            triton.Config({}, num_stages=8),
            triton.Config({}, num_stages=16),
            triton.Config({}, num_stages=1, num_warps=8),
            triton.Config({}, num_stages=2, num_warps=8),
            triton.Config({}, num_stages=4, num_warps=8),
            triton.Config({}, num_stages=8, num_warps=8),
            triton.Config({}, num_stages=16, num_warps=8),
            triton.Config({}, num_warps=1),
            triton.Config({}, num_warps=2),
            triton.Config({}, num_warps=4),
            triton.Config({}, num_warps=8),
        ],
        key=["n_elements"],
    )
    @triton.jit
    def _quantize_columnwise_and_transpose(
        x_ptr,  # input, (M, N), row-major
        output_ptr,  # int8 output, (N, M) — the transpose
        output_maxs,  # per-column absmax, length N
        n_elements,
        M: tl.constexpr,
        N: tl.constexpr,
        BLOCK_SIZE: tl.constexpr,
        P2: tl.constexpr,  # next power of two >= M (tl.arange needs a pow2)
    ):
        # One program per input column: pid is the column index.
        pid = tl.program_id(axis=0)
        block_start = pid
        p2_arange = tl.arange(0, P2)
        p2_arange_mask = p2_arange < M
        # stride N walks down a column of the row-major input
        arange = p2_arange * N
        offsets = block_start + arange
        x = tl.load(x_ptr + offsets, mask=p2_arange_mask)
        abs_x = tl.abs(x)
        # columnwise absmax; masked lanes contribute 0
        max_val = tl.max(tl.where(p2_arange_mask, abs_x, 0), axis=0)
        # round-to-nearest into [-127, 127]
        # NOTE(review): a zero column makes max_val 0 and divides by zero —
        # presumably callers never pass all-zero columns; confirm upstream.
        output = tl.libdevice.llrint(127.0 * (x / max_val))

        # write the column as row `pid` of the transposed output
        new_start = pid * M
        new_offsets = new_start + p2_arange
        tl.store(output_ptr + new_offsets, output, mask=p2_arange_mask)
        tl.store(output_maxs + pid, max_val)

    def quantize_columnwise_and_transpose(x: torch.Tensor):
        """Quantize (M, N) ``x`` columnwise to int8 and transpose to (N, M).

        Returns ``(output, output_maxs)`` where ``output_maxs`` holds the
        per-column absmax (fp16) used as dequantization scales.
        """
        M, N = x.shape
        output = torch.empty(N, M, device=x.device, dtype=torch.int8)
        output_maxs = torch.empty(x.shape[1], device=x.device, dtype=torch.float16)

        # next power of two >= M, required by tl.arange in the kernel
        P2 = int(2 ** (math.ceil(math.log2(M))))

        assert x.is_cuda and output.is_cuda
        n_elements = output.numel()
        # BLOCK_SIZE is M, so the grid launches one program per column (N total)
        grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        _quantize_columnwise_and_transpose[grid](x, output, output_maxs, n_elements, M, N, BLOCK_SIZE=M, P2=P2)
        return output, output_maxs
llava_next/lib/python3.10/site-packages/bitsandbytes/triton/quantize_global.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch

from bitsandbytes.triton.triton_utils import is_triton_available

if not is_triton_available():
    # Triton missing: stubs keep the import surface stable; callers get None.
    def quantize_global_transpose(input):
        return None

    def quantize_global(x: torch.Tensor):
        return None
else:
    import triton
    import triton.language as tl

    # global quantize
    @triton.autotune(
        configs=[
            triton.Config({"BLOCK_SIZE": 1024}, num_warps=4),
            triton.Config({"BLOCK_SIZE": 2048}, num_stages=1),
        ],
        key=["n_elements"],
    )
    @triton.jit
    def _quantize_global(
        x_ptr,  # flat input tensor
        absmax_inv_ptr,  # scalar 1/absmax
        output_ptr,  # flat int8 output
        n_elements,
        BLOCK_SIZE: tl.constexpr,
    ):
        # One program per BLOCK_SIZE chunk of the flattened tensor.
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        offsets = block_start + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        x = tl.load(x_ptr + offsets, mask=mask)
        absmax_inv = tl.load(absmax_inv_ptr)
        # round-to-nearest into [-127, 127] using the single global scale
        output = tl.libdevice.llrint(127.0 * (x * absmax_inv))
        tl.store(output_ptr + offsets, output, mask=mask)

    def quantize_global(x: torch.Tensor):
        """Quantize ``x`` to int8 with one global absmax scale.

        Returns ``(output, absmax)``; dequantize via ``output * absmax / 127``.
        """
        absmax = x.abs().max().unsqueeze(0)
        absmax_inv = 1.0 / absmax
        # Fix: allocate on x's device rather than the default "cuda" device,
        # so inputs on secondary GPUs (e.g. cuda:1) are handled correctly.
        # This matches the sibling quantize_rowwise implementation.
        output = torch.empty(*x.shape, device=x.device, dtype=torch.int8)
        assert x.is_cuda and output.is_cuda
        n_elements = output.numel()
        grid = lambda meta: (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        _quantize_global[grid](x, absmax_inv, output, n_elements)
        return output, absmax

    # global quantize and transpose
    @triton.autotune(
        configs=[
            # Fix: the same config was listed twice upstream; one copy removed
            # (identical configs only make autotuning benchmark it twice).
            triton.Config({"BLOCK_M": 128, "BLOCK_N": 128, "GROUP_M": 8}, num_warps=4),
            # ...
        ],
        key=["M", "N"],
    )
    @triton.jit
    def _quantize_global_transpose(
        A,  # input, (M, N)
        absmax_inv_ptr,  # scalar 1/absmax
        B,  # int8 output, (N, M) — the transpose
        stride_am,
        stride_an,
        stride_bn,
        stride_bm,
        M,
        N,
        BLOCK_M: tl.constexpr,
        BLOCK_N: tl.constexpr,
        GROUP_M: tl.constexpr,
    ):
        pid = tl.program_id(0)
        grid_m = (M + BLOCK_M - 1) // BLOCK_M
        grid_n = (N + BLOCK_N - 1) // BLOCK_N

        # grouped launch order for better L2 reuse
        width = GROUP_M * grid_n
        group_id = pid // width
        group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
        pid_m = group_id * GROUP_M + (pid % group_size)
        pid_n = (pid % width) // group_size

        rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
        rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
        A = A + (rm[:, None] * stride_am + rn[None, :] * stride_an)
        mask = (rm < M)[:, None] & (rn < N)[None, :]
        a = tl.load(A, mask=mask)
        absmax_inv = tl.load(absmax_inv_ptr)

        # rematerialize to save registers
        rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
        rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
        # note the swapped strides: the tile is written transposed into B
        B = B + (rm[:, None] * stride_bm + rn[None, :] * stride_bn)
        mask = (rm < M)[:, None] & (rn < N)[None, :]

        output = tl.libdevice.llrint(127.0 * (a * absmax_inv))

        tl.store(B, output, mask=mask)

    def quantize_global_transpose(input):
        """Globally quantize (M, N) ``input`` to int8 and transpose to (N, M).

        Returns ``(out, absmax)`` with the scalar absmax used as the scale.
        """
        absmax = input.abs().max().unsqueeze(0)
        absmax_inv = 1.0 / absmax
        M, N = input.shape
        # Fix: allocate on input's device rather than the default "cuda"
        # device (see quantize_global above).
        out = torch.empty(N, M, device=input.device, dtype=torch.int8)

        assert out.size(0) == N and out.size(1) == M
        # kernel assumes each tensor has a unit-stride axis
        assert input.stride(0) == 1 or input.stride(1) == 1
        assert out.stride(0) == 1 or out.stride(1) == 1

        grid = lambda META: (triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]),)
        _quantize_global_transpose[grid](
            input,
            absmax_inv,
            out,
            input.stride(0),
            input.stride(1),
            out.stride(0),
            out.stride(1),
            M,
            N,
        )
        return out, absmax
llava_next/lib/python3.10/site-packages/bitsandbytes/triton/quantize_rowwise.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import math

import torch

from bitsandbytes.triton.triton_utils import is_triton_available

if not is_triton_available():
    # Triton missing: stub keeps the import surface stable; callers get None.
    def quantize_rowwise(x: torch.Tensor):
        return None
else:
    import triton
    import triton.language as tl

    # rowwise quantize

    # TODO: autotune this better.
    @triton.autotune(
        configs=[
            triton.Config({}, num_stages=1, num_warps=8),
            triton.Config({}, num_stages=2, num_warps=8),
            triton.Config({}, num_stages=4, num_warps=8),
            triton.Config({}, num_stages=8, num_warps=8),
            triton.Config({}, num_stages=1),
            triton.Config({}, num_stages=2),
            triton.Config({}, num_stages=4),
            triton.Config({}, num_stages=8),
            triton.Config({}, num_warps=1),
            triton.Config({}, num_warps=2),
            triton.Config({}, num_warps=4),
            triton.Config({}, num_warps=8),
        ],
        key=["n_elements"],
    )
    @triton.jit
    def _quantize_rowwise(
        x_ptr,  # input, (rows, BLOCK_SIZE), row-major
        output_ptr,  # int8 output, same shape
        output_maxs,  # per-row absmax, length rows
        n_elements,
        BLOCK_SIZE: tl.constexpr,  # the row length (x.shape[1])
        P2: tl.constexpr,  # next power of two >= BLOCK_SIZE (tl.arange needs a pow2)
    ):
        # One program per row: pid is the row index.
        pid = tl.program_id(axis=0)
        block_start = pid * BLOCK_SIZE
        arange = tl.arange(0, P2)
        offsets = block_start + arange
        row_mask = arange < BLOCK_SIZE
        x = tl.load(x_ptr + offsets, mask=row_mask)

        abs_x = tl.abs(x)
        # rowwise absmax; masked lanes contribute 0
        max_val = tl.max(tl.where(row_mask, abs_x, 0), axis=0)
        # round-to-nearest into [-127, 127]
        # NOTE(review): an all-zero row makes max_val 0 and divides by zero —
        # presumably callers never pass all-zero rows; confirm upstream.
        output = tl.libdevice.llrint(127.0 * (x / max_val))
        tl.store(output_ptr + offsets, output, mask=row_mask)
        tl.store(output_maxs + pid, max_val)

    def quantize_rowwise(x: torch.Tensor):
        """Quantize each row of 2-D ``x`` to int8 by its own absmax.

        Returns ``(output, output_maxs)`` where ``output_maxs`` (fp16) holds
        the per-row scales.
        """
        output = torch.empty(*x.shape, device=x.device, dtype=torch.int8)
        output_maxs = torch.empty(x.shape[0], device=x.device, dtype=torch.float16)

        # next power of two >= row length, required by tl.arange in the kernel
        P2 = int(2 ** (math.ceil(math.log2(x.shape[1]))))

        assert x.is_cuda and output.is_cuda
        n_elements = output.numel()
        # one program per row (meta is unused: the grid is fixed by the shape)
        grid = lambda meta: (x.shape[0],)
        _quantize_rowwise[grid](x, output, output_maxs, n_elements, BLOCK_SIZE=x.shape[1], P2=P2)
        return output, output_maxs
llava_next/lib/python3.10/site-packages/bitsandbytes/triton/triton_utils.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
import importlib.util


def is_triton_available():
    """Return True when the ``triton`` package is importable.

    Uses ``importlib.util.find_spec`` so availability is checked without
    actually importing (and paying the startup cost of) Triton.
    """
    # Fix: import importlib.util explicitly — a bare `import importlib` does
    # not guarantee the `util` submodule is set as an attribute, so
    # `importlib.util` could raise AttributeError in a fresh interpreter.
    return importlib.util.find_spec("triton") is not None
parrot/lib/python3.10/site-packages/transformers/__pycache__/convert_graph_to_onnx.cpython-310.pyc ADDED
Binary file (16.4 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/__pycache__/file_utils.cpython-310.pyc ADDED
Binary file (3.72 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/__pycache__/modeling_flax_pytorch_utils.cpython-310.pyc ADDED
Binary file (11.8 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/__pycache__/time_series_utils.cpython-310.pyc ADDED
Binary file (8.93 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/__pycache__/tokenization_utils.cpython-310.pyc ADDED
Binary file (29 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/commands/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (812 Bytes). View file
 
parrot/lib/python3.10/site-packages/transformers/commands/__pycache__/add_new_model_like.cpython-310.pyc ADDED
Binary file (48 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/commands/__pycache__/env.cpython-310.pyc ADDED
Binary file (4.11 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/commands/__pycache__/lfs.cpython-310.pyc ADDED
Binary file (7.27 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/commands/__pycache__/pt_to_tf.cpython-310.pyc ADDED
Binary file (13.2 kB). View file
 
parrot/lib/python3.10/site-packages/transformers/commands/__pycache__/run.cpython-310.pyc ADDED
Binary file (3.43 kB). View file