{"repo_id":"bitsandbytes","entity_id":"py:setup","uri":"program://bitsandbytes/module/setup#L1-L41","kind":"module","name":"setup","path":"setup.py","language":"python","start_line":1,"end_line":41,"context_start_line":1,"context_end_line":41,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom distutils.errors import DistutilsModuleError\nfrom warnings import warn\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.build_py import build_py\nfrom setuptools.dist import Distribution\n\n\n# Tested with wheel v0.29.0\nclass BinaryDistribution(Distribution):\n def has_ext_modules(self):\n return True\n\n\nclass ExtBuildPy(build_py):\n def run(self):\n # build_cmake needs to be called prior to build_py, as the latter\n # collects the files output into the package directory.\n try:\n self.run_command(\"build_cmake\")\n except DistutilsModuleError:\n warn(\n \"scikit-build-core not installed, CMake will not be invoked automatically. \"\n \"Please install scikit-build-core or run CMake manually to build extensions.\"\n )\n super().run()\n\n\nsetup(\n version=\"0.48.0.dev0\",\n packages=find_packages(),\n distclass=BinaryDistribution,\n cmake_source_dir=\".\",\n cmdclass={\n \"build_py\": ExtBuildPy,\n },\n)","source_hash":"79736ea7b3eb21628c1e84644a8e088b05a6131123232eceef27ee9a0235337f","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:setup.BinaryDistribution","uri":"program://bitsandbytes/class/setup.BinaryDistribution#L14-L16","kind":"class","name":"BinaryDistribution","path":"setup.py","language":"python","start_line":14,"end_line":16,"context_start_line":1,"context_end_line":36,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom distutils.errors import DistutilsModuleError\nfrom warnings import warn\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.build_py import build_py\nfrom setuptools.dist import Distribution\n\n\n# Tested with wheel v0.29.0\nclass BinaryDistribution(Distribution):\n def has_ext_modules(self):\n return True\n\n\nclass ExtBuildPy(build_py):\n def run(self):\n # build_cmake needs to be called prior to build_py, as the latter\n # collects the files output into the package directory.\n try:\n self.run_command(\"build_cmake\")\n except DistutilsModuleError:\n warn(\n \"scikit-build-core not installed, CMake will not be invoked automatically. \"\n \"Please install scikit-build-core or run CMake manually to build extensions.\"\n )\n super().run()\n\n\nsetup(\n version=\"0.48.0.dev0\",\n packages=find_packages(),\n distclass=BinaryDistribution,","source_hash":"79736ea7b3eb21628c1e84644a8e088b05a6131123232eceef27ee9a0235337f","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:setup.ExtBuildPy","uri":"program://bitsandbytes/class/setup.ExtBuildPy#L19-L30","kind":"class","name":"ExtBuildPy","path":"setup.py","language":"python","start_line":19,"end_line":30,"context_start_line":1,"context_end_line":41,"code":"# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom distutils.errors import DistutilsModuleError\nfrom warnings import warn\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.build_py import build_py\nfrom setuptools.dist import Distribution\n\n\n# Tested with wheel v0.29.0\nclass BinaryDistribution(Distribution):\n def has_ext_modules(self):\n return True\n\n\nclass ExtBuildPy(build_py):\n def run(self):\n # build_cmake needs to be called prior to build_py, as the latter\n # collects the files output into the package directory.\n try:\n self.run_command(\"build_cmake\")\n except DistutilsModuleError:\n warn(\n \"scikit-build-core not installed, CMake will not be invoked automatically. \"\n \"Please install scikit-build-core or run CMake manually to build extensions.\"\n )\n super().run()\n\n\nsetup(\n version=\"0.48.0.dev0\",\n packages=find_packages(),\n distclass=BinaryDistribution,\n cmake_source_dir=\".\",\n cmdclass={\n \"build_py\": ExtBuildPy,\n },\n)","source_hash":"79736ea7b3eb21628c1e84644a8e088b05a6131123232eceef27ee9a0235337f","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:setup.has_ext_modules","uri":"program://bitsandbytes/function/setup.has_ext_modules#L15-L16","kind":"function","name":"has_ext_modules","path":"setup.py","language":"python","start_line":15,"end_line":16,"context_start_line":1,"context_end_line":36,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom distutils.errors import DistutilsModuleError\nfrom warnings import warn\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.build_py import build_py\nfrom setuptools.dist import Distribution\n\n\n# Tested with wheel v0.29.0\nclass BinaryDistribution(Distribution):\n def has_ext_modules(self):\n return True\n\n\nclass ExtBuildPy(build_py):\n def run(self):\n # build_cmake needs to be called prior to build_py, as the latter\n # collects the files output into the package directory.\n try:\n self.run_command(\"build_cmake\")\n except DistutilsModuleError:\n warn(\n \"scikit-build-core not installed, CMake will not be invoked automatically. \"\n \"Please install scikit-build-core or run CMake manually to build extensions.\"\n )\n super().run()\n\n\nsetup(\n version=\"0.48.0.dev0\",\n packages=find_packages(),\n distclass=BinaryDistribution,","source_hash":"79736ea7b3eb21628c1e84644a8e088b05a6131123232eceef27ee9a0235337f","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:setup.run","uri":"program://bitsandbytes/function/setup.run#L20-L30","kind":"function","name":"run","path":"setup.py","language":"python","start_line":20,"end_line":30,"context_start_line":1,"context_end_line":41,"code":"# Copyright (c) Facebook, Inc. 
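`BinaryDistribution.has_ext_modules()` returning True is what forces pip/wheel to tag the package as platform-specific even though no Python extension modules are declared; the shared libraries come from CMake instead. A minimal sketch of the same hook pattern, with a hypothetical package name and the stock `egg_info` command standing in for `build_cmake` so the sketch runs without scikit-build-core:

# sketch_setup.py -- hypothetical, mirrors the pattern used above
from setuptools import setup
from setuptools.command.build_py import build_py
from setuptools.dist import Distribution


class BinaryDistribution(Distribution):
    # Forces a platform wheel (e.g. ...-linux_x86_64.whl) even with no
    # declared ext_modules; binaries are produced outside setuptools.
    def has_ext_modules(self):
        return True


class HookedBuildPy(build_py):
    def run(self):
        # Chain a prerequisite command before the stock build_py, exactly
        # as ExtBuildPy chains "build_cmake". "egg_info" is a stock command
        # used here only for illustration.
        self.run_command("egg_info")
        super().run()


setup(
    name="mypkg",  # hypothetical
    version="0.0.1",
    distclass=BinaryDistribution,
    cmdclass={"build_py": HookedBuildPy},
)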
# ==== bitsandbytes/check_bnb_install.py (python, lines 1-21) ====

import torch

import bitsandbytes as bnb

p = torch.nn.Parameter(torch.rand(10, 10).cuda())
a = torch.rand(10, 10).cuda()

p1 = p.data.sum().item()

adam = bnb.optim.Adam([p])

out = a * p
loss = out.sum()
loss.backward()
adam.step()

p2 = p.data.sum().item()

assert p1 != p2
print("SUCCESS!")
print("Installation was successful!")
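The script asserts that one Adam step changed the parameter sum, which is enough to prove the native library loaded and ran a kernel. A hedged variant of the same check that fails fast with a clearer message when no GPU is visible, using only the APIs exercised above:

# check_bnb_install_verbose.py -- hypothetical variant of the smoke test
import torch

import bitsandbytes as bnb

if not torch.cuda.is_available():
    raise SystemExit("No CUDA device visible to PyTorch; this check requires a GPU.")

p = torch.nn.Parameter(torch.rand(10, 10, device="cuda"))
before = p.detach().clone()

adam = bnb.optim.Adam([p])
(torch.rand(10, 10, device="cuda") * p).sum().backward()
adam.step()

# torch.equal is exact; a successful optimizer step must change the weights.
assert not torch.equal(before, p.detach()), "bnb Adam step did not update the parameter"
print("SUCCESS!")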
# ==== bitsandbytes/install_cuda.py (python, lines 1-100) ====

import os
import subprocess
import sys
from urllib.request import urlretrieve

cuda_versions = {
    "118": "https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run",
    "120": "https://developer.download.nvidia.com/compute/cuda/12.0.1/local_installers/cuda_12.0.1_525.85.12_linux.run",
    "121": "https://developer.download.nvidia.com/compute/cuda/12.1.1/local_installers/cuda_12.1.1_530.30.02_linux.run",
    "122": "https://developer.download.nvidia.com/compute/cuda/12.2.2/local_installers/cuda_12.2.2_535.104.05_linux.run",
    "123": "https://developer.download.nvidia.com/compute/cuda/12.3.2/local_installers/cuda_12.3.2_545.23.08_linux.run",
    "124": "https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda_12.4.1_550.54.15_linux.run",
    "125": "https://developer.download.nvidia.com/compute/cuda/12.5.1/local_installers/cuda_12.5.1_555.42.06_linux.run",
    "126": "https://developer.download.nvidia.com/compute/cuda/12.6.2/local_installers/cuda_12.6.2_560.35.03_linux.run",
}


def install_cuda(version, base_path, download_path):
    formatted_version = f"{version[:-1]}.{version[-1]}"
    folder = f"cuda-{formatted_version}"
    install_path = os.path.join(base_path, folder)

    if os.path.exists(install_path):
        print(f"Removing existing CUDA version {version} at {install_path}...")
        subprocess.run(["rm", "-rf", install_path], check=True)

    url = cuda_versions[version]
    filename = url.split("/")[-1]
    filepath = os.path.join(download_path, filename)

    if not os.path.exists(filepath):
        print(f"Downloading CUDA version {version} from {url}...")
        urlretrieve(url, filepath)
    else:
        print(f"Installer for CUDA version {version} already downloaded.")

    # Make the installer executable
    subprocess.run(["chmod", "+x", filepath], check=True)

    # Install CUDA
    print(f"Installing CUDA version {version}...")
    install_command = [
        "bash",
        filepath,
        "--no-drm",
        "--no-man-page",
        "--override",
        "--toolkitpath=" + install_path,
        "--toolkit",
        "--silent",
    ]

    print(f"Running command: {' '.join(install_command)}")

    try:
        subprocess.run(install_command, check=True)
    except subprocess.CalledProcessError as e:
        print(f"Installation failed for CUDA version {version}: {e}")
        return
    finally:
        # Delete the installer file
        os.remove(filepath)

    print(f"CUDA version {version} installed at {install_path}")


def main():
    user_base_path = os.path.expanduser("~/cuda")
    system_base_path = "/usr/local/cuda"
    base_path = user_base_path  # default to user-specific installation
    download_path = "/tmp"  # default download path

    if len(sys.argv) < 2:
        print("Usage: python install_cuda.py <version|all> [user|system] [download_path]")
        sys.exit(1)

    version = sys.argv[1]
    if len(sys.argv) > 2:
        base_path = system_base_path if sys.argv[2] == "system" else user_base_path
    if len(sys.argv) > 3:
        download_path = sys.argv[3]

    if not os.path.exists(base_path):
        os.makedirs(base_path)
    if not os.path.exists(download_path):
        os.makedirs(download_path)

    # Install CUDA version(s)
    if version == "all":
        for ver in cuda_versions:
            install_cuda(ver, base_path, download_path)
    elif version in cuda_versions:
        install_cuda(version, base_path, download_path)
    else:
        print(f"Invalid CUDA version: {version}. Available versions are: {', '.join(cuda_versions.keys())}")
        sys.exit(1)


if __name__ == "__main__":
    main()
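The version key encodes the major and minor digits, so the CLI accepts invocations such as `python install_cuda.py 124 user /tmp`. A small demonstration of the folder mapping performed by `install_cuda()` (pure string logic, safe to run anywhere):

# How a version key maps to its install folder, per install_cuda() above.
cuda_keys = ("118", "120", "124", "126")
for version in cuda_keys:
    formatted = f"{version[:-1]}.{version[-1]}"  # "118" -> "11.8", "124" -> "12.4"
    print(f"{version} -> cuda-{formatted}")      # folder created under base_path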
# ==== bitsandbytes/tests/helpers.py (python, lines 1-114) ====

import functools
from io import BytesIO
from itertools import product
import os
import random
from typing import Any

import torch

from bitsandbytes.cextension import HIP_ENVIRONMENT

test_dims_rng = random.Random(42)


TRUE_FALSE = (True, False)
BOOLEAN_TRIPLES = list(product(TRUE_FALSE, repeat=3))  # all combinations of (bool, bool, bool)
BOOLEAN_TUPLES = list(product(TRUE_FALSE, repeat=2))  # all combinations of (bool, bool)


@functools.cache
def get_available_devices(no_cpu=False):
    if "BNB_TEST_DEVICE" in os.environ:
        # If the environment variable is set, use it directly.
        device = os.environ["BNB_TEST_DEVICE"]
        return [] if no_cpu and device == "cpu" else [device]

    devices = [] if HIP_ENVIRONMENT else ["cpu"] if not no_cpu else []

    if hasattr(torch, "accelerator"):
        # PyTorch 2.6+ - determine accelerator using agnostic API.
        if torch.accelerator.is_available():
            devices += [str(torch.accelerator.current_accelerator())]
    else:
        if torch.cuda.is_available():
            devices += ["cuda"]

        if torch.backends.mps.is_available():
            devices += ["mps"]

        if hasattr(torch, "xpu") and torch.xpu.is_available():
            devices += ["xpu"]

        custom_backend_name = torch._C._get_privateuse1_backend_name()
        custom_backend_module = getattr(torch, custom_backend_name, None)
        custom_backend_is_available_fn = getattr(custom_backend_module, "is_available", None)

        if custom_backend_is_available_fn and custom_backend_module.is_available():
            devices += [custom_backend_name]

    return devices


def torch_save_to_buffer(obj):
    buffer = BytesIO()
    torch.save(obj, buffer)
    buffer.seek(0)
    return buffer


def torch_load_from_buffer(buffer):
    buffer.seek(0)
    obj = torch.load(buffer, weights_only=False)
    buffer.seek(0)
    return obj


def get_test_dims(min: int, max: int, *, n: int) -> list[int]:
    return [test_dims_rng.randint(min, max) for _ in range(n)]


def format_with_label(label: str, value: Any) -> str:
    if isinstance(value, bool):
        formatted = "T" if value else "F"
    elif isinstance(value, (list, tuple)) and all(isinstance(v, bool) for v in value):
        formatted = "".join("T" if b else "F" for b in value)
    elif isinstance(value, torch.dtype):
        formatted = describe_dtype(value)
    else:
        formatted = str(value)
    return f"{label}={formatted}"


def id_formatter(label: str):
    """
    Return a function that formats the value given to it with the given label.
    """
    return lambda value: format_with_label(label, value)


DTYPE_NAMES = {
    torch.bfloat16: "bf16",
    torch.bool: "bool",
    torch.float16: "fp16",
    torch.float32: "fp32",
    torch.float64: "fp64",
    torch.int32: "int32",
    torch.int64: "int64",
    torch.int8: "int8",
}


def describe_dtype(dtype: torch.dtype) -> str:
    return DTYPE_NAMES.get(dtype) or str(dtype).rpartition(".")[2]


def is_supported_on_hpu(
    quant_type: str = "nf4", dtype: torch.dtype = torch.bfloat16, quant_storage: torch.dtype = torch.uint8
) -> bool:
    """
    Check if the given quant_type, dtype and quant_storage are supported on HPU.
    """
    if quant_type == "fp4" or dtype == torch.float16 or quant_storage not in (torch.uint8, torch.bfloat16):
        return False
    return True
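These helpers exist to feed pytest parametrization; a sketch of the intended usage (the test body is hypothetical, but the pattern matches tests/test_linear8bitlt.py below):

# Hypothetical test demonstrating the helpers above.
import pytest
import torch

from tests.helpers import TRUE_FALSE, describe_dtype, get_available_devices, id_formatter


@pytest.mark.parametrize("device", get_available_devices())
@pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16], ids=describe_dtype)
@pytest.mark.parametrize("requires_grad", TRUE_FALSE, ids=id_formatter("requires_grad"))
def test_tensor_creation(device, dtype, requires_grad):
    # Test IDs render as e.g. "cpu-bf16-requires_grad=T" thanks to the formatters.
    x = torch.zeros(4, 4, dtype=dtype, device=device, requires_grad=requires_grad)
    assert x.dtype == dtype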
# ==== bitsandbytes/tests/conftest.py (python, lines 1-46) ====

import gc
import random

import numpy as np
import pytest
import torch


def _set_seed():
    torch.manual_seed(0)
    torch.cuda.manual_seed_all(0)
    torch.mps.manual_seed(0)
    np.random.seed(0)
    random.seed(0)


def pytest_runtest_call(item):
    try:
        _set_seed()
        item.runtest()
    except AssertionError as ae:
        if str(ae) == "Torch not compiled with CUDA enabled":
            pytest.skip("Torch not compiled with CUDA enabled")
        raise
    except RuntimeError as re:
        # CUDA-enabled Torch build, but no CUDA-capable device found
        if "Found no NVIDIA driver on your system" in str(re):
            pytest.skip("No NVIDIA driver found")
        raise


@pytest.hookimpl(trylast=True)
def pytest_runtest_teardown(item, nextitem):
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    elif torch.backends.mps.is_available() and torch.backends.mps.is_built():
        torch.mps.empty_cache()


@pytest.fixture(scope="session")
def requires_cuda() -> bool:
    cuda_available = torch.cuda.is_available()
    if not cuda_available:
        pytest.skip("CUDA is required")
    return cuda_available
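The `requires_cuda` fixture is consumed purely by naming it in a test or fixture signature, as the `linear8bit` fixture below does; a minimal hypothetical example:

# Hypothetical test: requesting the session-scoped fixture skips the test
# (rather than failing it) on machines without CUDA.
def test_gpu_only_feature(requires_cuda):
    import torch

    assert torch.cuda.is_available()  # guaranteed if we got past the fixture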
cuda_available","source_hash":"4efdf5952b7fb2b9a0fa16963063486f79bcb77c6f36de9f9048d366987009e7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.conftest.pytest_runtest_teardown","uri":"program://bitsandbytes/function/tests.conftest.pytest_runtest_teardown#L33-L38","kind":"function","name":"pytest_runtest_teardown","path":"tests/conftest.py","language":"python","start_line":33,"end_line":38,"context_start_line":13,"context_end_line":46,"code":" np.random.seed(0)\n random.seed(0)\n\n\ndef pytest_runtest_call(item):\n try:\n _set_seed()\n item.runtest()\n except AssertionError as ae:\n if str(ae) == \"Torch not compiled with CUDA enabled\":\n pytest.skip(\"Torch not compiled with CUDA enabled\")\n raise\n except RuntimeError as re:\n # CUDA-enabled Torch build, but no CUDA-capable device found\n if \"Found no NVIDIA driver on your system\" in str(re):\n pytest.skip(\"No NVIDIA driver found\")\n raise\n\n\n@pytest.hookimpl(trylast=True)\ndef pytest_runtest_teardown(item, nextitem):\n gc.collect()\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n elif torch.backends.mps.is_available() and torch.backends.mps.is_built():\n torch.mps.empty_cache()\n\n\n@pytest.fixture(scope=\"session\")\ndef requires_cuda() -> bool:\n cuda_available = torch.cuda.is_available()\n if not cuda_available:\n pytest.skip(\"CUDA is required\")\n return cuda_available","source_hash":"4efdf5952b7fb2b9a0fa16963063486f79bcb77c6f36de9f9048d366987009e7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.conftest.requires_cuda","uri":"program://bitsandbytes/function/tests.conftest.requires_cuda#L42-L46","kind":"function","name":"requires_cuda","path":"tests/conftest.py","language":"python","start_line":42,"end_line":46,"context_start_line":22,"context_end_line":46,"code":" if str(ae) == \"Torch not compiled with CUDA enabled\":\n pytest.skip(\"Torch not compiled with CUDA enabled\")\n raise\n except RuntimeError as re:\n # CUDA-enabled Torch build, but no CUDA-capable device found\n if \"Found no NVIDIA driver on your system\" in str(re):\n pytest.skip(\"No NVIDIA driver found\")\n raise\n\n\n@pytest.hookimpl(trylast=True)\ndef pytest_runtest_teardown(item, nextitem):\n gc.collect()\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n elif torch.backends.mps.is_available() and torch.backends.mps.is_built():\n torch.mps.empty_cache()\n\n\n@pytest.fixture(scope=\"session\")\ndef requires_cuda() -> bool:\n cuda_available = torch.cuda.is_available()\n if not cuda_available:\n pytest.skip(\"CUDA is required\")\n return cuda_available","source_hash":"4efdf5952b7fb2b9a0fa16963063486f79bcb77c6f36de9f9048d366987009e7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_linear8bitlt","uri":"program://bitsandbytes/module/tests.test_linear8bitlt#L1-L293","kind":"module","name":"tests.test_linear8bitlt","path":"tests/test_linear8bitlt.py","language":"python","start_line":1,"end_line":293,"context_start_line":1,"context_end_line":293,"code":"from contextlib import nullcontext\nimport copy\nimport os\nimport pickle\nimport platform\nfrom tempfile import TemporaryDirectory\n\nimport pytest\nimport torch\n\nimport bitsandbytes as bnb\nfrom bitsandbytes.nn.modules import Linear8bitLt\nfrom tests.helpers import (\n TRUE_FALSE,\n get_available_devices,\n id_formatter,\n torch_load_from_buffer,\n torch_save_to_buffer,\n)\n\n\n# contributed by Alex Borzunov, see:\n# 
https://github.com/bigscience-workshop/petals/blob/main/tests/test_linear8bitlt.py\n@pytest.mark.parametrize(\"device\", get_available_devices())\ndef test_linear_no_igemmlt(device):\n linear = torch.nn.Linear(1024, 3072)\n x = torch.randn(3, 1024, dtype=torch.half)\n linear_custom = Linear8bitLt(\n linear.in_features,\n linear.out_features,\n linear.bias is not None,\n has_fp16_weights=False,\n threshold=6.0,\n )\n\n # TODO: Remove, this is no longer implemented\n linear_custom.state.force_no_igemmlt = True\n\n linear_custom.weight = bnb.nn.Int8Params(\n linear.weight.data.clone(),\n requires_grad=False,\n has_fp16_weights=False,\n ).to(linear.weight.dtype)\n linear_custom.bias = linear.bias\n linear_custom = linear_custom.to(device)\n linear = linear.half().to(device)\n\n x_ref = x.clone().to(device).requires_grad_(True)\n x_ours = x.clone().to(device).requires_grad_(True)\n fx_ref = linear(x_ref).float()\n grad_proj = torch.randn_like(fx_ref)\n (fx_ref * grad_proj).mean().backward()\n\n fx_ours = linear_custom(x_ours).float()\n (fx_ours * grad_proj).mean().backward()\n\n assert linear_custom.state.CB is not None\n assert not linear_custom.state.has_fp16_weights\n\n idx = torch.isclose(fx_ref, fx_ours, atol=0.02, rtol=1e-5)\n assert (idx == 0).sum().item() < fx_ref.numel() * 2.5e-4\n torch.testing.assert_close(fx_ref, fx_ours, atol=0.03, rtol=1e-5)\n torch.testing.assert_close(x_ref.grad, x_ours.grad, atol=0.01, rtol=1e-5)\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"has_fp16_weights\", TRUE_FALSE, ids=id_formatter(\"has_fp16_weights\"))\n@pytest.mark.parametrize(\"threshold\", [0.0, 6.0], ids=id_formatter(\"threshold\"))\n@pytest.mark.parametrize(\"serialize_before_forward\", TRUE_FALSE, ids=id_formatter(\"serialize_before_forward\"))\n@pytest.mark.parametrize(\"deserialize_before_cuda\", TRUE_FALSE, ids=id_formatter(\"deserialize_before_cuda\"))\n@pytest.mark.parametrize(\"save_before_forward\", TRUE_FALSE, ids=id_formatter(\"save_before_forward\"))\n@pytest.mark.parametrize(\"load_before_cuda\", TRUE_FALSE, ids=id_formatter(\"load_before_cuda\"))\ndef test_linear_serialization(\n device,\n has_fp16_weights,\n threshold,\n serialize_before_forward,\n deserialize_before_cuda,\n save_before_forward,\n load_before_cuda,\n):\n if device != \"cuda\" and has_fp16_weights:\n pytest.skip(\"has_fp16_weights is only supported on CUDA and is deprecated\")\n\n linear = torch.nn.Linear(32, 96)\n # TODO: Fallback for bad shapes\n x = torch.randn(4, 32, dtype=torch.half)\n # x = torch.randn(3, 32, dtype=torch.half)\n\n linear_custom = Linear8bitLt(\n linear.in_features,\n linear.out_features,\n linear.bias is not None,\n has_fp16_weights=has_fp16_weights,\n threshold=threshold,\n )\n\n linear_custom.weight = bnb.nn.Int8Params(\n linear.weight.data.clone(),\n requires_grad=has_fp16_weights,\n has_fp16_weights=has_fp16_weights,\n )\n linear_custom.bias = linear.bias\n linear_custom = linear_custom.to(device)\n\n if serialize_before_forward:\n state_dict_8bit = linear_custom.state_dict()\n\n if save_before_forward:\n bytes_8bit = torch_save_to_buffer(linear_custom)\n\n x_first = x.clone().to(device).requires_grad_(True)\n fx_first = linear_custom(x_first).float()\n grad_proj = torch.randn_like(fx_first)\n (fx_first * grad_proj).mean().backward()\n\n if not serialize_before_forward:\n state_dict_8bit = linear_custom.state_dict()\n\n if not save_before_forward:\n bytes_8bit = torch_save_to_buffer(linear_custom)\n\n with TemporaryDirectory() as tmpdir:\n 
state_path_8bit = os.path.join(tmpdir, \"state_8bit.pth\")\n state_path = os.path.join(tmpdir, \"state.pth\")\n\n torch.save(linear.state_dict(), state_path)\n torch.save(state_dict_8bit, state_path_8bit)\n\n if not has_fp16_weights:\n assert os.path.getsize(state_path_8bit) < 0.5 * os.path.getsize(state_path)\n\n new_state_dict = torch.load(state_path_8bit, weights_only=False)\n\n new_linear_custom = Linear8bitLt(\n linear.in_features,\n linear.out_features,\n linear.bias is not None,\n has_fp16_weights=has_fp16_weights,\n threshold=threshold,\n )\n\n if deserialize_before_cuda:\n with nullcontext() if has_fp16_weights else pytest.raises(RuntimeError):\n new_linear_custom.load_state_dict(new_state_dict, strict=True)\n\n if load_before_cuda:\n new_linear_custom2 = torch_load_from_buffer(bytes_8bit)\n\n new_linear_custom = new_linear_custom.to(device)\n\n if not deserialize_before_cuda:\n new_linear_custom.load_state_dict(new_state_dict, strict=True)\n\n if not load_before_cuda:\n new_linear_custom2 = torch_load_from_buffer(bytes_8bit)\n\n x_second = x.clone().to(device).requires_grad_(True)\n fx_second = new_linear_custom(x_second).float()\n (fx_second * grad_proj).mean().backward()\n\n x_third = x.clone().to(device).requires_grad_(True)\n fx_third = new_linear_custom2(x_third).float()\n (fx_third * grad_proj).mean().backward()\n\n # if 8-bit weights were loaded before .cuda, state is incorrect anyway and RuntimeError was raised\n if has_fp16_weights or not deserialize_before_cuda:\n assert torch.allclose(fx_first, fx_second, atol=1e-5)\n assert torch.allclose(x_first.grad, x_second.grad, atol=1e-5)\n assert torch.allclose(fx_first, fx_third, atol=1e-5)\n assert torch.allclose(x_first.grad, x_third.grad, atol=1e-5)\n\n\n@pytest.fixture\ndef linear8bit(requires_cuda):\n linear = torch.nn.Linear(32, 96)\n linear_custom = Linear8bitLt(\n linear.in_features,\n linear.out_features,\n linear.bias is not None,\n has_fp16_weights=False,\n threshold=6.0,\n )\n linear_custom.weight = bnb.nn.Int8Params(\n linear.weight.data.clone(),\n requires_grad=False,\n has_fp16_weights=False,\n )\n linear_custom.bias = linear.bias\n linear_custom = linear_custom.cuda()\n return linear_custom\n\n\ndef test_linear8bit_copy_param(linear8bit):\n shallow_copy = copy.copy(linear8bit)\n assert linear8bit.weight is shallow_copy.weight\n assert linear8bit.bias is shallow_copy.bias\n assert linear8bit.weight.data.data_ptr() == shallow_copy.weight.data.data_ptr()\n\n\ndef test_linear8bit_deepcopy_param(linear8bit):\n deep_copy = copy.deepcopy(linear8bit)\n assert linear8bit.weight is not deep_copy.weight\n assert linear8bit.bias is not deep_copy.bias\n assert linear8bit.weight.data.data_ptr() != deep_copy.weight.data.data_ptr()\n assert torch.allclose(linear8bit.weight.data, deep_copy.weight.data)\n assert linear8bit.state == deep_copy.state\n\n # check for a bug where SCB and CB were not copied\n assert deep_copy.weight.SCB is not None\n assert (linear8bit.weight.SCB == deep_copy.weight.SCB).all()\n assert deep_copy.weight.CB is not None\n assert (linear8bit.weight.CB == deep_copy.weight.CB).all()\n\n\ndef test_linear8bit_serialization(linear8bit):\n serialized = pickle.dumps(linear8bit)\n deserialized = pickle.loads(serialized)\n assert linear8bit.weight.data.data_ptr() != deserialized.weight.data.data_ptr()\n assert torch.allclose(linear8bit.weight.data, deserialized.weight.data)\n assert linear8bit.bias.data.data_ptr() != deserialized.bias.data.data_ptr()\n assert torch.allclose(linear8bit.bias.data, 
deserialized.bias.data)\n assert linear8bit.state == deserialized.state\n\n # check for a bug where SCB and CB were not copied\n assert (linear8bit.weight.SCB == deserialized.weight.SCB).all()\n assert (linear8bit.weight.CB == deserialized.weight.CB).all()\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"threshold\", [0.0, 6.0], ids=id_formatter(\"threshold\"))\n@pytest.mark.parametrize(\"bias\", TRUE_FALSE, ids=id_formatter(\"bias\"))\n@pytest.mark.parametrize(\"fullgraph\", TRUE_FALSE, ids=id_formatter(\"fullgraph\"))\n@pytest.mark.parametrize(\"mode\", [\"default\", \"reduce-overhead\"], ids=id_formatter(\"mode\"))\n@pytest.mark.skipif(torch.__version__ < (2, 4), reason=\"Not supported in torch < 2.4\")\ndef test_linear8bitlt_torch_compile(device, threshold, bias, fullgraph, mode):\n if device == \"cuda\" and platform.system() == \"Windows\":\n pytest.skip(\"Triton is not officially supported on Windows\")\n\n dim = 256\n batch_size = 16\n\n torch.compiler.reset()\n\n # Create a small network with Linear8bitLt layers\n net = torch.nn.Sequential(\n *[bnb.nn.Linear8bitLt(dim, dim, bias=bias, has_fp16_weights=False, threshold=threshold) for _ in range(4)]\n ).to(device)\n\n dynamic_output_shapes = fullgraph and threshold > 0\n with torch._dynamo.config.patch(\"capture_dynamic_output_shape_ops\", dynamic_output_shapes):\n # Create input tensor\n x = torch.randn(batch_size, dim, dtype=torch.float16, device=device)\n\n # Get reference output before compilation\n with torch.no_grad():\n ref_output = net(x)\n\n # Compile the model\n compile_backend = \"hpu_backend\" if device == \"hpu\" else \"inductor\"\n compiled_net = torch.compile(net, fullgraph=fullgraph, mode=mode, backend=compile_backend)\n\n # Get output from compiled model\n with torch.no_grad():\n compiled_output = compiled_net(x)\n\n # Check outputs match\n assert compiled_output.shape == ref_output.shape\n assert compiled_output.device == ref_output.device\n assert compiled_output.dtype == ref_output.dtype\n torch.testing.assert_close(compiled_output, ref_output)\n\n # Test with gradients. 
Currently only works with threshold=0.\n # Has a strange regression on Linux aarch64 CPU in torch==2.6.0.\n is_broken_platform = (\n device == \"cpu\"\n and platform.system() == \"Linux\"\n and platform.machine() == \"aarch64\"\n and (2, 6) <= torch.__version__ < (2, 7)\n )\n\n if threshold == 0 and not is_broken_platform:\n x.requires_grad_(True)\n y1 = net(x).sum()\n y1.backward()\n grad_ref = x.grad.clone()\n\n x.grad = None\n y2 = compiled_net(x).sum()\n y2.backward()\n grad_compiled = x.grad.clone()\n\n torch.testing.assert_close(grad_compiled, grad_ref)","source_hash":"6782023e39803826c3932870757cbd57dadcb79783bc689e6b2496a441c45bb1","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_linear8bitlt.test_linear_no_igemmlt","uri":"program://bitsandbytes/function/tests.test_linear8bitlt.test_linear_no_igemmlt#L25-L63","kind":"function","name":"test_linear_no_igemmlt","path":"tests/test_linear8bitlt.py","language":"python","start_line":25,"end_line":63,"context_start_line":5,"context_end_line":83,"code":"import platform\nfrom tempfile import TemporaryDirectory\n\nimport pytest\nimport torch\n\nimport bitsandbytes as bnb\nfrom bitsandbytes.nn.modules import Linear8bitLt\nfrom tests.helpers import (\n TRUE_FALSE,\n get_available_devices,\n id_formatter,\n torch_load_from_buffer,\n torch_save_to_buffer,\n)\n\n\n# contributed by Alex Borzunov, see:\n# https://github.com/bigscience-workshop/petals/blob/main/tests/test_linear8bitlt.py\n@pytest.mark.parametrize(\"device\", get_available_devices())\ndef test_linear_no_igemmlt(device):\n linear = torch.nn.Linear(1024, 3072)\n x = torch.randn(3, 1024, dtype=torch.half)\n linear_custom = Linear8bitLt(\n linear.in_features,\n linear.out_features,\n linear.bias is not None,\n has_fp16_weights=False,\n threshold=6.0,\n )\n\n # TODO: Remove, this is no longer implemented\n linear_custom.state.force_no_igemmlt = True\n\n linear_custom.weight = bnb.nn.Int8Params(\n linear.weight.data.clone(),\n requires_grad=False,\n has_fp16_weights=False,\n ).to(linear.weight.dtype)\n linear_custom.bias = linear.bias\n linear_custom = linear_custom.to(device)\n linear = linear.half().to(device)\n\n x_ref = x.clone().to(device).requires_grad_(True)\n x_ours = x.clone().to(device).requires_grad_(True)\n fx_ref = linear(x_ref).float()\n grad_proj = torch.randn_like(fx_ref)\n (fx_ref * grad_proj).mean().backward()\n\n fx_ours = linear_custom(x_ours).float()\n (fx_ours * grad_proj).mean().backward()\n\n assert linear_custom.state.CB is not None\n assert not linear_custom.state.has_fp16_weights\n\n idx = torch.isclose(fx_ref, fx_ours, atol=0.02, rtol=1e-5)\n assert (idx == 0).sum().item() < fx_ref.numel() * 2.5e-4\n torch.testing.assert_close(fx_ref, fx_ours, atol=0.03, rtol=1e-5)\n torch.testing.assert_close(x_ref.grad, x_ours.grad, atol=0.01, rtol=1e-5)\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"has_fp16_weights\", TRUE_FALSE, ids=id_formatter(\"has_fp16_weights\"))\n@pytest.mark.parametrize(\"threshold\", [0.0, 6.0], ids=id_formatter(\"threshold\"))\n@pytest.mark.parametrize(\"serialize_before_forward\", TRUE_FALSE, ids=id_formatter(\"serialize_before_forward\"))\n@pytest.mark.parametrize(\"deserialize_before_cuda\", TRUE_FALSE, ids=id_formatter(\"deserialize_before_cuda\"))\n@pytest.mark.parametrize(\"save_before_forward\", TRUE_FALSE, ids=id_formatter(\"save_before_forward\"))\n@pytest.mark.parametrize(\"load_before_cuda\", TRUE_FALSE, ids=id_formatter(\"load_before_cuda\"))\ndef 
test_linear_serialization(\n device,\n has_fp16_weights,\n threshold,\n serialize_before_forward,\n deserialize_before_cuda,\n save_before_forward,\n load_before_cuda,\n):\n if device != \"cuda\" and has_fp16_weights:\n pytest.skip(\"has_fp16_weights is only supported on CUDA and is deprecated\")","source_hash":"6782023e39803826c3932870757cbd57dadcb79783bc689e6b2496a441c45bb1","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_linear8bitlt.test_linear_serialization","uri":"program://bitsandbytes/function/tests.test_linear8bitlt.test_linear_serialization#L73-L171","kind":"function","name":"test_linear_serialization","path":"tests/test_linear8bitlt.py","language":"python","start_line":73,"end_line":171,"context_start_line":53,"context_end_line":191,"code":"\n fx_ours = linear_custom(x_ours).float()\n (fx_ours * grad_proj).mean().backward()\n\n assert linear_custom.state.CB is not None\n assert not linear_custom.state.has_fp16_weights\n\n idx = torch.isclose(fx_ref, fx_ours, atol=0.02, rtol=1e-5)\n assert (idx == 0).sum().item() < fx_ref.numel() * 2.5e-4\n torch.testing.assert_close(fx_ref, fx_ours, atol=0.03, rtol=1e-5)\n torch.testing.assert_close(x_ref.grad, x_ours.grad, atol=0.01, rtol=1e-5)\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"has_fp16_weights\", TRUE_FALSE, ids=id_formatter(\"has_fp16_weights\"))\n@pytest.mark.parametrize(\"threshold\", [0.0, 6.0], ids=id_formatter(\"threshold\"))\n@pytest.mark.parametrize(\"serialize_before_forward\", TRUE_FALSE, ids=id_formatter(\"serialize_before_forward\"))\n@pytest.mark.parametrize(\"deserialize_before_cuda\", TRUE_FALSE, ids=id_formatter(\"deserialize_before_cuda\"))\n@pytest.mark.parametrize(\"save_before_forward\", TRUE_FALSE, ids=id_formatter(\"save_before_forward\"))\n@pytest.mark.parametrize(\"load_before_cuda\", TRUE_FALSE, ids=id_formatter(\"load_before_cuda\"))\ndef test_linear_serialization(\n device,\n has_fp16_weights,\n threshold,\n serialize_before_forward,\n deserialize_before_cuda,\n save_before_forward,\n load_before_cuda,\n):\n if device != \"cuda\" and has_fp16_weights:\n pytest.skip(\"has_fp16_weights is only supported on CUDA and is deprecated\")\n\n linear = torch.nn.Linear(32, 96)\n # TODO: Fallback for bad shapes\n x = torch.randn(4, 32, dtype=torch.half)\n # x = torch.randn(3, 32, dtype=torch.half)\n\n linear_custom = Linear8bitLt(\n linear.in_features,\n linear.out_features,\n linear.bias is not None,\n has_fp16_weights=has_fp16_weights,\n threshold=threshold,\n )\n\n linear_custom.weight = bnb.nn.Int8Params(\n linear.weight.data.clone(),\n requires_grad=has_fp16_weights,\n has_fp16_weights=has_fp16_weights,\n )\n linear_custom.bias = linear.bias\n linear_custom = linear_custom.to(device)\n\n if serialize_before_forward:\n state_dict_8bit = linear_custom.state_dict()\n\n if save_before_forward:\n bytes_8bit = torch_save_to_buffer(linear_custom)\n\n x_first = x.clone().to(device).requires_grad_(True)\n fx_first = linear_custom(x_first).float()\n grad_proj = torch.randn_like(fx_first)\n (fx_first * grad_proj).mean().backward()\n\n if not serialize_before_forward:\n state_dict_8bit = linear_custom.state_dict()\n\n if not save_before_forward:\n bytes_8bit = torch_save_to_buffer(linear_custom)\n\n with TemporaryDirectory() as tmpdir:\n state_path_8bit = os.path.join(tmpdir, \"state_8bit.pth\")\n state_path = os.path.join(tmpdir, \"state.pth\")\n\n torch.save(linear.state_dict(), state_path)\n torch.save(state_dict_8bit, state_path_8bit)\n\n if not 
@pytest.mark.parametrize("device", get_available_devices())
@pytest.mark.parametrize("has_fp16_weights", TRUE_FALSE, ids=id_formatter("has_fp16_weights"))
@pytest.mark.parametrize("threshold", [0.0, 6.0], ids=id_formatter("threshold"))
@pytest.mark.parametrize("serialize_before_forward", TRUE_FALSE, ids=id_formatter("serialize_before_forward"))
@pytest.mark.parametrize("deserialize_before_cuda", TRUE_FALSE, ids=id_formatter("deserialize_before_cuda"))
@pytest.mark.parametrize("save_before_forward", TRUE_FALSE, ids=id_formatter("save_before_forward"))
@pytest.mark.parametrize("load_before_cuda", TRUE_FALSE, ids=id_formatter("load_before_cuda"))
def test_linear_serialization(
    device,
    has_fp16_weights,
    threshold,
    serialize_before_forward,
    deserialize_before_cuda,
    save_before_forward,
    load_before_cuda,
):
    if device != "cuda" and has_fp16_weights:
        pytest.skip("has_fp16_weights is only supported on CUDA and is deprecated")

    linear = torch.nn.Linear(32, 96)
    # TODO: Fallback for bad shapes
    x = torch.randn(4, 32, dtype=torch.half)
    # x = torch.randn(3, 32, dtype=torch.half)

    linear_custom = Linear8bitLt(
        linear.in_features,
        linear.out_features,
        linear.bias is not None,
        has_fp16_weights=has_fp16_weights,
        threshold=threshold,
    )

    linear_custom.weight = bnb.nn.Int8Params(
        linear.weight.data.clone(),
        requires_grad=has_fp16_weights,
        has_fp16_weights=has_fp16_weights,
    )
    linear_custom.bias = linear.bias
    linear_custom = linear_custom.to(device)

    if serialize_before_forward:
        state_dict_8bit = linear_custom.state_dict()

    if save_before_forward:
        bytes_8bit = torch_save_to_buffer(linear_custom)

    x_first = x.clone().to(device).requires_grad_(True)
    fx_first = linear_custom(x_first).float()
    grad_proj = torch.randn_like(fx_first)
    (fx_first * grad_proj).mean().backward()

    if not serialize_before_forward:
        state_dict_8bit = linear_custom.state_dict()

    if not save_before_forward:
        bytes_8bit = torch_save_to_buffer(linear_custom)

    with TemporaryDirectory() as tmpdir:
        state_path_8bit = os.path.join(tmpdir, "state_8bit.pth")
        state_path = os.path.join(tmpdir, "state.pth")

        torch.save(linear.state_dict(), state_path)
        torch.save(state_dict_8bit, state_path_8bit)

        if not has_fp16_weights:
            assert os.path.getsize(state_path_8bit) < 0.5 * os.path.getsize(state_path)

        new_state_dict = torch.load(state_path_8bit, weights_only=False)

        new_linear_custom = Linear8bitLt(
            linear.in_features,
            linear.out_features,
            linear.bias is not None,
            has_fp16_weights=has_fp16_weights,
            threshold=threshold,
        )

        if deserialize_before_cuda:
            with nullcontext() if has_fp16_weights else pytest.raises(RuntimeError):
                new_linear_custom.load_state_dict(new_state_dict, strict=True)

        if load_before_cuda:
            new_linear_custom2 = torch_load_from_buffer(bytes_8bit)

        new_linear_custom = new_linear_custom.to(device)

        if not deserialize_before_cuda:
            new_linear_custom.load_state_dict(new_state_dict, strict=True)

        if not load_before_cuda:
            new_linear_custom2 = torch_load_from_buffer(bytes_8bit)

        x_second = x.clone().to(device).requires_grad_(True)
        fx_second = new_linear_custom(x_second).float()
        (fx_second * grad_proj).mean().backward()

        x_third = x.clone().to(device).requires_grad_(True)
        fx_third = new_linear_custom2(x_third).float()
        (fx_third * grad_proj).mean().backward()

        # if 8-bit weights were loaded before .cuda, state is incorrect anyway and RuntimeError was raised
        if has_fp16_weights or not deserialize_before_cuda:
            assert torch.allclose(fx_first, fx_second, atol=1e-5)
            assert torch.allclose(x_first.grad, x_second.grad, atol=1e-5)
            assert torch.allclose(fx_first, fx_third, atol=1e-5)
            assert torch.allclose(x_first.grad, x_third.grad, atol=1e-5)
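Stripped of the parametrized orderings, the round-trip this test guards looks like the sketch below. It assumes a CUDA device; note that a freshly constructed module must be moved to the device before 8-bit state is loaded into it, otherwise load_state_dict raises RuntimeError, exactly as asserted above. weights_only=False mirrors the test, since the checkpoint carries non-tensor quantization state.

import torch
from bitsandbytes.nn.modules import Linear8bitLt

layer = Linear8bitLt(32, 96, has_fp16_weights=False, threshold=6.0).cuda()
torch.save(layer.state_dict(), "linear8bit.pth")

restored = Linear8bitLt(32, 96, has_fp16_weights=False, threshold=6.0)
restored = restored.cuda()  # move to the device first...
restored.load_state_dict(torch.load("linear8bit.pth", weights_only=False))  # ...then load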
@pytest.fixture
def linear8bit(requires_cuda):
    linear = torch.nn.Linear(32, 96)
    linear_custom = Linear8bitLt(
        linear.in_features,
        linear.out_features,
        linear.bias is not None,
        has_fp16_weights=False,
        threshold=6.0,
    )
    linear_custom.weight = bnb.nn.Int8Params(
        linear.weight.data.clone(),
        requires_grad=False,
        has_fp16_weights=False,
    )
    linear_custom.bias = linear.bias
    linear_custom = linear_custom.cuda()
    return linear_custom


def test_linear8bit_copy_param(linear8bit):
    shallow_copy = copy.copy(linear8bit)
    assert linear8bit.weight is shallow_copy.weight
    assert linear8bit.bias is shallow_copy.bias
    assert linear8bit.weight.data.data_ptr() == shallow_copy.weight.data.data_ptr()


def test_linear8bit_deepcopy_param(linear8bit):
    deep_copy = copy.deepcopy(linear8bit)
    assert linear8bit.weight is not deep_copy.weight
    assert linear8bit.bias is not deep_copy.bias
    assert linear8bit.weight.data.data_ptr() != deep_copy.weight.data.data_ptr()
    assert torch.allclose(linear8bit.weight.data, deep_copy.weight.data)
    assert linear8bit.state == deep_copy.state

    # check for a bug where SCB and CB were not copied
    assert deep_copy.weight.SCB is not None
    assert (linear8bit.weight.SCB == deep_copy.weight.SCB).all()
    assert deep_copy.weight.CB is not None
    assert (linear8bit.weight.CB == deep_copy.weight.CB).all()
def test_linear8bit_serialization(linear8bit):
    serialized = pickle.dumps(linear8bit)
    deserialized = pickle.loads(serialized)
    assert linear8bit.weight.data.data_ptr() != deserialized.weight.data.data_ptr()
    assert torch.allclose(linear8bit.weight.data, deserialized.weight.data)
    assert linear8bit.bias.data.data_ptr() != deserialized.bias.data.data_ptr()
    assert torch.allclose(linear8bit.bias.data, deserialized.bias.data)
    assert linear8bit.state == deserialized.state

    # check for a bug where SCB and CB were not copied
    assert (linear8bit.weight.SCB == deserialized.weight.SCB).all()
    assert (linear8bit.weight.CB == deserialized.weight.CB).all()
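The three copy/pickle tests reduce to one guarantee: the packed int8 weight buffer (CB) and its quantization statistics (SCB) travel with the module through deepcopy and pickling. A sketch, assuming layer is an already-quantized Linear8bitLt such as the linear8bit fixture produces:

import copy
import pickle

clone = copy.deepcopy(layer)
assert clone.weight.data.data_ptr() != layer.weight.data.data_ptr()  # independent storage
assert (clone.weight.CB == layer.weight.CB).all()                    # same int8 payload

roundtrip = pickle.loads(pickle.dumps(layer))
assert (roundtrip.weight.SCB == layer.weight.SCB).all()              # statistics survive too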
@pytest.mark.parametrize("device", get_available_devices())
@pytest.mark.parametrize("threshold", [0.0, 6.0], ids=id_formatter("threshold"))
@pytest.mark.parametrize("bias", TRUE_FALSE, ids=id_formatter("bias"))
@pytest.mark.parametrize("fullgraph", TRUE_FALSE, ids=id_formatter("fullgraph"))
@pytest.mark.parametrize("mode", ["default", "reduce-overhead"], ids=id_formatter("mode"))
@pytest.mark.skipif(torch.__version__ < (2, 4), reason="Not supported in torch < 2.4")
def test_linear8bitlt_torch_compile(device, threshold, bias, fullgraph, mode):
    if device == "cuda" and platform.system() == "Windows":
        pytest.skip("Triton is not officially supported on Windows")

    dim = 256
    batch_size = 16

    torch.compiler.reset()

    # Create a small network with Linear8bitLt layers
    net = torch.nn.Sequential(
        *[bnb.nn.Linear8bitLt(dim, dim, bias=bias, has_fp16_weights=False, threshold=threshold) for _ in range(4)]
    ).to(device)

    dynamic_output_shapes = fullgraph and threshold > 0
    with torch._dynamo.config.patch("capture_dynamic_output_shape_ops", dynamic_output_shapes):
        # Create input tensor
        x = torch.randn(batch_size, dim, dtype=torch.float16, device=device)

        # Get reference output before compilation
        with torch.no_grad():
            ref_output = net(x)

        # Compile the model
        compile_backend = "hpu_backend" if device == "hpu" else "inductor"
        compiled_net = torch.compile(net, fullgraph=fullgraph, mode=mode, backend=compile_backend)

        # Get output from compiled model
        with torch.no_grad():
            compiled_output = compiled_net(x)

        # Check outputs match
        assert compiled_output.shape == ref_output.shape
        assert compiled_output.device == ref_output.device
        assert compiled_output.dtype == ref_output.dtype
        torch.testing.assert_close(compiled_output, ref_output)

        # Test with gradients. Currently only works with threshold=0.
        # Has a strange regression on Linux aarch64 CPU in torch==2.6.0.
        is_broken_platform = (
            device == "cpu"
            and platform.system() == "Linux"
            and platform.machine() == "aarch64"
            and (2, 6) <= torch.__version__ < (2, 7)
        )

        if threshold == 0 and not is_broken_platform:
            x.requires_grad_(True)
            y1 = net(x).sum()
            y1.backward()
            grad_ref = x.grad.clone()

            x.grad = None
            y2 = compiled_net(x).sum()
            y2.backward()
            grad_compiled = x.grad.clone()

            torch.testing.assert_close(grad_compiled, grad_ref)
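Outside the harness, compiling a Linear8bitLt model is unremarkable for threshold=0; for threshold > 0 with fullgraph=True the test flips on capture_dynamic_output_shape_ops, presumably because outlier decomposition produces data-dependent shapes. A minimal sketch, assuming CUDA and torch >= 2.4:

import torch
import bitsandbytes as bnb

net = torch.nn.Sequential(
    bnb.nn.Linear8bitLt(256, 256, has_fp16_weights=False, threshold=0.0),
    torch.nn.ReLU(),
    bnb.nn.Linear8bitLt(256, 256, has_fp16_weights=False, threshold=0.0),
).to("cuda")

compiled = torch.compile(net, fullgraph=True)
x = torch.randn(16, 256, dtype=torch.float16, device="cuda")
with torch.no_grad():
    y = compiled(x)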
tests/test_parametrize.py (file lines 1-411): 4-bit parametrization of arbitrary nn.Parameter tensors; MoE expert weights are the main use case.

import pytest
import torch
import torch.nn as nn

from bitsandbytes import functional as F
from bitsandbytes.cextension import HIP_ENVIRONMENT
from bitsandbytes.nn.parametrize import (
    Bnb4bitParametrization,
    replace_parameter_4bit,
    replace_parameter_4bit_prequantized,
)
from tests.helpers import (
    TRUE_FALSE,
    describe_dtype,
    get_available_devices,
    id_formatter,
    is_supported_on_hpu,
)


class ParametrizeTestModule(nn.Module):
    """Test module with different parameter shapes for testing parametrization."""

    def __init__(self, device="cpu", dtype=torch.float32):
        super().__init__()
        # 2D parameter (typical weight matrix)
        self.weight_2d = nn.Parameter(torch.randn(1024, 1024, device=device, dtype=dtype))
        # 3D parameter (MoE expert weights - the main use case for this feature)
        self.expert_weights = nn.Parameter(torch.randn(8, 512, 256, device=device, dtype=dtype))
        # 1D parameter (bias-like)
        self.bias_1d = nn.Parameter(torch.randn(1024, device=device, dtype=dtype))
        # Non-parameter attribute (should not be quantizable)
        self.not_param = torch.randn(32, device=device, dtype=dtype)
@pytest.mark.parametrize("device", get_available_devices())
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)
@pytest.mark.parametrize("quant_type", ["nf4", "fp4"])
@pytest.mark.parametrize("compress_statistics", TRUE_FALSE, ids=id_formatter("compress_statistics"))
@pytest.mark.parametrize(
    "blocksize",
    [64, 128, 256] if not HIP_ENVIRONMENT else [128, 256],
)
def test_replace_parameter_4bit(device, dtype, quant_type, compress_statistics, blocksize):
    """Test basic parameter replacement with 4-bit quantization on different dtypes."""
    if device == "hpu" and not is_supported_on_hpu(quant_type, dtype):
        pytest.skip("This configuration is not supported on HPU.")

    # Create module directly on target device to avoid unnecessary transfers
    module = ParametrizeTestModule(device=device, dtype=dtype)
    original_param = module.weight_2d.clone()

    # Apply 4-bit quantization parametrization to the weight parameter
    replace_parameter_4bit(
        module, "weight_2d", compress_statistics=compress_statistics, quant_type=quant_type, blocksize=blocksize
    )

    # Verify that parametrization was applied correctly
    assert hasattr(module, "parametrizations"), "Module should have parametrizations attribute"
    assert "weight_2d" in module.parametrizations, "weight_2d should be parametrized"

    # Test that accessing the parameter returns dequantized version with correct properties
    reconstructed = module.weight_2d
    assert reconstructed.shape == original_param.shape, "Shape should be preserved"
    assert reconstructed.dtype == dtype, "dtype should match original"
    assert reconstructed.device.type == device, "Device should match target"

    # Verify quantization quality using same approach as functional tests
    err = (original_param - reconstructed.detach()).abs().float()
    relerr = (err / (original_param.abs().float() + 1e-8)).mean()
    err_mean = err.mean()

    # Expected error bounds from test_functional.py
    expected_errors = {
        "nf4": {
            64: {"abs": 0.072792, "rel": 0.203299},
            128: {"abs": 0.076835, "rel": 0.215252},
            256: {"abs": 0.080326, "rel": 0.226044},
        },
        "fp4": {
            64: {"abs": 0.096545, "rel": 0.260130},
            128: {"abs": 0.102947, "rel": 0.275734},
            256: {"abs": 0.108685, "rel": 0.289842},
        },
    }

    assert err_mean < expected_errors[quant_type][blocksize]["abs"] + 1e-3, f"Mean abs error {err_mean:.6f} too high"
    assert relerr < expected_errors[quant_type][blocksize]["rel"] + 1e-3, f"Mean rel error {relerr:.6f} too high"
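The API surface this test exercises is small: one call quantizes the parameter in place, and every later attribute access dequantizes transparently. A minimal sketch on a hypothetical Experts module (CUDA assumed; any device that quantize_4bit supports works the same way):

import torch
import torch.nn as nn
from bitsandbytes.nn.parametrize import replace_parameter_4bit

class Experts(nn.Module):  # hypothetical container for MoE expert weights
    def __init__(self):
        super().__init__()
        self.expert_weights = nn.Parameter(torch.randn(8, 512, 256, dtype=torch.float16, device="cuda"))

m = Experts()
# Quantize in place; the stored payload becomes packed 4-bit data.
replace_parameter_4bit(m, "expert_weights", quant_type="nf4", blocksize=64, compress_statistics=True)
w = m.expert_weights          # fp16 tensor reconstructed from the 4-bit payload
assert w.shape == (8, 512, 256)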
@pytest.mark.parametrize("device", get_available_devices())
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)
def test_moe_parameter_shape(device, dtype):
    """Test parametrization with MoE-style parameter shape."""
    if device == "hpu" and not is_supported_on_hpu("nf4", dtype):
        pytest.skip("This configuration is not supported on HPU.")

    param_shape = (8, 64, 32)

    # Create module with custom parameter shape directly on target device
    class MoEModule(nn.Module):
        def __init__(self, device, dtype):
            super().__init__()
            self.param = nn.Parameter(torch.randn(*param_shape, dtype=dtype, device=device))

    module = MoEModule(device=device, dtype=dtype)
    original_param = module.param.clone()

    # Apply quantization parametrization
    replace_parameter_4bit(module, "param", quant_type="nf4")

    # Verify reconstruction maintains all properties
    reconstructed = module.param
    assert reconstructed.shape == param_shape, f"Shape should be preserved: {reconstructed.shape} vs {param_shape}"
    assert reconstructed.dtype == dtype, "dtype should match original"
    assert reconstructed.device.type == device, "Device should match target"

    # Verify quantization quality using error calculation approach from functional tests
    err = (original_param - reconstructed.detach()).abs().float()
    relerr = (err / (original_param.abs().float() + 1e-8)).mean()
    err_mean = err.mean()

    # Use slightly looser bounds for higher dimensional tensors
    abs_bound = 0.085  # NF4 baseline + margin
    rel_bound = 0.25  # NF4 baseline + margin

    assert err_mean < abs_bound, f"Mean abs error {err_mean:.6f} too high for shape {param_shape}"
    assert relerr < rel_bound, f"Mean rel error {relerr:.6f} too high for shape {param_shape}"
reason=\"state dict hook requires torch >= 2.5.0\")\ndef test_quant_state_preservation(device, dtype):\n \"\"\"Test that quantization state is properly preserved and accessible.\"\"\"\n if device == \"hpu\" and not is_supported_on_hpu(\"nf4\", dtype):\n pytest.skip(\"Configuration not supported on HPU.\")\n\n module = ParametrizeTestModule(device=device, dtype=dtype)\n\n blocksize = 128 if HIP_ENVIRONMENT else 64\n\n # Apply parametrization with specific settings\n replace_parameter_4bit(module, \"weight_2d\", quant_type=\"nf4\", compress_statistics=True, blocksize=blocksize)\n\n # Verify that quantization state is accessible through parametrization\n parametrization = module.parametrizations.weight_2d[0]\n assert isinstance(parametrization, Bnb4bitParametrization), \"Should be Bnb4bitParametrization instance\"\n\n # Check quantization state properties\n quant_state = parametrization.quant_state\n assert isinstance(quant_state, F.QuantState), \"Should have QuantState\"\n assert quant_state.quant_type == \"nf4\", \"Quant type should be preserved\"\n assert quant_state.blocksize == blocksize, \"Block size should be preserved\"\n\n # Verify that state dict includes all necessary quantization metadata\n state_dict = module.state_dict()\n quant_state_dict = quant_state.as_dict(packed=True)\n\n for key in quant_state_dict.keys():\n full_key = f\"weight_2d.{key}\"\n assert full_key in state_dict, f\"Quantization metadata '{full_key}' should be in state dict\"\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n@pytest.mark.skipif(torch.__version__ < (2, 5), reason=\"state dict hook requires torch >= 2.5.0\")\ndef test_multiple_parameters(device, dtype):\n \"\"\"Test applying parametrization to multiple parameters in the same module.\"\"\"\n if device == \"hpu\" and not is_supported_on_hpu(\"nf4\", dtype):\n pytest.skip(\"Configuration not supported on HPU.\")\n\n module = ParametrizeTestModule(device=device, dtype=dtype)\n original_2d = module.weight_2d.clone()\n original_3d = module.expert_weights.clone()\n\n # Apply parametrization to multiple parameters, with varying configurations\n replace_parameter_4bit(module, \"weight_2d\", quant_type=\"nf4\", blocksize=128)\n replace_parameter_4bit(module, \"expert_weights\", quant_type=\"fp4\", blocksize=256)\n\n # Verify both parameters are parametrized and work correctly\n reconstructed_2d = module.weight_2d\n reconstructed_3d = module.expert_weights\n\n assert reconstructed_2d.shape == original_2d.shape, \"2D parameter shape should be preserved\"\n assert reconstructed_3d.shape == original_3d.shape, \"3D parameter shape should be preserved\"\n\n # Check that state dict includes quantization info for both parameters\n state_dict = module.state_dict()\n assert \"weight_2d\" in state_dict, \"2D parameter should be in state dict\"\n assert \"expert_weights\" in state_dict, \"3D parameter should be in state dict\"\n assert \"weight_2d.absmax\" in state_dict, \"2D parameter quantization metadata should be saved\"\n assert \"expert_weights.absmax\" in state_dict, \"3D parameter quantization metadata should be saved\"\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n@pytest.mark.parametrize(\n \"blocksize\",\n [64, 128, 256] if not HIP_ENVIRONMENT else [128, 256],\n)\ndef test_different_blocksizes(device, dtype, 
@pytest.mark.parametrize("device", get_available_devices())
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)
@pytest.mark.parametrize("quant_type", ["nf4", "fp4"])
@pytest.mark.parametrize("compress_statistics", TRUE_FALSE, ids=id_formatter("compress_statistics"))
@pytest.mark.skipif(torch.__version__ < (2, 5), reason="state dict hook requires torch >= 2.5.0")
def test_state_dict_functionality(device, dtype, quant_type, compress_statistics):
    """Test that state dict saving works with quantized parameters."""
    if device == "hpu" and not is_supported_on_hpu(quant_type, dtype):
        pytest.skip("Configuration not supported on HPU.")

    module = ParametrizeTestModule(device=device, dtype=dtype)

    # Apply parametrization to expert weights (main MoE use case)
    replace_parameter_4bit(module, "expert_weights", quant_type=quant_type, compress_statistics=compress_statistics)

    # Save state dict - should include quantization state, not parametrization internals
    state_dict = module.state_dict()

    # Verify state dict structure: quantized param + quantization metadata
    assert "expert_weights" in state_dict, "Quantized parameter should be in state dict"
    assert "expert_weights.absmax" in state_dict, "Quantization absmax should be saved"
    assert "expert_weights.quant_map" in state_dict, "Quantization map should be saved"
    assert f"expert_weights.quant_state.bitsandbytes__{quant_type}" in state_dict, "Quant state should be saved"

    # Verify parametrization internals are NOT saved (clean state dict)
    assert "parametrizations.expert_weights.original" not in state_dict, (
        "Internal parametrization keys should not be saved"
    )

    # Test that the parameter can be accessed after state dict creation
    reconstructed = module.expert_weights
    assert reconstructed.shape == (8, 512, 256), "Shape should be preserved"
    assert reconstructed.dtype == dtype, "dtype should match"
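Concretely, the checkpoint keys stay flat and loader-friendly. A sketch of inspecting them, reusing ParametrizeTestModule from the listing above and assuming torch >= 2.5 (the state-dict hook requirement noted in the test); the printed list is illustrative, covering the keys the assertions name and possibly more:

from bitsandbytes.nn.parametrize import replace_parameter_4bit

module = ParametrizeTestModule()
replace_parameter_4bit(module, "expert_weights", quant_type="nf4")

sd = module.state_dict()
print(sorted(k for k in sd if k.startswith("expert_weights")))
# e.g. ['expert_weights', 'expert_weights.absmax', 'expert_weights.quant_map',
#       'expert_weights.quant_state.bitsandbytes__nf4', ...]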
@pytest.mark.parametrize("device", get_available_devices())
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)
def test_moe_realistic_forward(device, dtype):
    """Test realistic MoE forward computation with quantized expert weights."""
    if device == "hpu" and not is_supported_on_hpu("nf4", dtype):
        pytest.skip("Configuration not supported on HPU.")

    class SimpleMoE(nn.Module):
        def __init__(self, device, dtype):
            super().__init__()
            # Expert weights: [num_experts, input_dim, output_dim]
            self.expert_weights = nn.Parameter(torch.randn(4, 32, 64, dtype=dtype, device=device))

        def forward(self, x, expert_idx=0):
            # Select and use specific expert weight matrix
            expert_weight = self.expert_weights[expert_idx]  # Shape: [input_dim, output_dim]
            return torch.matmul(x, expert_weight)

    module = SimpleMoE(device=device, dtype=dtype)
    x = torch.randn(8, 32, dtype=dtype, device=device)

    # Get reference output before quantization
    with torch.no_grad():
        reference_output = module(x, expert_idx=1)

    # Apply 4-bit quantization to expert weights
    replace_parameter_4bit(module, "expert_weights", quant_type="nf4")

    # Get output after quantization - should be very close to original
    with torch.no_grad():
        quantized_output = module(x, expert_idx=1)

    # Verify outputs match within quantization tolerance
    assert quantized_output.shape == reference_output.shape, "Output shape should be preserved"

    # Calculate error like functional tests (matrix ops may amplify quantization errors)
    err = (reference_output - quantized_output).abs().float()
    relerr = (err / (reference_output.abs().float() + 1e-8)).mean()
    err_mean = err.mean()

    # Allow for error amplification through matrix multiplication
    assert err_mean < 0.5, f"Forward pass mean abs error {err_mean:.6f} too high"
    assert relerr < 2.0, f"Forward pass mean rel error {relerr:.6f} too high"
def test_error_conditions():
    """Test that proper errors are raised for invalid inputs."""
    module = ParametrizeTestModule()

    # Test AttributeError for non-existent parameter
    with pytest.raises(AttributeError, match="Module does not have parameter 'nonexistent'"):
        replace_parameter_4bit(module, "nonexistent")

    # Test TypeError for non-Parameter attribute
    with pytest.raises(TypeError, match="Parameter 'not_param' is not an instance of nn.Parameter"):
        replace_parameter_4bit(module, "not_param")

    # Test same errors for prequantized version
    with pytest.raises(AttributeError, match="Module does not have parameter 'nonexistent'"):
        replace_parameter_4bit_prequantized(module, "nonexistent", {}, torch.device("cpu"))

    with pytest.raises(TypeError, match="Parameter 'not_param' is not an instance of nn.Parameter"):
        replace_parameter_4bit_prequantized(module, "not_param", {}, torch.device("cpu"))
@pytest.mark.parametrize("device", get_available_devices())
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)
@pytest.mark.skipif(torch.__version__ < (2, 5), reason="state dict hook requires torch >= 2.5.0")
def test_quant_state_preservation(device, dtype):
    """Test that quantization state is properly preserved and accessible."""
    if device == "hpu" and not is_supported_on_hpu("nf4", dtype):
        pytest.skip("Configuration not supported on HPU.")

    module = ParametrizeTestModule(device=device, dtype=dtype)

    blocksize = 128 if HIP_ENVIRONMENT else 64

    # Apply parametrization with specific settings
    replace_parameter_4bit(module, "weight_2d", quant_type="nf4", compress_statistics=True, blocksize=blocksize)

    # Verify that quantization state is accessible through parametrization
    parametrization = module.parametrizations.weight_2d[0]
    assert isinstance(parametrization, Bnb4bitParametrization), "Should be Bnb4bitParametrization instance"

    # Check quantization state properties
    quant_state = parametrization.quant_state
    assert isinstance(quant_state, F.QuantState), "Should have QuantState"
    assert quant_state.quant_type == "nf4", "Quant type should be preserved"
    assert quant_state.blocksize == blocksize, "Block size should be preserved"

    # Verify that state dict includes all necessary quantization metadata
    state_dict = module.state_dict()
    quant_state_dict = quant_state.as_dict(packed=True)

    for key in quant_state_dict.keys():
        full_key = f"weight_2d.{key}"
        assert full_key in state_dict, f"Quantization metadata '{full_key}' should be in state dict"
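The same metadata is reachable programmatically through the module's parametrization list. A short sketch following the test's access path, assuming a non-ROCm build (blocksize 64) and ParametrizeTestModule from above:

from bitsandbytes.nn.parametrize import Bnb4bitParametrization, replace_parameter_4bit

module = ParametrizeTestModule()
replace_parameter_4bit(module, "weight_2d", quant_type="nf4", blocksize=64)

p = module.parametrizations.weight_2d[0]  # the registered Bnb4bitParametrization
qs = p.quant_state                        # a bitsandbytes.functional.QuantState
print(qs.quant_type, qs.blocksize)        # -> nf4 64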
@pytest.mark.parametrize("device", get_available_devices())
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)
@pytest.mark.skipif(torch.__version__ < (2, 5), reason="state dict hook requires torch >= 2.5.0")
def test_multiple_parameters(device, dtype):
    """Test applying parametrization to multiple parameters in the same module."""
    if device == "hpu" and not is_supported_on_hpu("nf4", dtype):
        pytest.skip("Configuration not supported on HPU.")

    module = ParametrizeTestModule(device=device, dtype=dtype)
    original_2d = module.weight_2d.clone()
    original_3d = module.expert_weights.clone()

    # Apply parametrization to multiple parameters, with varying configurations
    replace_parameter_4bit(module, "weight_2d", quant_type="nf4", blocksize=128)
    replace_parameter_4bit(module, "expert_weights", quant_type="fp4", blocksize=256)

    # Verify both parameters are parametrized and work correctly
    reconstructed_2d = module.weight_2d
    reconstructed_3d = module.expert_weights

    assert reconstructed_2d.shape == original_2d.shape, "2D parameter shape should be preserved"
    assert reconstructed_3d.shape == original_3d.shape, "3D parameter shape should be preserved"

    # Check that state dict includes quantization info for both parameters
    state_dict = module.state_dict()
    assert "weight_2d" in state_dict, "2D parameter should be in state dict"
    assert "expert_weights" in state_dict, "3D parameter should be in state dict"
    assert "weight_2d.absmax" in state_dict, "2D parameter quantization metadata should be saved"
    assert "expert_weights.absmax" in state_dict, "3D parameter quantization metadata should be saved"
@pytest.mark.parametrize("device", get_available_devices())
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)
@pytest.mark.parametrize(
    "blocksize",
    [64, 128, 256] if not HIP_ENVIRONMENT else [128, 256],
)
def test_different_blocksizes(device, dtype, blocksize):
    """Test parametrization with different block sizes to verify flexibility."""
    if device == "hpu" and not is_supported_on_hpu("nf4", dtype):
        pytest.skip("Configuration not supported on HPU.")

    module = ParametrizeTestModule(device=device, dtype=dtype)
    original_param = module.expert_weights.clone()

    # Apply parametrization with specified block size
    replace_parameter_4bit(module, "expert_weights", quant_type="nf4", blocksize=blocksize)

    # Verify reconstruction works with different block sizes
    reconstructed = module.expert_weights
    assert reconstructed.shape == original_param.shape, "Shape should be preserved"
    assert reconstructed.device.type == device, "Device should match"

    # Verify quantization quality using error calculation approach from functional tests
    err = (original_param - reconstructed.detach()).abs().float()
    relerr = (err / (original_param.abs().float() + 1e-8)).mean()
    err_mean = err.mean()

    # Expected error bounds from functional tests (using NF4 bounds since that's what we're testing)
    expected_abs = {64: 0.072792, 128: 0.076835, 256: 0.080326}
    expected_rel = {64: 0.203299, 128: 0.215252, 256: 0.226044}

    assert err_mean < expected_abs[blocksize] + 0.01, (
        f"Mean abs error {err_mean:.6f} too high for blocksize {blocksize}"
    )
    assert relerr < expected_rel[blocksize] + 0.02, f"Mean rel error {relerr:.6f} too high for blocksize {blocksize}"
assert err_mean < 0.5, f\"Forward pass mean abs error {err_mean:.6f} too high\"\n assert relerr < 2.0, f\"Forward pass mean rel error {relerr:.6f} too high\"\n\n\ndef test_error_conditions():\n \"\"\"Test that proper errors are raised for invalid inputs.\"\"\"\n module = ParametrizeTestModule()\n\n # Test AttributeError for non-existent parameter\n with pytest.raises(AttributeError, match=\"Module does not have parameter 'nonexistent'\"):\n replace_parameter_4bit(module, \"nonexistent\")\n\n # Test TypeError for non-Parameter attribute\n with pytest.raises(TypeError, match=\"Parameter 'not_param' is not an instance of nn.Parameter\"):\n replace_parameter_4bit(module, \"not_param\")\n\n # Test same errors for prequantized version\n with pytest.raises(AttributeError, match=\"Module does not have parameter 'nonexistent'\"):\n replace_parameter_4bit_prequantized(module, \"nonexistent\", {}, torch.device(\"cpu\"))\n\n with pytest.raises(TypeError, match=\"Parameter 'not_param' is not an instance of nn.Parameter\"):\n replace_parameter_4bit_prequantized(module, \"not_param\", {}, torch.device(\"cpu\"))\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n@pytest.mark.skipif(torch.__version__ < (2, 5), reason=\"state dict hook requires torch >= 2.5.0\")\ndef test_quant_state_preservation(device, dtype):\n \"\"\"Test that quantization state is properly preserved and accessible.\"\"\"\n if device == \"hpu\" and not is_supported_on_hpu(\"nf4\", dtype):\n pytest.skip(\"Configuration not supported on HPU.\")\n\n module = ParametrizeTestModule(device=device, dtype=dtype)\n\n blocksize = 128 if HIP_ENVIRONMENT else 64\n\n # Apply parametrization with specific settings\n replace_parameter_4bit(module, \"weight_2d\", quant_type=\"nf4\", compress_statistics=True, blocksize=blocksize)\n\n # Verify that quantization state is accessible through parametrization\n parametrization = module.parametrizations.weight_2d[0]\n assert isinstance(parametrization, Bnb4bitParametrization), \"Should be Bnb4bitParametrization instance\"","source_hash":"d9c2ec16607ed41fb16dd5fdf9491951435e9507af6cb4ff4171d736c086e5ed","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_parametrize.test_quant_state_preservation","uri":"program://bitsandbytes/function/tests.test_parametrize.test_quant_state_preservation#L263-L291","kind":"function","name":"test_quant_state_preservation","path":"tests/test_parametrize.py","language":"python","start_line":263,"end_line":291,"context_start_line":243,"context_end_line":311,"code":"\n # Test AttributeError for non-existent parameter\n with pytest.raises(AttributeError, match=\"Module does not have parameter 'nonexistent'\"):\n replace_parameter_4bit(module, \"nonexistent\")\n\n # Test TypeError for non-Parameter attribute\n with pytest.raises(TypeError, match=\"Parameter 'not_param' is not an instance of nn.Parameter\"):\n replace_parameter_4bit(module, \"not_param\")\n\n # Test same errors for prequantized version\n with pytest.raises(AttributeError, match=\"Module does not have parameter 'nonexistent'\"):\n replace_parameter_4bit_prequantized(module, \"nonexistent\", {}, torch.device(\"cpu\"))\n\n with pytest.raises(TypeError, match=\"Parameter 'not_param' is not an instance of nn.Parameter\"):\n replace_parameter_4bit_prequantized(module, \"not_param\", {}, torch.device(\"cpu\"))\n\n\n@pytest.mark.parametrize(\"device\", 
get_available_devices())\n@pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n@pytest.mark.skipif(torch.__version__ < (2, 5), reason=\"state dict hook requires torch >= 2.5.0\")\ndef test_quant_state_preservation(device, dtype):\n \"\"\"Test that quantization state is properly preserved and accessible.\"\"\"\n if device == \"hpu\" and not is_supported_on_hpu(\"nf4\", dtype):\n pytest.skip(\"Configuration not supported on HPU.\")\n\n module = ParametrizeTestModule(device=device, dtype=dtype)\n\n blocksize = 128 if HIP_ENVIRONMENT else 64\n\n # Apply parametrization with specific settings\n replace_parameter_4bit(module, \"weight_2d\", quant_type=\"nf4\", compress_statistics=True, blocksize=blocksize)\n\n # Verify that quantization state is accessible through parametrization\n parametrization = module.parametrizations.weight_2d[0]\n assert isinstance(parametrization, Bnb4bitParametrization), \"Should be Bnb4bitParametrization instance\"\n\n # Check quantization state properties\n quant_state = parametrization.quant_state\n assert isinstance(quant_state, F.QuantState), \"Should have QuantState\"\n assert quant_state.quant_type == \"nf4\", \"Quant type should be preserved\"\n assert quant_state.blocksize == blocksize, \"Block size should be preserved\"\n\n # Verify that state dict includes all necessary quantization metadata\n state_dict = module.state_dict()\n quant_state_dict = quant_state.as_dict(packed=True)\n\n for key in quant_state_dict.keys():\n full_key = f\"weight_2d.{key}\"\n assert full_key in state_dict, f\"Quantization metadata '{full_key}' should be in state dict\"\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n@pytest.mark.skipif(torch.__version__ < (2, 5), reason=\"state dict hook requires torch >= 2.5.0\")\ndef test_multiple_parameters(device, dtype):\n \"\"\"Test applying parametrization to multiple parameters in the same module.\"\"\"\n if device == \"hpu\" and not is_supported_on_hpu(\"nf4\", dtype):\n pytest.skip(\"Configuration not supported on HPU.\")\n\n module = ParametrizeTestModule(device=device, dtype=dtype)\n original_2d = module.weight_2d.clone()\n original_3d = module.expert_weights.clone()\n\n # Apply parametrization to multiple parameters, with varying configurations\n replace_parameter_4bit(module, \"weight_2d\", quant_type=\"nf4\", blocksize=128)\n replace_parameter_4bit(module, \"expert_weights\", quant_type=\"fp4\", blocksize=256)\n\n # Verify both parameters are parametrized and work correctly\n reconstructed_2d = module.weight_2d","source_hash":"d9c2ec16607ed41fb16dd5fdf9491951435e9507af6cb4ff4171d736c086e5ed","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_parametrize.test_multiple_parameters","uri":"program://bitsandbytes/function/tests.test_parametrize.test_multiple_parameters#L297-L322","kind":"function","name":"test_multiple_parameters","path":"tests/test_parametrize.py","language":"python","start_line":297,"end_line":322,"context_start_line":277,"context_end_line":342,"code":" assert isinstance(parametrization, Bnb4bitParametrization), \"Should be Bnb4bitParametrization instance\"\n\n # Check quantization state properties\n quant_state = parametrization.quant_state\n assert isinstance(quant_state, F.QuantState), \"Should have QuantState\"\n assert quant_state.quant_type == \"nf4\", \"Quant type should be preserved\"\n assert quant_state.blocksize == 
blocksize, \"Block size should be preserved\"\n\n # Verify that state dict includes all necessary quantization metadata\n state_dict = module.state_dict()\n quant_state_dict = quant_state.as_dict(packed=True)\n\n for key in quant_state_dict.keys():\n full_key = f\"weight_2d.{key}\"\n assert full_key in state_dict, f\"Quantization metadata '{full_key}' should be in state dict\"\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n@pytest.mark.skipif(torch.__version__ < (2, 5), reason=\"state dict hook requires torch >= 2.5.0\")\ndef test_multiple_parameters(device, dtype):\n \"\"\"Test applying parametrization to multiple parameters in the same module.\"\"\"\n if device == \"hpu\" and not is_supported_on_hpu(\"nf4\", dtype):\n pytest.skip(\"Configuration not supported on HPU.\")\n\n module = ParametrizeTestModule(device=device, dtype=dtype)\n original_2d = module.weight_2d.clone()\n original_3d = module.expert_weights.clone()\n\n # Apply parametrization to multiple parameters, with varying configurations\n replace_parameter_4bit(module, \"weight_2d\", quant_type=\"nf4\", blocksize=128)\n replace_parameter_4bit(module, \"expert_weights\", quant_type=\"fp4\", blocksize=256)\n\n # Verify both parameters are parametrized and work correctly\n reconstructed_2d = module.weight_2d\n reconstructed_3d = module.expert_weights\n\n assert reconstructed_2d.shape == original_2d.shape, \"2D parameter shape should be preserved\"\n assert reconstructed_3d.shape == original_3d.shape, \"3D parameter shape should be preserved\"\n\n # Check that state dict includes quantization info for both parameters\n state_dict = module.state_dict()\n assert \"weight_2d\" in state_dict, \"2D parameter should be in state dict\"\n assert \"expert_weights\" in state_dict, \"3D parameter should be in state dict\"\n assert \"weight_2d.absmax\" in state_dict, \"2D parameter quantization metadata should be saved\"\n assert \"expert_weights.absmax\" in state_dict, \"3D parameter quantization metadata should be saved\"\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n@pytest.mark.parametrize(\n \"blocksize\",\n [64, 128, 256] if not HIP_ENVIRONMENT else [128, 256],\n)\ndef test_different_blocksizes(device, dtype, blocksize):\n \"\"\"Test parametrization with different block sizes to verify flexibility.\"\"\"\n if device == \"hpu\" and not is_supported_on_hpu(\"nf4\", dtype):\n pytest.skip(\"Configuration not supported on HPU.\")\n\n module = ParametrizeTestModule(device=device, dtype=dtype)\n original_param = module.expert_weights.clone()\n\n # Apply parametrization with specified block size\n replace_parameter_4bit(module, \"expert_weights\", quant_type=\"nf4\", blocksize=blocksize)\n\n # Verify reconstruction works with different block sizes","source_hash":"d9c2ec16607ed41fb16dd5fdf9491951435e9507af6cb4ff4171d736c086e5ed","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_parametrize.test_different_blocksizes","uri":"program://bitsandbytes/function/tests.test_parametrize.test_different_blocksizes#L331-L359","kind":"function","name":"test_different_blocksizes","path":"tests/test_parametrize.py","language":"python","start_line":331,"end_line":359,"context_start_line":311,"context_end_line":379,"code":" reconstructed_2d = module.weight_2d\n reconstructed_3d = 
module.expert_weights\n\n assert reconstructed_2d.shape == original_2d.shape, \"2D parameter shape should be preserved\"\n assert reconstructed_3d.shape == original_3d.shape, \"3D parameter shape should be preserved\"\n\n # Check that state dict includes quantization info for both parameters\n state_dict = module.state_dict()\n assert \"weight_2d\" in state_dict, \"2D parameter should be in state dict\"\n assert \"expert_weights\" in state_dict, \"3D parameter should be in state dict\"\n assert \"weight_2d.absmax\" in state_dict, \"2D parameter quantization metadata should be saved\"\n assert \"expert_weights.absmax\" in state_dict, \"3D parameter quantization metadata should be saved\"\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n@pytest.mark.parametrize(\n \"blocksize\",\n [64, 128, 256] if not HIP_ENVIRONMENT else [128, 256],\n)\ndef test_different_blocksizes(device, dtype, blocksize):\n \"\"\"Test parametrization with different block sizes to verify flexibility.\"\"\"\n if device == \"hpu\" and not is_supported_on_hpu(\"nf4\", dtype):\n pytest.skip(\"Configuration not supported on HPU.\")\n\n module = ParametrizeTestModule(device=device, dtype=dtype)\n original_param = module.expert_weights.clone()\n\n # Apply parametrization with specified block size\n replace_parameter_4bit(module, \"expert_weights\", quant_type=\"nf4\", blocksize=blocksize)\n\n # Verify reconstruction works with different block sizes\n reconstructed = module.expert_weights\n assert reconstructed.shape == original_param.shape, \"Shape should be preserved\"\n assert reconstructed.device.type == device, \"Device should match\"\n\n # Verify quantization quality using error calculation approach from functional tests\n err = (original_param - reconstructed.detach()).abs().float()\n relerr = (err / (original_param.abs().float() + 1e-8)).mean()\n err_mean = err.mean()\n\n # Expected error bounds from functional tests (using NF4 bounds since that's what we're testing)\n expected_abs = {64: 0.072792, 128: 0.076835, 256: 0.080326}\n expected_rel = {64: 0.203299, 128: 0.215252, 256: 0.226044}\n\n assert err_mean < expected_abs[blocksize] + 0.01, (\n f\"Mean abs error {err_mean:.6f} too high for blocksize {blocksize}\"\n )\n assert relerr < expected_rel[blocksize] + 0.02, f\"Mean rel error {relerr:.6f} too high for blocksize {blocksize}\"\n\n\ndef test_parametrization_forward_method():\n \"\"\"Test the Bnb4bitParametrization forward method directly.\"\"\"\n device = \"cpu\"\n\n # Create test tensor and manually quantize it\n original_tensor = torch.randn(64, 32, dtype=torch.float32, device=device)\n quantized_data, quant_state = F.quantize_4bit(original_tensor, quant_type=\"nf4\")\n\n # Create parametrization instance\n parametrization = Bnb4bitParametrization(quant_state)\n\n # Test forward pass (dequantization)\n dequantized = parametrization.forward(quantized_data)\n\n # Verify dequantization produces correct output\n assert dequantized.shape == original_tensor.shape, \"Shape should be preserved during dequantization\"\n assert dequantized.dtype == torch.float32, \"dtype should be preserved\"\n assert dequantized.device == original_tensor.device, \"Device should be preserved\"","source_hash":"d9c2ec16607ed41fb16dd5fdf9491951435e9507af6cb4ff4171d736c086e5ed","truncated":false} 
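The parametrization tests above all exercise the same mechanism: a packed 4-bit storage tensor plus a QuantState, exposed through torch.nn.utils.parametrize so that attribute access returns the dequantized tensor. A minimal sketch of that mechanism follows, assuming a CUDA device with bitsandbytes 4-bit kernels available; Dequant4bit is a hypothetical stand-in for the library's Bnb4bitParametrization, not its actual implementation, and replace_parameter_4bit is assumed to wrap roughly these steps.

import torch
import torch.nn as nn
import torch.nn.utils.parametrize as parametrize

import bitsandbytes.functional as F


class Dequant4bit(nn.Module):
    # Hypothetical stand-in: holds the QuantState captured at quantization time
    # and dequantizes the packed uint8 storage each time the attribute is read.
    def __init__(self, quant_state):
        super().__init__()
        self.quant_state = quant_state

    def forward(self, packed):
        return F.dequantize_4bit(packed, self.quant_state)


m = nn.Linear(64, 64).cuda()
packed, qs = F.quantize_4bit(m.weight.data, quant_type="nf4")
# Swap in the packed storage; unsafe=True because its shape and dtype differ
# from the dequantized tensor that the parametrization returns.
m.weight = nn.Parameter(packed, requires_grad=False)
parametrize.register_parametrization(m, "weight", Dequant4bit(qs), unsafe=True)
assert m.weight.shape == (64, 64)  # access now yields the dequantized weight

The clean state-dict layout the tests assert (packed tensor plus absmax/quant_map/quant_state entries, and no parametrizations.* keys) additionally depends on the serialization hooks that replace_parameter_4bit installs; this sketch omits those.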
{"repo_id":"bitsandbytes","entity_id":"py:tests.test_parametrize.test_parametrization_forward_method","uri":"program://bitsandbytes/function/tests.test_parametrize.test_parametrization_forward_method#L362-L388","kind":"function","name":"test_parametrization_forward_method","path":"tests/test_parametrize.py","language":"python","start_line":362,"end_line":388,"context_start_line":342,"context_end_line":408,"code":" # Verify reconstruction works with different block sizes\n reconstructed = module.expert_weights\n assert reconstructed.shape == original_param.shape, \"Shape should be preserved\"\n assert reconstructed.device.type == device, \"Device should match\"\n\n # Verify quantization quality using error calculation approach from functional tests\n err = (original_param - reconstructed.detach()).abs().float()\n relerr = (err / (original_param.abs().float() + 1e-8)).mean()\n err_mean = err.mean()\n\n # Expected error bounds from functional tests (using NF4 bounds since that's what we're testing)\n expected_abs = {64: 0.072792, 128: 0.076835, 256: 0.080326}\n expected_rel = {64: 0.203299, 128: 0.215252, 256: 0.226044}\n\n assert err_mean < expected_abs[blocksize] + 0.01, (\n f\"Mean abs error {err_mean:.6f} too high for blocksize {blocksize}\"\n )\n assert relerr < expected_rel[blocksize] + 0.02, f\"Mean rel error {relerr:.6f} too high for blocksize {blocksize}\"\n\n\ndef test_parametrization_forward_method():\n \"\"\"Test the Bnb4bitParametrization forward method directly.\"\"\"\n device = \"cpu\"\n\n # Create test tensor and manually quantize it\n original_tensor = torch.randn(64, 32, dtype=torch.float32, device=device)\n quantized_data, quant_state = F.quantize_4bit(original_tensor, quant_type=\"nf4\")\n\n # Create parametrization instance\n parametrization = Bnb4bitParametrization(quant_state)\n\n # Test forward pass (dequantization)\n dequantized = parametrization.forward(quantized_data)\n\n # Verify dequantization produces correct output\n assert dequantized.shape == original_tensor.shape, \"Shape should be preserved during dequantization\"\n assert dequantized.dtype == torch.float32, \"dtype should be preserved\"\n assert dequantized.device == original_tensor.device, \"Device should be preserved\"\n\n # Check that dequantization approximates original using mean error calculation\n err = (original_tensor - dequantized.detach()).abs().float()\n relerr = (err / (original_tensor.abs().float() + 1e-8)).mean()\n err_mean = err.mean()\n\n # Use NF4 bounds from functional tests with small margin\n assert err_mean < 0.08, f\"Mean abs error {err_mean:.6f} too high\"\n assert relerr < 0.25, f\"Mean rel error {relerr:.6f} too high\"\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\ndef test_gradient_behavior(device, dtype):\n \"\"\"Test that quantized parameters have proper gradient behavior.\"\"\"\n if device == \"hpu\" and not is_supported_on_hpu(\"nf4\", dtype):\n pytest.skip(\"Configuration not supported on HPU.\")\n\n module = ParametrizeTestModule(device=device, dtype=dtype)\n\n # Ensure original parameter requires gradients\n module.weight_2d.requires_grad_(True)\n assert module.weight_2d.requires_grad, \"Original parameter should require gradients\"\n\n # Apply quantization parametrization\n replace_parameter_4bit(module, \"weight_2d\", quant_type=\"nf4\")\n\n # Verify that quantized parameters don't require gradients (expected behavior)\n # The underlying quantized 
parameter should have requires_grad=False","source_hash":"d9c2ec16607ed41fb16dd5fdf9491951435e9507af6cb4ff4171d736c086e5ed","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_parametrize.test_gradient_behavior","uri":"program://bitsandbytes/function/tests.test_parametrize.test_gradient_behavior#L393-L411","kind":"function","name":"test_gradient_behavior","path":"tests/test_parametrize.py","language":"python","start_line":393,"end_line":411,"context_start_line":373,"context_end_line":411,"code":" # Test forward pass (dequantization)\n dequantized = parametrization.forward(quantized_data)\n\n # Verify dequantization produces correct output\n assert dequantized.shape == original_tensor.shape, \"Shape should be preserved during dequantization\"\n assert dequantized.dtype == torch.float32, \"dtype should be preserved\"\n assert dequantized.device == original_tensor.device, \"Device should be preserved\"\n\n # Check that dequantization approximates original using mean error calculation\n err = (original_tensor - dequantized.detach()).abs().float()\n relerr = (err / (original_tensor.abs().float() + 1e-8)).mean()\n err_mean = err.mean()\n\n # Use NF4 bounds from functional tests with small margin\n assert err_mean < 0.08, f\"Mean abs error {err_mean:.6f} too high\"\n assert relerr < 0.25, f\"Mean rel error {relerr:.6f} too high\"\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\ndef test_gradient_behavior(device, dtype):\n \"\"\"Test that quantized parameters have proper gradient behavior.\"\"\"\n if device == \"hpu\" and not is_supported_on_hpu(\"nf4\", dtype):\n pytest.skip(\"Configuration not supported on HPU.\")\n\n module = ParametrizeTestModule(device=device, dtype=dtype)\n\n # Ensure original parameter requires gradients\n module.weight_2d.requires_grad_(True)\n assert module.weight_2d.requires_grad, \"Original parameter should require gradients\"\n\n # Apply quantization parametrization\n replace_parameter_4bit(module, \"weight_2d\", quant_type=\"nf4\")\n\n # Verify that quantized parameters don't require gradients (expected behavior)\n # The underlying quantized parameter should have requires_grad=False\n # The dequantized output should also not require gradients\n reconstructed = module.weight_2d\n assert not reconstructed.requires_grad, \"Dequantized parameter should not require gradients\"","source_hash":"d9c2ec16607ed41fb16dd5fdf9491951435e9507af6cb4ff4171d736c086e5ed","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_parametrize.__init__","uri":"program://bitsandbytes/function/tests.test_parametrize.__init__#L203-L206","kind":"function","name":"__init__","path":"tests/test_parametrize.py","language":"python","start_line":203,"end_line":206,"context_start_line":183,"context_end_line":226,"code":"\n # Verify parametrization internals are NOT saved (clean state dict)\n assert \"parametrizations.expert_weights.original\" not in state_dict, (\n \"Internal parametrization keys should not be saved\"\n )\n\n # Test that the parameter can be accessed after state dict creation\n reconstructed = module.expert_weights\n assert reconstructed.shape == (8, 512, 256), \"Shape should be preserved\"\n assert reconstructed.dtype == dtype, \"dtype should match\"\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\ndef 
test_moe_realistic_forward(device, dtype):\n \"\"\"Test realistic MoE forward computation with quantized expert weights.\"\"\"\n if device == \"hpu\" and not is_supported_on_hpu(\"nf4\", dtype):\n pytest.skip(\"Configuration not supported on HPU.\")\n\n class SimpleMoE(nn.Module):\n def __init__(self, device, dtype):\n super().__init__()\n # Expert weights: [num_experts, input_dim, output_dim]\n self.expert_weights = nn.Parameter(torch.randn(4, 32, 64, dtype=dtype, device=device))\n\n def forward(self, x, expert_idx=0):\n # Select and use specific expert weight matrix\n expert_weight = self.expert_weights[expert_idx] # Shape: [input_dim, output_dim]\n return torch.matmul(x, expert_weight)\n\n module = SimpleMoE(device=device, dtype=dtype)\n x = torch.randn(8, 32, dtype=dtype, device=device)\n\n # Get reference output before quantization\n with torch.no_grad():\n reference_output = module(x, expert_idx=1)\n\n # Apply 4-bit quantization to expert weights\n replace_parameter_4bit(module, \"expert_weights\", quant_type=\"nf4\")\n\n # Get output after quantization - should be very close to original\n with torch.no_grad():\n quantized_output = module(x, expert_idx=1)\n","source_hash":"d9c2ec16607ed41fb16dd5fdf9491951435e9507af6cb4ff4171d736c086e5ed","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_parametrize.MoEModule","uri":"program://bitsandbytes/class/tests.test_parametrize.MoEModule#L101-L104","kind":"class","name":"MoEModule","path":"tests/test_parametrize.py","language":"python","start_line":101,"end_line":104,"context_start_line":81,"context_end_line":124,"code":" 64: {\"abs\": 0.096545, \"rel\": 0.260130},\n 128: {\"abs\": 0.102947, \"rel\": 0.275734},\n 256: {\"abs\": 0.108685, \"rel\": 0.289842},\n },\n }\n\n assert err_mean < expected_errors[quant_type][blocksize][\"abs\"] + 1e-3, f\"Mean abs error {err_mean:.6f} too high\"\n assert relerr < expected_errors[quant_type][blocksize][\"rel\"] + 1e-3, f\"Mean rel error {relerr:.6f} too high\"\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\ndef test_moe_parameter_shape(device, dtype):\n \"\"\"Test parametrization with MoE-style parameter shape\"\"\"\n if device == \"hpu\" and not is_supported_on_hpu(\"nf4\", dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n param_shape = (8, 64, 32)\n\n # Create module with custom parameter shape directly on target device\n class MoEModule(nn.Module):\n def __init__(self, device, dtype):\n super().__init__()\n self.param = nn.Parameter(torch.randn(*param_shape, dtype=dtype, device=device))\n\n module = MoEModule(device=device, dtype=dtype)\n original_param = module.param.clone()\n\n # Apply quantization parametrization\n replace_parameter_4bit(module, \"param\", quant_type=\"nf4\")\n\n # Verify reconstruction maintains all properties\n reconstructed = module.param\n assert reconstructed.shape == param_shape, f\"Shape should be preserved: {reconstructed.shape} vs {param_shape}\"\n assert reconstructed.dtype == dtype, \"dtype should match original\"\n assert reconstructed.device.type == device, \"Device should match target\"\n\n # Verify quantization quality using error calculation approach from functional tests\n err = (original_param - reconstructed.detach()).abs().float()\n relerr = (err / (original_param.abs().float() + 1e-8)).mean()\n err_mean = err.mean()\n\n # Use slightly looser bounds for higher dimensional tensors\n abs_bound = 
0.085 # NF4 baseline + margin","source_hash":"d9c2ec16607ed41fb16dd5fdf9491951435e9507af6cb4ff4171d736c086e5ed","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_parametrize.SimpleMoE","uri":"program://bitsandbytes/class/tests.test_parametrize.SimpleMoE#L202-L211","kind":"class","name":"SimpleMoE","path":"tests/test_parametrize.py","language":"python","start_line":202,"end_line":211,"context_start_line":182,"context_end_line":231,"code":" assert f\"expert_weights.quant_state.bitsandbytes__{quant_type}\" in state_dict, \"Quant state should be saved\"\n\n # Verify parametrization internals are NOT saved (clean state dict)\n assert \"parametrizations.expert_weights.original\" not in state_dict, (\n \"Internal parametrization keys should not be saved\"\n )\n\n # Test that the parameter can be accessed after state dict creation\n reconstructed = module.expert_weights\n assert reconstructed.shape == (8, 512, 256), \"Shape should be preserved\"\n assert reconstructed.dtype == dtype, \"dtype should match\"\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\ndef test_moe_realistic_forward(device, dtype):\n \"\"\"Test realistic MoE forward computation with quantized expert weights.\"\"\"\n if device == \"hpu\" and not is_supported_on_hpu(\"nf4\", dtype):\n pytest.skip(\"Configuration not supported on HPU.\")\n\n class SimpleMoE(nn.Module):\n def __init__(self, device, dtype):\n super().__init__()\n # Expert weights: [num_experts, input_dim, output_dim]\n self.expert_weights = nn.Parameter(torch.randn(4, 32, 64, dtype=dtype, device=device))\n\n def forward(self, x, expert_idx=0):\n # Select and use specific expert weight matrix\n expert_weight = self.expert_weights[expert_idx] # Shape: [input_dim, output_dim]\n return torch.matmul(x, expert_weight)\n\n module = SimpleMoE(device=device, dtype=dtype)\n x = torch.randn(8, 32, dtype=dtype, device=device)\n\n # Get reference output before quantization\n with torch.no_grad():\n reference_output = module(x, expert_idx=1)\n\n # Apply 4-bit quantization to expert weights\n replace_parameter_4bit(module, \"expert_weights\", quant_type=\"nf4\")\n\n # Get output after quantization - should be very close to original\n with torch.no_grad():\n quantized_output = module(x, expert_idx=1)\n\n # Verify outputs match within quantization tolerance\n assert quantized_output.shape == reference_output.shape, \"Output shape should be preserved\"\n\n # Calculate error like functional tests (matrix ops may amplify quantization errors)\n err = (reference_output - quantized_output).abs().float()","source_hash":"d9c2ec16607ed41fb16dd5fdf9491951435e9507af6cb4ff4171d736c086e5ed","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_parametrize.forward","uri":"program://bitsandbytes/function/tests.test_parametrize.forward#L208-L211","kind":"function","name":"forward","path":"tests/test_parametrize.py","language":"python","start_line":208,"end_line":211,"context_start_line":188,"context_end_line":231,"code":"\n # Test that the parameter can be accessed after state dict creation\n reconstructed = module.expert_weights\n assert reconstructed.shape == (8, 512, 256), \"Shape should be preserved\"\n assert reconstructed.dtype == dtype, \"dtype should match\"\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], 
ids=describe_dtype)\ndef test_moe_realistic_forward(device, dtype):\n \"\"\"Test realistic MoE forward computation with quantized expert weights.\"\"\"\n if device == \"hpu\" and not is_supported_on_hpu(\"nf4\", dtype):\n pytest.skip(\"Configuration not supported on HPU.\")\n\n class SimpleMoE(nn.Module):\n def __init__(self, device, dtype):\n super().__init__()\n # Expert weights: [num_experts, input_dim, output_dim]\n self.expert_weights = nn.Parameter(torch.randn(4, 32, 64, dtype=dtype, device=device))\n\n def forward(self, x, expert_idx=0):\n # Select and use specific expert weight matrix\n expert_weight = self.expert_weights[expert_idx] # Shape: [input_dim, output_dim]\n return torch.matmul(x, expert_weight)\n\n module = SimpleMoE(device=device, dtype=dtype)\n x = torch.randn(8, 32, dtype=dtype, device=device)\n\n # Get reference output before quantization\n with torch.no_grad():\n reference_output = module(x, expert_idx=1)\n\n # Apply 4-bit quantization to expert weights\n replace_parameter_4bit(module, \"expert_weights\", quant_type=\"nf4\")\n\n # Get output after quantization - should be very close to original\n with torch.no_grad():\n quantized_output = module(x, expert_idx=1)\n\n # Verify outputs match within quantization tolerance\n assert quantized_output.shape == reference_output.shape, \"Output shape should be preserved\"\n\n # Calculate error like functional tests (matrix ops may amplify quantization errors)\n err = (reference_output - quantized_output).abs().float()","source_hash":"d9c2ec16607ed41fb16dd5fdf9491951435e9507af6cb4ff4171d736c086e5ed","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_modules","uri":"program://bitsandbytes/module/tests.test_modules#L1-L539","kind":"module","name":"tests.test_modules","path":"tests/test_modules.py","language":"python","start_line":1,"end_line":539,"context_start_line":1,"context_end_line":539,"code":"import inspect\n\nimport pytest\nimport torch\nfrom torch import nn\n\nimport bitsandbytes as bnb\nfrom tests.helpers import get_available_devices, id_formatter, is_supported_on_hpu\n\n\nclass MockArgs:\n def __init__(self, initial_data):\n for key in initial_data:\n setattr(self, key, initial_data[key])\n\n\nclass MLP8bit(torch.nn.Module):\n def __init__(self, dim1, dim2, has_fp16_weights=True, threshold=0.0):\n super().__init__()\n self.fc1 = bnb.nn.Linear8bitLt(\n dim1,\n dim2,\n has_fp16_weights=has_fp16_weights,\n threshold=threshold,\n )\n self.fc2 = bnb.nn.Linear8bitLt(\n dim2,\n dim1,\n has_fp16_weights=has_fp16_weights,\n threshold=threshold,\n )\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.fc2(x)\n return x\n\n\ndef get_args():\n args = MockArgs([])\n args.quant_type = \"vector\"\n args.use_8bit_training = \"full\"\n args.clip_freq = 9999\n return args\n\n\ndef assert_all_approx_close(a, b, atol=1e-8, rtol=1e-5, count=10):\n idx = torch.isclose(a, b, rtol=rtol, atol=atol)\n sumval = (idx == 0).sum().item()\n if sumval > count:\n print(f\"Too many values not close: assert {sumval} < {count}\")\n torch.testing.assert_close(a, b, rtol=rtol, atol=atol)\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"threshold\", [0.0, 3.0], ids=id_formatter(\"threshold\"))\ndef test_linear8bitlt_inference(device, threshold):\n l1 = bnb.nn.Linear8bitLt(32, 64, threshold=threshold, has_fp16_weights=False).to(device).half()\n assert l1.weight.device.type == device\n assert l1.weight.dtype == torch.int8\n\n l1.eval()\n for i in range(100):\n b1 = torch.randn(16, 8, 32, 
device=device).half()\n o1 = l1(b1)\n if i == 1:\n assert l1.state.CB is not None\n\n\n# TODO: Remove support for training int8 weights\n@pytest.mark.parametrize(\"device\", get_available_devices())\ndef test_linear8bitlt_accumulated_gradient(device):\n if device != \"cuda\":\n pytest.skip(\"Only supported on CUDA\")\n\n l1 = torch.nn.Sequential(*[bnb.nn.Linear8bitLt(32, 32).to(device).half() for i in range(2)])\n l2 = torch.nn.Sequential(*[torch.nn.Linear(32, 32).to(device).half() for i in range(2)])\n l1[0].weight.data.copy_(l2[0].weight.data)\n l1[1].weight.data.copy_(l2[1].weight.data)\n l1[0].bias.data.copy_(l2[0].bias.data)\n l1[1].bias.data.copy_(l2[1].bias.data)\n\n opt1 = bnb.optim.Adam32bit(l1.parameters(), lr=0.001)\n opt2 = bnb.optim.Adam32bit(l2.parameters(), lr=0.001)\n\n acc_steps = 10\n\n for i in range(15):\n b1 = torch.randn(16, 8, 32, device=device).half()\n o1 = l1(b1)\n o2 = l2(b1)\n loss1 = o1.mean()\n loss2 = o2.mean()\n loss1.backward()\n loss2.backward()\n if i == 2:\n assert l1[0].state.CB is not None\n assert l1[1].state.CB is not None\n\n if i > 0 and i % acc_steps == 0:\n opt1.step()\n opt1.zero_grad(True)\n opt2.step()\n opt2.zero_grad(True)\n assert_all_approx_close(l1[0].weight, l2[0].weight, rtol=1.05, atol=0.01, count=2)\n assert_all_approx_close(l1[1].weight, l2[1].weight, rtol=1.05, atol=0.01, count=2)\n # we do this copy because otherwise we have small divergences over time that add up\n l1[0].weight.data.copy_(l2[0].weight.data)\n l1[1].weight.data.copy_(l2[1].weight.data)\n l1[0].bias.data.copy_(l2[0].bias.data)\n l1[1].bias.data.copy_(l2[1].bias.data)\n else:\n assert_all_approx_close(l1[0].weight.grad, l2[0].weight.grad, rtol=1.05, atol=0.04, count=1)\n assert_all_approx_close(l1[1].weight.grad, l2[1].weight.grad, rtol=1.05, atol=0.04, count=1)\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"threshold\", [0.0, 2.0])\ndef test_linear8bitlt_no_fp16_weights(device, threshold):\n l1 = (\n bnb.nn.Linear8bitLt(\n 32,\n 64,\n threshold=threshold,\n has_fp16_weights=False,\n )\n .to(device)\n .half()\n )\n assert l1.weight.dtype == torch.int8\n\n l1.eval()\n for i in range(4):\n b1 = torch.randn(16, 8, 32, device=device, dtype=torch.float16)\n o1 = l1(b1)\n assert o1.dtype == torch.float16\n\n mlp = MLP8bit(32, 64, threshold=threshold, has_fp16_weights=False).to(device)\n assert mlp.fc1.weight.dtype == torch.int8\n assert mlp.fc2.weight.dtype == torch.int8\n\n for i in range(4):\n b1 = torch.randn(16, 8, 32, device=device, dtype=torch.float16)\n o1 = mlp(b1)\n assert o1.dtype == torch.float16\n if threshold > 0 and device not in (\"cpu\", \"xpu\"):\n assert mlp.fc1.state.idx is not None\n assert mlp.fc2.state.idx is not None\n\n mlp = MLP8bit(32, 64, threshold=threshold, has_fp16_weights=False).to(device).half()\n assert mlp.fc1.weight.dtype == torch.int8\n assert mlp.fc2.weight.dtype == torch.int8\n\n for i in range(4):\n b1 = torch.randn(16, 8, 32, device=device, dtype=torch.float16)\n o1 = mlp(b1)\n assert o1.dtype == torch.float16\n if threshold > 0 and device not in (\"cpu\", \"xpu\"):\n assert mlp.fc1.state.idx is not None\n assert mlp.fc2.state.idx is not None\n\n mlp = MLP8bit(32, 64, threshold=threshold, has_fp16_weights=False).half().to(device)\n\n for i in range(4):\n b1 = torch.randn(16, 8, 32, device=device, dtype=torch.float16)\n o1 = mlp(b1)\n assert o1.dtype == torch.float16\n if threshold > 0 and device not in (\"cpu\", \"xpu\"):\n assert mlp.fc1.state.idx is not None\n assert mlp.fc2.state.idx 
is not None\n    assert mlp.fc1.weight.dtype == torch.int8\n    assert mlp.fc2.weight.dtype == torch.int8\n\n    mlp = (\n        MLP8bit(\n            32,\n            64,\n            threshold=threshold,\n            has_fp16_weights=False,\n        )\n        .half()\n        .to(device)\n    )\n\n    for i in range(4):\n        b1 = torch.randn(16, 8, 32, device=device, dtype=torch.float16)\n        o1 = mlp(b1)\n        assert o1.dtype == torch.float16\n        if threshold > 0 and device not in (\"cpu\", \"xpu\"):\n            assert mlp.fc1.state.idx is not None\n            assert mlp.fc2.state.idx is not None\n    assert mlp.fc1.weight.dtype == torch.int8\n    assert mlp.fc2.weight.dtype == torch.int8\n    assert mlp.fc1.weight.device.type == device\n    assert mlp.fc2.weight.device.type == device\n\n    mlp = MLP8bit(\n        32,\n        64,\n        threshold=threshold,\n        has_fp16_weights=False,\n    )\n    w1, w2 = mlp.fc1.weight.clone().to(device), mlp.fc2.weight.clone().to(device)  # grab weights before quantization,\n    mlp = mlp.to(device).half()  # and this line triggers quantization\n\n    for i in range(4):\n        b1 = torch.randn(16, 8, 32, device=device, dtype=torch.float16)\n        o1 = mlp(b1)\n        assert o1.dtype == torch.float16\n        if threshold > 0 and device not in (\"cpu\", \"xpu\"):\n            assert mlp.fc1.state.idx is not None\n            assert mlp.fc2.state.idx is not None\n\n    assert mlp.fc1.weight.dtype == torch.int8\n    assert mlp.fc2.weight.dtype == torch.int8\n    assert mlp.fc1.weight.device.type == device\n    assert mlp.fc2.weight.device.type == device\n\n    b1 = torch.randn(16, 8, 32, device=device, requires_grad=True, dtype=torch.half)\n    o1 = mlp(b1)\n    assert o1.dtype == torch.float16\n    assert o1.requires_grad\n    grad_proj = torch.randn_like(o1)\n\n    mlp.zero_grad()\n    (o1 * grad_proj).sum().backward()\n    grad_ref = grad_proj.flatten(2) @ w2.half() @ w1.half()\n    scale = grad_ref.abs().mean()\n\n    torch.testing.assert_close(b1.grad, grad_ref, rtol=0, atol=0.05 * scale)\n    idx = torch.isclose(b1.grad, grad_ref, atol=0.01 * scale, rtol=0.1)\n    assert (idx == 0).sum().item() <= b1.numel() * 0.005\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\n    \"module\",\n    [\n        lambda n_in, n_out, bias=True: bnb.nn.Linear8bitLt(n_in, n_out, bias=bias, has_fp16_weights=False),\n        bnb.nn.LinearNF4,\n    ],\n    ids=[\"Int8Lt\", \"NF4\"],\n)\ndef test_linear_kbit_fp32_bias(device, module):\n    # casts model to fp16 -> int8 automatically\n    l1 = module(32, 64).to(device)\n    assert l1.weight.dtype in [torch.int8, torch.uint8]\n    assert l1.bias.dtype == torch.float32\n\n    for i in range(100):\n        b1 = torch.randn(16, 8, 32, device=device, dtype=torch.float16)\n        # the forward pass casts the fp32 bias to fp16 to match the input\n        o1 = l1(b1)\n        assert l1.bias.dtype == torch.float16\n\n    # casts model to fp16 -> int8 automatically\n    l1 = module(32, 64, bias=False).to(device)\n    assert l1.weight.dtype in [torch.int8, torch.uint8]\n    assert l1.bias is None\n\n    for i in range(100):\n        b1 = torch.randn(16, 8, 32, device=device, dtype=torch.float16)\n        o1 = l1(b1)\n        assert l1.bias is None\n\n\nmodule_dict = {\n    \"Int8Lt\": bnb.nn.Linear8bitLt,\n    \"4bit\": bnb.nn.Linear4bit,\n    \"FP4\": bnb.nn.LinearFP4,\n    \"NF4\": bnb.nn.LinearNF4,\n    \"FP4+C\": lambda d1, d2: bnb.nn.LinearFP4(d1, d2, compress_statistics=True),\n    \"NF4+C\": lambda d1, d2: bnb.nn.LinearNF4(d1, d2, compress_statistics=True),\n    \"NF4+fp32\": lambda d1, d2: bnb.nn.LinearNF4(d1, d2, compute_dtype=torch.float32),\n    \"NF4+fp16\": lambda d1, d2: bnb.nn.LinearNF4(d1, d2, compute_dtype=torch.float16),\n    \"NF4+bf16\": lambda d1, d2: bnb.nn.LinearNF4(d1, d2, compute_dtype=torch.bfloat16),\n}\n\n\n@pytest.mark.parametrize(\"device\", 
get_available_devices())\n@pytest.mark.parametrize(\"module\", module_dict.values(), ids=module_dict.keys())\n@pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16])\ndef test_kbit_backprop(device, module, dtype):\n    b = 16\n    dim1 = 36\n    dim2 = 84\n    # dim1 = 37\n    # dim2 = 83\n\n    ref = nn.Sequential(*[torch.nn.Linear(dim1, dim2), torch.nn.Linear(dim2, 128)])\n    torch.nn.init.kaiming_normal_(ref[0].weight)\n    torch.nn.init.kaiming_normal_(ref[1].weight)\n    ref[1].weight.requires_grad_(False)\n\n    kbit = nn.Sequential(*[torch.nn.Linear(dim1, dim2), module(dim2, 128)])\n\n    if (\n        device == \"hpu\"\n        and isinstance(kbit[1], bnb.nn.Linear4bit)\n        and not is_supported_on_hpu(kbit[1].weight.quant_type, dtype)\n    ):\n        pytest.skip(\"This configuration not supported on HPU\")\n\n    kbit[0].weight.detach().copy_(ref[0].weight)\n    kbit[1].weight.detach().copy_(ref[1].weight)\n    kbit[0].bias.detach().copy_(ref[0].bias)\n    kbit[1].bias.detach().copy_(ref[1].bias)\n    kbit[1].weight.requires_grad_(False)\n    ref = ref.to(device=device, dtype=dtype)\n    kbit = kbit.to(device=device, dtype=dtype)\n\n    errs1 = []\n    errs2 = []\n    relerrs1 = []\n    relerrs2 = []\n    for i in range(100):\n        batch = torch.randn(b, dim1, device=device, dtype=dtype)\n        out1 = ref(batch)\n        out2 = kbit(batch)\n        out1.mean().backward()\n        out2.mean().backward()\n\n        grad1 = ref[0].weight.grad\n        grad2 = kbit[0].weight.grad\n        bgrad1 = ref[0].bias.grad\n        bgrad2 = kbit[0].bias.grad\n\n        err1 = (out1 - out2).abs().float()\n        err2 = (grad1 - grad2).abs().float()\n        relerr1 = err1 / (out1.abs().float() + 1e-9)\n        relerr2 = err2 / (grad1.abs().float() + 1e-9)\n        errs1.append(err1.mean().item())\n        errs2.append(err2.mean().item())\n        relerrs1.append(relerr1.mean().item())\n        relerrs2.append(relerr2.mean().item())\n\n        # check the quantized layer instance, not the class/factory passed via module_dict\n        if isinstance(kbit[1], bnb.nn.Linear8bitLt):\n            assert_all_approx_close(grad1, grad2, atol=0.008, rtol=0.05, count=1)\n            torch.testing.assert_close(bgrad1, bgrad2, atol=0.008, rtol=0.05)\n        else:\n            assert_all_approx_close(grad1, grad2, atol=0.015, rtol=0.05, count=1)\n            torch.testing.assert_close(bgrad1, bgrad2, atol=0.02, rtol=0.05)\n        ref.zero_grad()\n        kbit.zero_grad()\n\n        assert kbit[0].weight.grad is None or kbit[0].weight.grad.sum().item() == 0\n        assert kbit[0].bias.grad is None or kbit[0].bias.grad.sum().item() == 0\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"embedding_dim\", [64, 65])\n@pytest.mark.parametrize(\"input_shape\", [(10,), (10, 10), (10, 10, 10)], ids=str)\n@pytest.mark.parametrize(\n    \"embedding_class,quant_storage\",\n    [\n        (bnb.nn.Embedding8bit, None),\n        (bnb.nn.EmbeddingFP4, torch.uint8),\n        (bnb.nn.EmbeddingFP4, torch.float32),\n        (bnb.nn.EmbeddingNF4, torch.uint8),\n        (bnb.nn.EmbeddingNF4, torch.float32),\n    ],\n    ids=lambda x: x.__name__ if inspect.isclass(x) else str(x),\n)\ndef test_embedding_lossless(device, embedding_class, input_shape, embedding_dim, quant_storage):\n    if device == \"hpu\":\n        if embedding_class is bnb.nn.EmbeddingFP4:\n            pytest.skip(\"FP4 is not supported on HPU\")\n        elif embedding_class is bnb.nn.EmbeddingNF4 and not is_supported_on_hpu(\"nf4\", torch.float32, quant_storage):\n            pytest.skip(\"This configuration is not supported on HPU\")\n\n    num_embeddings = 128\n\n    src_weight = (torch.randn((num_embeddings, embedding_dim), dtype=torch.float32) > 0).to(\n        torch.float32\n    ) * 2 - 1  # Embeddings filled with {-1, 1} values. 
It should compress losslessly\n\n emb_base = nn.Embedding(\n num_embeddings=num_embeddings,\n embedding_dim=embedding_dim,\n _freeze=True,\n _weight=src_weight,\n )\n if embedding_class is bnb.nn.Embedding8bit:\n e = embedding_class(num_embeddings=num_embeddings, embedding_dim=embedding_dim)\n else:\n e = embedding_class(num_embeddings=num_embeddings, embedding_dim=embedding_dim, quant_storage=quant_storage)\n\n e.load_state_dict(emb_base.state_dict())\n\n emb_base.to(device)\n e.to(device)\n\n input_tokens = torch.randint(low=0, high=num_embeddings, size=input_shape, device=device)\n\n torch.testing.assert_close(\n actual=e(input_tokens),\n expected=emb_base(input_tokens),\n )\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"embedding_dim\", [64, 65])\n@pytest.mark.parametrize(\"input_shape\", [(10,), (10, 10), (10, 10, 10)], ids=str)\n@pytest.mark.parametrize(\n \"embedding_class,quant_storage\",\n [\n (bnb.nn.Embedding8bit, None),\n (bnb.nn.EmbeddingFP4, torch.uint8),\n (bnb.nn.EmbeddingFP4, torch.float32),\n (bnb.nn.EmbeddingNF4, torch.uint8),\n (bnb.nn.EmbeddingNF4, torch.float32),\n ],\n ids=lambda x: x.__name__ if inspect.isclass(x) else str(x),\n)\ndef test_embedding_error(device, embedding_class, input_shape, embedding_dim, quant_storage):\n if device == \"hpu\":\n if embedding_class is bnb.nn.EmbeddingFP4:\n pytest.skip(\"FP4 is not supported on HPU\")\n elif embedding_class is bnb.nn.EmbeddingNF4 and not is_supported_on_hpu(\"nf4\", torch.float32, quant_storage):\n pytest.skip(\"This configuration is not supported on HPU\")\n\n is_8bit = embedding_class is bnb.nn.Embedding8bit\n\n num_embeddings = 128\n\n src_weight = torch.rand((num_embeddings, embedding_dim), dtype=torch.float32)\n\n emb_base = nn.Embedding(\n num_embeddings=num_embeddings,\n embedding_dim=embedding_dim,\n _freeze=True,\n _weight=src_weight,\n )\n if is_8bit:\n e = embedding_class(num_embeddings=num_embeddings, embedding_dim=embedding_dim)\n else:\n e = embedding_class(num_embeddings=num_embeddings, embedding_dim=embedding_dim, quant_storage=quant_storage)\n\n e.load_state_dict(emb_base.state_dict())\n\n emb_base.to(device)\n e.to(device)\n\n input_tokens = torch.randint(low=0, high=num_embeddings, size=input_shape, device=device)\n\n torch.testing.assert_close(\n actual=e(input_tokens),\n expected=emb_base(input_tokens),\n atol=0.05 if is_8bit else 0.20,\n rtol=0.0,\n )\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\ndef test_4bit_linear_warnings(device):\n dim1 = 64\n\n with pytest.warns(UserWarning, match=r\"inference or training\"):\n net = nn.Sequential(*[bnb.nn.Linear4bit(dim1, dim1, quant_type=\"nf4\") for i in range(10)])\n net = net.to(device)\n inp = torch.rand(10, dim1, device=device, dtype=torch.float16)\n net(inp)\n with pytest.warns(UserWarning, match=r\"inference.\"):\n net = nn.Sequential(*[bnb.nn.Linear4bit(dim1, dim1, quant_type=\"nf4\") for i in range(10)])\n net = net.to(device)\n inp = torch.rand(1, dim1, device=device, dtype=torch.float16)\n net(inp)\n\n with pytest.warns(UserWarning) as record:\n net = nn.Sequential(*[bnb.nn.Linear4bit(dim1, dim1, quant_type=\"nf4\") for i in range(10)])\n net = net.to(device)\n inp = torch.rand(10, dim1, device=device, dtype=torch.float16)\n net(inp)\n\n net = nn.Sequential(*[bnb.nn.Linear4bit(dim1, dim1, quant_type=\"nf4\") for i in range(10)])\n net = net.to(device)\n inp = torch.rand(1, dim1, device=device, dtype=torch.float16)\n net(inp)\n\n assert len(record) == 
2\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\ndef test_4bit_embedding_warnings(device):\n num_embeddings = 128\n default_block_size = 64\n\n with pytest.warns(UserWarning, match=r\"inference.\"):\n net = bnb.nn.Embedding4bit(\n num_embeddings=num_embeddings, embedding_dim=default_block_size + 1, quant_type=\"nf4\"\n )\n net.to(device)\n inp = torch.randint(low=0, high=num_embeddings, size=(1,), device=device)\n net(inp)\n\n\ndef test_4bit_embedding_weight_fsdp_fix(requires_cuda):\n num_embeddings = 64\n embedding_dim = 32\n\n module = bnb.nn.Embedding4bit(num_embeddings=num_embeddings, embedding_dim=embedding_dim)\n\n module.cuda()\n\n module.weight.quant_state = None\n\n input_tokens = torch.randint(low=0, high=num_embeddings, size=(1,), device=\"cuda\")\n\n module(input_tokens)\n\n assert module.weight.quant_state is not None\n\n\ndef test_4bit_linear_weight_fsdp_fix(requires_cuda):\n inp_size = 64\n out_size = 32\n\n module = bnb.nn.Linear4bit(inp_size, out_size)\n\n module.cuda()\n\n module.weight.quant_state = None\n\n input_tensor = torch.randn((1, inp_size), device=\"cuda\")\n\n module(input_tensor)\n\n assert module.weight.quant_state is not None\n\n\ndef test_embedding_not_implemented_error():\n with pytest.raises(NotImplementedError):\n emb = bnb.nn.Embedding4bit(32, 32)\n emb.state_dict()\n\n with pytest.raises(NotImplementedError):\n emb = bnb.nn.Embedding8bit(32, 32)\n emb.state_dict()","source_hash":"3d6ec09b5a60bb5ac1a84e28c30d7dc58ee0ed8a6d9069d61b1c866feb6100b7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_modules.MockArgs","uri":"program://bitsandbytes/class/tests.test_modules.MockArgs#L11-L14","kind":"class","name":"MockArgs","path":"tests/test_modules.py","language":"python","start_line":11,"end_line":14,"context_start_line":1,"context_end_line":34,"code":"import inspect\n\nimport pytest\nimport torch\nfrom torch import nn\n\nimport bitsandbytes as bnb\nfrom tests.helpers import get_available_devices, id_formatter, is_supported_on_hpu\n\n\nclass MockArgs:\n def __init__(self, initial_data):\n for key in initial_data:\n setattr(self, key, initial_data[key])\n\n\nclass MLP8bit(torch.nn.Module):\n def __init__(self, dim1, dim2, has_fp16_weights=True, threshold=0.0):\n super().__init__()\n self.fc1 = bnb.nn.Linear8bitLt(\n dim1,\n dim2,\n has_fp16_weights=has_fp16_weights,\n threshold=threshold,\n )\n self.fc2 = bnb.nn.Linear8bitLt(\n dim2,\n dim1,\n has_fp16_weights=has_fp16_weights,\n threshold=threshold,\n )\n\n def forward(self, x):\n x = self.fc1(x)","source_hash":"3d6ec09b5a60bb5ac1a84e28c30d7dc58ee0ed8a6d9069d61b1c866feb6100b7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_modules.MLP8bit","uri":"program://bitsandbytes/class/tests.test_modules.MLP8bit#L17-L36","kind":"class","name":"MLP8bit","path":"tests/test_modules.py","language":"python","start_line":17,"end_line":36,"context_start_line":1,"context_end_line":56,"code":"import inspect\n\nimport pytest\nimport torch\nfrom torch import nn\n\nimport bitsandbytes as bnb\nfrom tests.helpers import get_available_devices, id_formatter, is_supported_on_hpu\n\n\nclass MockArgs:\n def __init__(self, initial_data):\n for key in initial_data:\n setattr(self, key, initial_data[key])\n\n\nclass MLP8bit(torch.nn.Module):\n def __init__(self, dim1, dim2, has_fp16_weights=True, threshold=0.0):\n super().__init__()\n self.fc1 = bnb.nn.Linear8bitLt(\n dim1,\n dim2,\n has_fp16_weights=has_fp16_weights,\n threshold=threshold,\n )\n self.fc2 = 
bnb.nn.Linear8bitLt(\n dim2,\n dim1,\n has_fp16_weights=has_fp16_weights,\n threshold=threshold,\n )\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.fc2(x)\n return x\n\n\ndef get_args():\n args = MockArgs([])\n args.quant_type = \"vector\"\n args.use_8bit_training = \"full\"\n args.clip_freq = 9999\n return args\n\n\ndef assert_all_approx_close(a, b, atol=1e-8, rtol=1e-5, count=10):\n idx = torch.isclose(a, b, rtol=rtol, atol=atol)\n sumval = (idx == 0).sum().item()\n if sumval > count:\n print(f\"Too many values not close: assert {sumval} < {count}\")\n torch.testing.assert_close(a, b, rtol=rtol, atol=atol)\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"threshold\", [0.0, 3.0], ids=id_formatter(\"threshold\"))","source_hash":"3d6ec09b5a60bb5ac1a84e28c30d7dc58ee0ed8a6d9069d61b1c866feb6100b7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_modules.get_args","uri":"program://bitsandbytes/function/tests.test_modules.get_args#L39-L44","kind":"function","name":"get_args","path":"tests/test_modules.py","language":"python","start_line":39,"end_line":44,"context_start_line":19,"context_end_line":64,"code":" super().__init__()\n self.fc1 = bnb.nn.Linear8bitLt(\n dim1,\n dim2,\n has_fp16_weights=has_fp16_weights,\n threshold=threshold,\n )\n self.fc2 = bnb.nn.Linear8bitLt(\n dim2,\n dim1,\n has_fp16_weights=has_fp16_weights,\n threshold=threshold,\n )\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.fc2(x)\n return x\n\n\ndef get_args():\n args = MockArgs([])\n args.quant_type = \"vector\"\n args.use_8bit_training = \"full\"\n args.clip_freq = 9999\n return args\n\n\ndef assert_all_approx_close(a, b, atol=1e-8, rtol=1e-5, count=10):\n idx = torch.isclose(a, b, rtol=rtol, atol=atol)\n sumval = (idx == 0).sum().item()\n if sumval > count:\n print(f\"Too many values not close: assert {sumval} < {count}\")\n torch.testing.assert_close(a, b, rtol=rtol, atol=atol)\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"threshold\", [0.0, 3.0], ids=id_formatter(\"threshold\"))\ndef test_linear8bitlt_inference(device, threshold):\n l1 = bnb.nn.Linear8bitLt(32, 64, threshold=threshold, has_fp16_weights=False).to(device).half()\n assert l1.weight.device.type == device\n assert l1.weight.dtype == torch.int8\n\n l1.eval()\n for i in range(100):\n b1 = torch.randn(16, 8, 32, device=device).half()","source_hash":"3d6ec09b5a60bb5ac1a84e28c30d7dc58ee0ed8a6d9069d61b1c866feb6100b7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_modules.assert_all_approx_close","uri":"program://bitsandbytes/function/tests.test_modules.assert_all_approx_close#L47-L52","kind":"function","name":"assert_all_approx_close","path":"tests/test_modules.py","language":"python","start_line":47,"end_line":52,"context_start_line":27,"context_end_line":72,"code":" dim2,\n dim1,\n has_fp16_weights=has_fp16_weights,\n threshold=threshold,\n )\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.fc2(x)\n return x\n\n\ndef get_args():\n args = MockArgs([])\n args.quant_type = \"vector\"\n args.use_8bit_training = \"full\"\n args.clip_freq = 9999\n return args\n\n\ndef assert_all_approx_close(a, b, atol=1e-8, rtol=1e-5, count=10):\n idx = torch.isclose(a, b, rtol=rtol, atol=atol)\n sumval = (idx == 0).sum().item()\n if sumval > count:\n print(f\"Too many values not close: assert {sumval} < {count}\")\n torch.testing.assert_close(a, b, rtol=rtol, atol=atol)\n\n\n@pytest.mark.parametrize(\"device\", 
get_available_devices())\n@pytest.mark.parametrize(\"threshold\", [0.0, 3.0], ids=id_formatter(\"threshold\"))\ndef test_linear8bitlt_inference(device, threshold):\n l1 = bnb.nn.Linear8bitLt(32, 64, threshold=threshold, has_fp16_weights=False).to(device).half()\n assert l1.weight.device.type == device\n assert l1.weight.dtype == torch.int8\n\n l1.eval()\n for i in range(100):\n b1 = torch.randn(16, 8, 32, device=device).half()\n o1 = l1(b1)\n if i == 1:\n assert l1.state.CB is not None\n\n\n# TODO: Remove support for training int8 weights\n@pytest.mark.parametrize(\"device\", get_available_devices())\ndef test_linear8bitlt_accumulated_gradient(device):","source_hash":"3d6ec09b5a60bb5ac1a84e28c30d7dc58ee0ed8a6d9069d61b1c866feb6100b7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_modules.test_linear8bitlt_inference","uri":"program://bitsandbytes/function/tests.test_modules.test_linear8bitlt_inference#L57-L67","kind":"function","name":"test_linear8bitlt_inference","path":"tests/test_modules.py","language":"python","start_line":57,"end_line":67,"context_start_line":37,"context_end_line":87,"code":"\n\ndef get_args():\n args = MockArgs([])\n args.quant_type = \"vector\"\n args.use_8bit_training = \"full\"\n args.clip_freq = 9999\n return args\n\n\ndef assert_all_approx_close(a, b, atol=1e-8, rtol=1e-5, count=10):\n idx = torch.isclose(a, b, rtol=rtol, atol=atol)\n sumval = (idx == 0).sum().item()\n if sumval > count:\n print(f\"Too many values not close: assert {sumval} < {count}\")\n torch.testing.assert_close(a, b, rtol=rtol, atol=atol)\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"threshold\", [0.0, 3.0], ids=id_formatter(\"threshold\"))\ndef test_linear8bitlt_inference(device, threshold):\n l1 = bnb.nn.Linear8bitLt(32, 64, threshold=threshold, has_fp16_weights=False).to(device).half()\n assert l1.weight.device.type == device\n assert l1.weight.dtype == torch.int8\n\n l1.eval()\n for i in range(100):\n b1 = torch.randn(16, 8, 32, device=device).half()\n o1 = l1(b1)\n if i == 1:\n assert l1.state.CB is not None\n\n\n# TODO: Remove support for training int8 weights\n@pytest.mark.parametrize(\"device\", get_available_devices())\ndef test_linear8bitlt_accumulated_gradient(device):\n if device != \"cuda\":\n pytest.skip(\"Only supported on CUDA\")\n\n l1 = torch.nn.Sequential(*[bnb.nn.Linear8bitLt(32, 32).to(device).half() for i in range(2)])\n l2 = torch.nn.Sequential(*[torch.nn.Linear(32, 32).to(device).half() for i in range(2)])\n l1[0].weight.data.copy_(l2[0].weight.data)\n l1[1].weight.data.copy_(l2[1].weight.data)\n l1[0].bias.data.copy_(l2[0].bias.data)\n l1[1].bias.data.copy_(l2[1].bias.data)\n\n opt1 = bnb.optim.Adam32bit(l1.parameters(), lr=0.001)\n opt2 = bnb.optim.Adam32bit(l2.parameters(), lr=0.001)\n\n acc_steps = 10\n","source_hash":"3d6ec09b5a60bb5ac1a84e28c30d7dc58ee0ed8a6d9069d61b1c866feb6100b7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_modules.test_linear8bitlt_accumulated_gradient","uri":"program://bitsandbytes/function/tests.test_modules.test_linear8bitlt_accumulated_gradient#L72-L114","kind":"function","name":"test_linear8bitlt_accumulated_gradient","path":"tests/test_modules.py","language":"python","start_line":72,"end_line":114,"context_start_line":52,"context_end_line":134,"code":" torch.testing.assert_close(a, b, rtol=rtol, atol=atol)\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"threshold\", [0.0, 3.0], 
ids=id_formatter(\"threshold\"))\ndef test_linear8bitlt_inference(device, threshold):\n l1 = bnb.nn.Linear8bitLt(32, 64, threshold=threshold, has_fp16_weights=False).to(device).half()\n assert l1.weight.device.type == device\n assert l1.weight.dtype == torch.int8\n\n l1.eval()\n for i in range(100):\n b1 = torch.randn(16, 8, 32, device=device).half()\n o1 = l1(b1)\n if i == 1:\n assert l1.state.CB is not None\n\n\n# TODO: Remove support for training int8 weights\n@pytest.mark.parametrize(\"device\", get_available_devices())\ndef test_linear8bitlt_accumulated_gradient(device):\n if device != \"cuda\":\n pytest.skip(\"Only supported on CUDA\")\n\n l1 = torch.nn.Sequential(*[bnb.nn.Linear8bitLt(32, 32).to(device).half() for i in range(2)])\n l2 = torch.nn.Sequential(*[torch.nn.Linear(32, 32).to(device).half() for i in range(2)])\n l1[0].weight.data.copy_(l2[0].weight.data)\n l1[1].weight.data.copy_(l2[1].weight.data)\n l1[0].bias.data.copy_(l2[0].bias.data)\n l1[1].bias.data.copy_(l2[1].bias.data)\n\n opt1 = bnb.optim.Adam32bit(l1.parameters(), lr=0.001)\n opt2 = bnb.optim.Adam32bit(l2.parameters(), lr=0.001)\n\n acc_steps = 10\n\n for i in range(15):\n b1 = torch.randn(16, 8, 32, device=device).half()\n o1 = l1(b1)\n o2 = l2(b1)\n loss1 = o1.mean()\n loss2 = o2.mean()\n loss1.backward()\n loss2.backward()\n if i == 2:\n assert l1[0].state.CB is not None\n assert l1[1].state.CB is not None\n\n if i > 0 and i % acc_steps == 0:\n opt1.step()\n opt1.zero_grad(True)\n opt2.step()\n opt2.zero_grad(True)\n assert_all_approx_close(l1[0].weight, l2[0].weight, rtol=1.05, atol=0.01, count=2)\n assert_all_approx_close(l1[1].weight, l2[1].weight, rtol=1.05, atol=0.01, count=2)\n # we do this copy because otherwise we have small divergences over time that add up\n l1[0].weight.data.copy_(l2[0].weight.data)\n l1[1].weight.data.copy_(l2[1].weight.data)\n l1[0].bias.data.copy_(l2[0].bias.data)\n l1[1].bias.data.copy_(l2[1].bias.data)\n else:\n assert_all_approx_close(l1[0].weight.grad, l2[0].weight.grad, rtol=1.05, atol=0.04, count=1)\n assert_all_approx_close(l1[1].weight.grad, l2[1].weight.grad, rtol=1.05, atol=0.04, count=1)\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"threshold\", [0.0, 2.0])\ndef test_linear8bitlt_no_fp16_weights(device, threshold):\n l1 = (\n bnb.nn.Linear8bitLt(\n 32,\n 64,\n threshold=threshold,\n has_fp16_weights=False,\n )\n .to(device)\n .half()\n )\n assert l1.weight.dtype == torch.int8\n\n l1.eval()\n for i in range(4):\n b1 = torch.randn(16, 8, 32, device=device, dtype=torch.float16)","source_hash":"3d6ec09b5a60bb5ac1a84e28c30d7dc58ee0ed8a6d9069d61b1c866feb6100b7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_modules.test_linear8bitlt_no_fp16_weights","uri":"program://bitsandbytes/function/tests.test_modules.test_linear8bitlt_no_fp16_weights#L119-L232","kind":"function","name":"test_linear8bitlt_no_fp16_weights","path":"tests/test_modules.py","language":"python","start_line":119,"end_line":232,"context_start_line":99,"context_end_line":252,"code":"\n if i > 0 and i % acc_steps == 0:\n opt1.step()\n opt1.zero_grad(True)\n opt2.step()\n opt2.zero_grad(True)\n assert_all_approx_close(l1[0].weight, l2[0].weight, rtol=1.05, atol=0.01, count=2)\n assert_all_approx_close(l1[1].weight, l2[1].weight, rtol=1.05, atol=0.01, count=2)\n # we do this copy because otherwise we have small divergences over time that add up\n l1[0].weight.data.copy_(l2[0].weight.data)\n l1[1].weight.data.copy_(l2[1].weight.data)\n 
l1[0].bias.data.copy_(l2[0].bias.data)\n l1[1].bias.data.copy_(l2[1].bias.data)\n else:\n assert_all_approx_close(l1[0].weight.grad, l2[0].weight.grad, rtol=1.05, atol=0.04, count=1)\n assert_all_approx_close(l1[1].weight.grad, l2[1].weight.grad, rtol=1.05, atol=0.04, count=1)\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"threshold\", [0.0, 2.0])\ndef test_linear8bitlt_no_fp16_weights(device, threshold):\n l1 = (\n bnb.nn.Linear8bitLt(\n 32,\n 64,\n threshold=threshold,\n has_fp16_weights=False,\n )\n .to(device)\n .half()\n )\n assert l1.weight.dtype == torch.int8\n\n l1.eval()\n for i in range(4):\n b1 = torch.randn(16, 8, 32, device=device, dtype=torch.float16)\n o1 = l1(b1)\n assert o1.dtype == torch.float16\n\n mlp = MLP8bit(32, 64, threshold=threshold, has_fp16_weights=False).to(device)\n assert mlp.fc1.weight.dtype == torch.int8\n assert mlp.fc2.weight.dtype == torch.int8\n\n for i in range(4):\n b1 = torch.randn(16, 8, 32, device=device, dtype=torch.float16)\n o1 = mlp(b1)\n assert o1.dtype == torch.float16\n if threshold > 0 and device not in (\"cpu\", \"xpu\"):\n assert mlp.fc1.state.idx is not None\n assert mlp.fc2.state.idx is not None\n\n mlp = MLP8bit(32, 64, threshold=threshold, has_fp16_weights=False).to(device).half()\n assert mlp.fc1.weight.dtype == torch.int8\n assert mlp.fc2.weight.dtype == torch.int8\n\n for i in range(4):\n b1 = torch.randn(16, 8, 32, device=device, dtype=torch.float16)\n o1 = mlp(b1)\n assert o1.dtype == torch.float16\n if threshold > 0 and device not in (\"cpu\", \"xpu\"):\n assert mlp.fc1.state.idx is not None\n assert mlp.fc2.state.idx is not None\n\n mlp = MLP8bit(32, 64, threshold=threshold, has_fp16_weights=False).half().to(device)\n\n for i in range(4):\n b1 = torch.randn(16, 8, 32, device=device, dtype=torch.float16)\n o1 = mlp(b1)\n assert o1.dtype == torch.float16\n if threshold > 0 and device not in (\"cpu\", \"xpu\"):\n assert mlp.fc1.state.idx is not None\n assert mlp.fc2.state.idx is not None\n assert mlp.fc1.weight.dtype == torch.int8\n assert mlp.fc2.weight.dtype == torch.int8\n\n mlp = (\n MLP8bit(\n 32,\n 64,\n threshold=threshold,\n has_fp16_weights=False,\n )\n .half()\n .to(device)\n )\n\n for i in range(4):\n b1 = torch.randn(16, 8, 32, device=device, dtype=torch.float16)\n o1 = mlp(b1)\n assert o1.dtype == torch.float16\n if threshold > 0 and device not in (\"cpu\", \"xpu\"):\n assert mlp.fc1.state.idx is not None\n assert mlp.fc2.state.idx is not None\n assert mlp.fc1.weight.dtype == torch.int8\n assert mlp.fc2.weight.dtype == torch.int8\n assert mlp.fc1.weight.device.type == device\n assert mlp.fc2.weight.device.type == device\n\n mlp = MLP8bit(\n 32,\n 64,\n threshold=threshold,\n has_fp16_weights=False,\n )\n w1, w2 = mlp.fc1.weight.clone().to(device), mlp.fc2.weight.clone().to(device) # grab weights before quantization,\n mlp = mlp.to(device).half() # and this line triggers quantization\n\n for i in range(4):\n b1 = torch.randn(16, 8, 32, device=device, dtype=torch.float16)\n o1 = mlp(b1)\n assert o1.dtype == torch.float16\n if threshold > 0 and device not in (\"cpu\", \"xpu\"):\n assert mlp.fc1.state.idx is not None\n assert mlp.fc2.state.idx is not None\n\n assert mlp.fc1.weight.dtype == torch.int8\n assert mlp.fc2.weight.dtype == torch.int8\n assert mlp.fc1.weight.device.type == device\n assert mlp.fc2.weight.device.type == device\n\n b1 = torch.randn(16, 8, 32, device=device, requires_grad=True, dtype=torch.half)\n o1 = mlp(b1)\n assert o1.dtype == torch.float16\n assert 
o1.requires_grad\n grad_proj = torch.randn_like(o1)\n\n mlp.zero_grad()\n (o1 * grad_proj).sum().backward()\n grad_ref = grad_proj.flatten(2) @ w2.half() @ w1.half()\n scale = grad_ref.abs().mean()\n\n torch.testing.assert_close(b1.grad, grad_ref, rtol=0, atol=0.05 * scale)\n idx = torch.isclose(b1.grad, grad_ref, atol=0.01 * scale, rtol=0.1)\n assert (idx == 0).sum().item() <= b1.numel() * 0.005\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\n \"module\",\n [\n lambda n_in, n_out, bias=True: bnb.nn.Linear8bitLt(n_in, n_out, bias=bias, has_fp16_weights=False),\n bnb.nn.LinearNF4,\n ],\n ids=[\"Int8Lt\", \"NF4\"],\n)\ndef test_linear_kbit_fp32_bias(device, module):\n # casts model to fp16 -> int8 automatically\n l1 = module(32, 64).to(device)\n assert l1.weight.dtype in [torch.int8, torch.uint8]\n assert l1.bias.dtype == torch.float32\n\n for i in range(100):\n b1 = torch.randn(16, 8, 32, device=device, dtype=torch.float16)\n # casts bias to fp32","source_hash":"3d6ec09b5a60bb5ac1a84e28c30d7dc58ee0ed8a6d9069d61b1c866feb6100b7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_modules.test_linear_kbit_fp32_bias","uri":"program://bitsandbytes/function/tests.test_modules.test_linear_kbit_fp32_bias#L244-L264","kind":"function","name":"test_linear_kbit_fp32_bias","path":"tests/test_modules.py","language":"python","start_line":244,"end_line":264,"context_start_line":224,"context_end_line":284,"code":"\n mlp.zero_grad()\n (o1 * grad_proj).sum().backward()\n grad_ref = grad_proj.flatten(2) @ w2.half() @ w1.half()\n scale = grad_ref.abs().mean()\n\n torch.testing.assert_close(b1.grad, grad_ref, rtol=0, atol=0.05 * scale)\n idx = torch.isclose(b1.grad, grad_ref, atol=0.01 * scale, rtol=0.1)\n assert (idx == 0).sum().item() <= b1.numel() * 0.005\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\n \"module\",\n [\n lambda n_in, n_out, bias=True: bnb.nn.Linear8bitLt(n_in, n_out, bias=bias, has_fp16_weights=False),\n bnb.nn.LinearNF4,\n ],\n ids=[\"Int8Lt\", \"NF4\"],\n)\ndef test_linear_kbit_fp32_bias(device, module):\n # casts model to fp16 -> int8 automatically\n l1 = module(32, 64).to(device)\n assert l1.weight.dtype in [torch.int8, torch.uint8]\n assert l1.bias.dtype == torch.float32\n\n for i in range(100):\n b1 = torch.randn(16, 8, 32, device=device, dtype=torch.float16)\n # casts bias to fp32\n o1 = l1(b1)\n assert l1.bias.dtype == torch.float16\n\n # casts model to fp16 -> int8 automatically\n l1 = module(32, 64, bias=False).to(device)\n assert l1.weight.dtype in [torch.int8, torch.uint8]\n assert l1.bias is None\n\n for i in range(100):\n b1 = torch.randn(16, 8, 32, device=device, dtype=torch.float16)\n o1 = l1(b1)\n assert l1.bias is None\n\n\nmodule_dict = {\n \"Int8Lt\": bnb.nn.Linear8bitLt,\n \"4bit\": bnb.nn.Linear4bit,\n \"FP4\": bnb.nn.LinearFP4,\n \"NF4\": bnb.nn.LinearNF4,\n \"FP4+C\": lambda d1, d2: bnb.nn.LinearFP4(d1, d2, compress_statistics=True),\n \"NF4+C\": lambda d1, d2: bnb.nn.LinearNF4(d1, d2, compress_statistics=True),\n \"NF4+fp32\": lambda d1, d2: bnb.nn.LinearNF4(d1, d2, compute_dtype=torch.float32),\n \"NF4+fp16\": lambda d1, d2: bnb.nn.LinearNF4(d1, d2, compute_dtype=torch.float16),\n \"NF4+bf16\": lambda d1, d2: bnb.nn.LinearNF4(d1, d2, compute_dtype=torch.bfloat16),\n}\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"module\", module_dict.values(), ids=module_dict.keys())\n@pytest.mark.parametrize(\"dtype\", 
[torch.float16, torch.bfloat16])\ndef test_kbit_backprop(device, module, dtype):\n b = 16","source_hash":"3d6ec09b5a60bb5ac1a84e28c30d7dc58ee0ed8a6d9069d61b1c866feb6100b7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_modules.test_kbit_backprop","uri":"program://bitsandbytes/function/tests.test_modules.test_kbit_backprop#L283-L348","kind":"function","name":"test_kbit_backprop","path":"tests/test_modules.py","language":"python","start_line":283,"end_line":348,"context_start_line":263,"context_end_line":368,"code":" o1 = l1(b1)\n assert l1.bias is None\n\n\nmodule_dict = {\n \"Int8Lt\": bnb.nn.Linear8bitLt,\n \"4bit\": bnb.nn.Linear4bit,\n \"FP4\": bnb.nn.LinearFP4,\n \"NF4\": bnb.nn.LinearNF4,\n \"FP4+C\": lambda d1, d2: bnb.nn.LinearFP4(d1, d2, compress_statistics=True),\n \"NF4+C\": lambda d1, d2: bnb.nn.LinearNF4(d1, d2, compress_statistics=True),\n \"NF4+fp32\": lambda d1, d2: bnb.nn.LinearNF4(d1, d2, compute_dtype=torch.float32),\n \"NF4+fp16\": lambda d1, d2: bnb.nn.LinearNF4(d1, d2, compute_dtype=torch.float16),\n \"NF4+bf16\": lambda d1, d2: bnb.nn.LinearNF4(d1, d2, compute_dtype=torch.bfloat16),\n}\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"module\", module_dict.values(), ids=module_dict.keys())\n@pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16])\ndef test_kbit_backprop(device, module, dtype):\n b = 16\n dim1 = 36\n dim2 = 84\n # dim1 = 37\n # dim2 = 83\n\n ref = nn.Sequential(*[torch.nn.Linear(dim1, dim2), torch.nn.Linear(dim2, 128)])\n torch.nn.init.kaiming_normal_(ref[0].weight)\n torch.nn.init.kaiming_normal_(ref[1].weight)\n ref[1].weight.requires_grad_(False)\n\n kbit = nn.Sequential(*[torch.nn.Linear(dim1, dim2), module(dim2, 128)])\n\n if (\n device == \"hpu\"\n and isinstance(kbit[1], bnb.nn.Linear4bit)\n and not is_supported_on_hpu(kbit[1].weight.quant_type, dtype)\n ):\n pytest.skip(\"This configuration not supported on HPU\")\n\n kbit[0].weight.detach().copy_(ref[0].weight)\n kbit[1].weight.detach().copy_(ref[1].weight)\n kbit[0].bias.detach().copy_(ref[0].bias)\n kbit[1].bias.detach().copy_(ref[1].bias)\n kbit[1].weight.requires_grad_(False)\n ref = ref.to(device=device, dtype=dtype)\n kbit = kbit.to(device=device, dtype=dtype)\n kbit = kbit.to(device=device, dtype=dtype)\n\n errs1 = []\n errs2 = []\n relerrs1 = []\n relerrs2 = []\n for i in range(100):\n batch = torch.randn(b, dim1, device=device, dtype=dtype)\n out1 = ref(batch)\n out2 = kbit(batch)\n out1.mean().backward()\n out2.mean().backward()\n\n grad1 = ref[0].weight.grad\n grad2 = kbit[0].weight.grad\n bgrad1 = ref[0].bias.grad\n bgrad2 = kbit[0].bias.grad\n\n err1 = (out1 - out2).abs().float()\n err2 = (grad1 - grad2).abs().float()\n relerr1 = err1 / (out1.abs().float() + 1e-9)\n relerr2 = err2 / (grad1.abs().float() + 1e-9)\n errs1.append(err1.mean().item())\n errs2.append(err2.mean().item())\n relerrs1.append(relerr1.mean().item())\n relerrs2.append(relerr2.mean().item())\n\n if isinstance(module, bnb.nn.Linear8bitLt):\n assert_all_approx_close(grad1, grad2, atol=0.008, rtol=0.05, count=1)\n torch.testing.assert_close(bgrad1, bgrad2, atol=0.008, rtol=0.05)\n else:\n assert_all_approx_close(grad1, grad2, atol=0.015, rtol=0.05, count=1)\n torch.testing.assert_close(bgrad1, bgrad2, atol=0.02, rtol=0.05)\n ref.zero_grad()\n kbit.zero_grad()\n\n assert kbit[0].weight.grad is None or kbit[0].weight.grad.sum().item() == 0\n assert kbit[0].weight.grad is None or kbit[0].bias.grad.sum().item() == 
0\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"embedding_dim\", [64, 65])\n@pytest.mark.parametrize(\"input_shape\", [(10,), (10, 10), (10, 10, 10)], ids=str)\n@pytest.mark.parametrize(\n \"embedding_class,quant_storage\",\n [\n (bnb.nn.Embedding8bit, None),\n (bnb.nn.EmbeddingFP4, torch.uint8),\n (bnb.nn.EmbeddingFP4, torch.float32),\n (bnb.nn.EmbeddingNF4, torch.uint8),\n (bnb.nn.EmbeddingNF4, torch.float32),\n ],\n ids=lambda x: x.__name__ if inspect.isclass(x) else str(x),\n)\ndef test_embedding_lossless(device, embedding_class, input_shape, embedding_dim, quant_storage):\n if device == \"hpu\":\n if embedding_class is bnb.nn.EmbeddingFP4:\n pytest.skip(\"FP4 is not supported on HPU\")","source_hash":"3d6ec09b5a60bb5ac1a84e28c30d7dc58ee0ed8a6d9069d61b1c866feb6100b7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_modules.test_embedding_lossless","uri":"program://bitsandbytes/function/tests.test_modules.test_embedding_lossless#L365-L399","kind":"function","name":"test_embedding_lossless","path":"tests/test_modules.py","language":"python","start_line":365,"end_line":399,"context_start_line":345,"context_end_line":419,"code":" kbit.zero_grad()\n\n assert kbit[0].weight.grad is None or kbit[0].weight.grad.sum().item() == 0\n assert kbit[0].weight.grad is None or kbit[0].bias.grad.sum().item() == 0\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"embedding_dim\", [64, 65])\n@pytest.mark.parametrize(\"input_shape\", [(10,), (10, 10), (10, 10, 10)], ids=str)\n@pytest.mark.parametrize(\n \"embedding_class,quant_storage\",\n [\n (bnb.nn.Embedding8bit, None),\n (bnb.nn.EmbeddingFP4, torch.uint8),\n (bnb.nn.EmbeddingFP4, torch.float32),\n (bnb.nn.EmbeddingNF4, torch.uint8),\n (bnb.nn.EmbeddingNF4, torch.float32),\n ],\n ids=lambda x: x.__name__ if inspect.isclass(x) else str(x),\n)\ndef test_embedding_lossless(device, embedding_class, input_shape, embedding_dim, quant_storage):\n if device == \"hpu\":\n if embedding_class is bnb.nn.EmbeddingFP4:\n pytest.skip(\"FP4 is not supported on HPU\")\n elif embedding_class is bnb.nn.EmbeddingNF4 and not is_supported_on_hpu(\"nf4\", torch.float32, quant_storage):\n pytest.skip(\"This configuration is not supported on HPU\")\n\n num_embeddings = 128\n\n src_weight = (torch.randn((num_embeddings, embedding_dim), dtype=torch.float32) > 0).to(\n torch.float32\n ) * 2 - 1 # Embeddings filled with {-1, 1} values. 
It should compress losslessly\n\n emb_base = nn.Embedding(\n num_embeddings=num_embeddings,\n embedding_dim=embedding_dim,\n _freeze=True,\n _weight=src_weight,\n )\n if embedding_class is bnb.nn.Embedding8bit:\n e = embedding_class(num_embeddings=num_embeddings, embedding_dim=embedding_dim)\n else:\n e = embedding_class(num_embeddings=num_embeddings, embedding_dim=embedding_dim, quant_storage=quant_storage)\n\n e.load_state_dict(emb_base.state_dict())\n\n emb_base.to(device)\n e.to(device)\n\n input_tokens = torch.randint(low=0, high=num_embeddings, size=input_shape, device=device)\n\n torch.testing.assert_close(\n actual=e(input_tokens),\n expected=emb_base(input_tokens),\n )\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"embedding_dim\", [64, 65])\n@pytest.mark.parametrize(\"input_shape\", [(10,), (10, 10), (10, 10, 10)], ids=str)\n@pytest.mark.parametrize(\n \"embedding_class,quant_storage\",\n [\n (bnb.nn.Embedding8bit, None),\n (bnb.nn.EmbeddingFP4, torch.uint8),\n (bnb.nn.EmbeddingFP4, torch.float32),\n (bnb.nn.EmbeddingNF4, torch.uint8),\n (bnb.nn.EmbeddingNF4, torch.float32),\n ],\n ids=lambda x: x.__name__ if inspect.isclass(x) else str(x),\n)\ndef test_embedding_error(device, embedding_class, input_shape, embedding_dim, quant_storage):\n if device == \"hpu\":\n if embedding_class is bnb.nn.EmbeddingFP4:\n pytest.skip(\"FP4 is not supported on HPU\")","source_hash":"3d6ec09b5a60bb5ac1a84e28c30d7dc58ee0ed8a6d9069d61b1c866feb6100b7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_modules.test_embedding_error","uri":"program://bitsandbytes/function/tests.test_modules.test_embedding_error#L416-L452","kind":"function","name":"test_embedding_error","path":"tests/test_modules.py","language":"python","start_line":416,"end_line":452,"context_start_line":396,"context_end_line":472,"code":" torch.testing.assert_close(\n actual=e(input_tokens),\n expected=emb_base(input_tokens),\n )\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"embedding_dim\", [64, 65])\n@pytest.mark.parametrize(\"input_shape\", [(10,), (10, 10), (10, 10, 10)], ids=str)\n@pytest.mark.parametrize(\n \"embedding_class,quant_storage\",\n [\n (bnb.nn.Embedding8bit, None),\n (bnb.nn.EmbeddingFP4, torch.uint8),\n (bnb.nn.EmbeddingFP4, torch.float32),\n (bnb.nn.EmbeddingNF4, torch.uint8),\n (bnb.nn.EmbeddingNF4, torch.float32),\n ],\n ids=lambda x: x.__name__ if inspect.isclass(x) else str(x),\n)\ndef test_embedding_error(device, embedding_class, input_shape, embedding_dim, quant_storage):\n if device == \"hpu\":\n if embedding_class is bnb.nn.EmbeddingFP4:\n pytest.skip(\"FP4 is not supported on HPU\")\n elif embedding_class is bnb.nn.EmbeddingNF4 and not is_supported_on_hpu(\"nf4\", torch.float32, quant_storage):\n pytest.skip(\"This configuration is not supported on HPU\")\n\n is_8bit = embedding_class is bnb.nn.Embedding8bit\n\n num_embeddings = 128\n\n src_weight = torch.rand((num_embeddings, embedding_dim), dtype=torch.float32)\n\n emb_base = nn.Embedding(\n num_embeddings=num_embeddings,\n embedding_dim=embedding_dim,\n _freeze=True,\n _weight=src_weight,\n )\n if is_8bit:\n e = embedding_class(num_embeddings=num_embeddings, embedding_dim=embedding_dim)\n else:\n e = embedding_class(num_embeddings=num_embeddings, embedding_dim=embedding_dim, quant_storage=quant_storage)\n\n e.load_state_dict(emb_base.state_dict())\n\n emb_base.to(device)\n e.to(device)\n\n input_tokens = torch.randint(low=0, 
high=num_embeddings, size=input_shape, device=device)\n\n torch.testing.assert_close(\n actual=e(input_tokens),\n expected=emb_base(input_tokens),\n atol=0.05 if is_8bit else 0.20,\n rtol=0.0,\n )\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\ndef test_4bit_linear_warnings(device):\n dim1 = 64\n\n with pytest.warns(UserWarning, match=r\"inference or training\"):\n net = nn.Sequential(*[bnb.nn.Linear4bit(dim1, dim1, quant_type=\"nf4\") for i in range(10)])\n net = net.to(device)\n inp = torch.rand(10, dim1, device=device, dtype=torch.float16)\n net(inp)\n with pytest.warns(UserWarning, match=r\"inference.\"):\n net = nn.Sequential(*[bnb.nn.Linear4bit(dim1, dim1, quant_type=\"nf4\") for i in range(10)])\n net = net.to(device)\n inp = torch.rand(1, dim1, device=device, dtype=torch.float16)\n net(inp)\n\n with pytest.warns(UserWarning) as record:\n net = nn.Sequential(*[bnb.nn.Linear4bit(dim1, dim1, quant_type=\"nf4\") for i in range(10)])\n net = net.to(device)","source_hash":"3d6ec09b5a60bb5ac1a84e28c30d7dc58ee0ed8a6d9069d61b1c866feb6100b7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_modules.test_4bit_linear_warnings","uri":"program://bitsandbytes/function/tests.test_modules.test_4bit_linear_warnings#L456-L481","kind":"function","name":"test_4bit_linear_warnings","path":"tests/test_modules.py","language":"python","start_line":456,"end_line":481,"context_start_line":436,"context_end_line":501,"code":" e = embedding_class(num_embeddings=num_embeddings, embedding_dim=embedding_dim)\n else:\n e = embedding_class(num_embeddings=num_embeddings, embedding_dim=embedding_dim, quant_storage=quant_storage)\n\n e.load_state_dict(emb_base.state_dict())\n\n emb_base.to(device)\n e.to(device)\n\n input_tokens = torch.randint(low=0, high=num_embeddings, size=input_shape, device=device)\n\n torch.testing.assert_close(\n actual=e(input_tokens),\n expected=emb_base(input_tokens),\n atol=0.05 if is_8bit else 0.20,\n rtol=0.0,\n )\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\ndef test_4bit_linear_warnings(device):\n dim1 = 64\n\n with pytest.warns(UserWarning, match=r\"inference or training\"):\n net = nn.Sequential(*[bnb.nn.Linear4bit(dim1, dim1, quant_type=\"nf4\") for i in range(10)])\n net = net.to(device)\n inp = torch.rand(10, dim1, device=device, dtype=torch.float16)\n net(inp)\n with pytest.warns(UserWarning, match=r\"inference.\"):\n net = nn.Sequential(*[bnb.nn.Linear4bit(dim1, dim1, quant_type=\"nf4\") for i in range(10)])\n net = net.to(device)\n inp = torch.rand(1, dim1, device=device, dtype=torch.float16)\n net(inp)\n\n with pytest.warns(UserWarning) as record:\n net = nn.Sequential(*[bnb.nn.Linear4bit(dim1, dim1, quant_type=\"nf4\") for i in range(10)])\n net = net.to(device)\n inp = torch.rand(10, dim1, device=device, dtype=torch.float16)\n net(inp)\n\n net = nn.Sequential(*[bnb.nn.Linear4bit(dim1, dim1, quant_type=\"nf4\") for i in range(10)])\n net = net.to(device)\n inp = torch.rand(1, dim1, device=device, dtype=torch.float16)\n net(inp)\n\n assert len(record) == 2\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\ndef test_4bit_embedding_warnings(device):\n num_embeddings = 128\n default_block_size = 64\n\n with pytest.warns(UserWarning, match=r\"inference.\"):\n net = bnb.nn.Embedding4bit(\n num_embeddings=num_embeddings, embedding_dim=default_block_size + 1, quant_type=\"nf4\"\n )\n net.to(device)\n inp = torch.randint(low=0, high=num_embeddings, size=(1,), device=device)\n net(inp)\n\n\ndef 
test_4bit_embedding_weight_fsdp_fix(requires_cuda):\n num_embeddings = 64\n embedding_dim = 32\n","source_hash":"3d6ec09b5a60bb5ac1a84e28c30d7dc58ee0ed8a6d9069d61b1c866feb6100b7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_modules.test_4bit_embedding_warnings","uri":"program://bitsandbytes/function/tests.test_modules.test_4bit_embedding_warnings#L485-L495","kind":"function","name":"test_4bit_embedding_warnings","path":"tests/test_modules.py","language":"python","start_line":485,"end_line":495,"context_start_line":465,"context_end_line":515,"code":" net = nn.Sequential(*[bnb.nn.Linear4bit(dim1, dim1, quant_type=\"nf4\") for i in range(10)])\n net = net.to(device)\n inp = torch.rand(1, dim1, device=device, dtype=torch.float16)\n net(inp)\n\n with pytest.warns(UserWarning) as record:\n net = nn.Sequential(*[bnb.nn.Linear4bit(dim1, dim1, quant_type=\"nf4\") for i in range(10)])\n net = net.to(device)\n inp = torch.rand(10, dim1, device=device, dtype=torch.float16)\n net(inp)\n\n net = nn.Sequential(*[bnb.nn.Linear4bit(dim1, dim1, quant_type=\"nf4\") for i in range(10)])\n net = net.to(device)\n inp = torch.rand(1, dim1, device=device, dtype=torch.float16)\n net(inp)\n\n assert len(record) == 2\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\ndef test_4bit_embedding_warnings(device):\n num_embeddings = 128\n default_block_size = 64\n\n with pytest.warns(UserWarning, match=r\"inference.\"):\n net = bnb.nn.Embedding4bit(\n num_embeddings=num_embeddings, embedding_dim=default_block_size + 1, quant_type=\"nf4\"\n )\n net.to(device)\n inp = torch.randint(low=0, high=num_embeddings, size=(1,), device=device)\n net(inp)\n\n\ndef test_4bit_embedding_weight_fsdp_fix(requires_cuda):\n num_embeddings = 64\n embedding_dim = 32\n\n module = bnb.nn.Embedding4bit(num_embeddings=num_embeddings, embedding_dim=embedding_dim)\n\n module.cuda()\n\n module.weight.quant_state = None\n\n input_tokens = torch.randint(low=0, high=num_embeddings, size=(1,), device=\"cuda\")\n\n module(input_tokens)\n\n assert module.weight.quant_state is not None\n\n\ndef test_4bit_linear_weight_fsdp_fix(requires_cuda):","source_hash":"3d6ec09b5a60bb5ac1a84e28c30d7dc58ee0ed8a6d9069d61b1c866feb6100b7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_modules.test_4bit_embedding_weight_fsdp_fix","uri":"program://bitsandbytes/function/tests.test_modules.test_4bit_embedding_weight_fsdp_fix#L498-L512","kind":"function","name":"test_4bit_embedding_weight_fsdp_fix","path":"tests/test_modules.py","language":"python","start_line":498,"end_line":512,"context_start_line":478,"context_end_line":532,"code":" inp = torch.rand(1, dim1, device=device, dtype=torch.float16)\n net(inp)\n\n assert len(record) == 2\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\ndef test_4bit_embedding_warnings(device):\n num_embeddings = 128\n default_block_size = 64\n\n with pytest.warns(UserWarning, match=r\"inference.\"):\n net = bnb.nn.Embedding4bit(\n num_embeddings=num_embeddings, embedding_dim=default_block_size + 1, quant_type=\"nf4\"\n )\n net.to(device)\n inp = torch.randint(low=0, high=num_embeddings, size=(1,), device=device)\n net(inp)\n\n\ndef test_4bit_embedding_weight_fsdp_fix(requires_cuda):\n num_embeddings = 64\n embedding_dim = 32\n\n module = bnb.nn.Embedding4bit(num_embeddings=num_embeddings, embedding_dim=embedding_dim)\n\n module.cuda()\n\n module.weight.quant_state = None\n\n input_tokens = torch.randint(low=0, high=num_embeddings, size=(1,), 
device=\"cuda\")\n\n module(input_tokens)\n\n assert module.weight.quant_state is not None\n\n\ndef test_4bit_linear_weight_fsdp_fix(requires_cuda):\n inp_size = 64\n out_size = 32\n\n module = bnb.nn.Linear4bit(inp_size, out_size)\n\n module.cuda()\n\n module.weight.quant_state = None\n\n input_tensor = torch.randn((1, inp_size), device=\"cuda\")\n\n module(input_tensor)\n\n assert module.weight.quant_state is not None\n\n\ndef test_embedding_not_implemented_error():","source_hash":"3d6ec09b5a60bb5ac1a84e28c30d7dc58ee0ed8a6d9069d61b1c866feb6100b7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_modules.test_4bit_linear_weight_fsdp_fix","uri":"program://bitsandbytes/function/tests.test_modules.test_4bit_linear_weight_fsdp_fix#L515-L529","kind":"function","name":"test_4bit_linear_weight_fsdp_fix","path":"tests/test_modules.py","language":"python","start_line":515,"end_line":529,"context_start_line":495,"context_end_line":539,"code":" net(inp)\n\n\ndef test_4bit_embedding_weight_fsdp_fix(requires_cuda):\n num_embeddings = 64\n embedding_dim = 32\n\n module = bnb.nn.Embedding4bit(num_embeddings=num_embeddings, embedding_dim=embedding_dim)\n\n module.cuda()\n\n module.weight.quant_state = None\n\n input_tokens = torch.randint(low=0, high=num_embeddings, size=(1,), device=\"cuda\")\n\n module(input_tokens)\n\n assert module.weight.quant_state is not None\n\n\ndef test_4bit_linear_weight_fsdp_fix(requires_cuda):\n inp_size = 64\n out_size = 32\n\n module = bnb.nn.Linear4bit(inp_size, out_size)\n\n module.cuda()\n\n module.weight.quant_state = None\n\n input_tensor = torch.randn((1, inp_size), device=\"cuda\")\n\n module(input_tensor)\n\n assert module.weight.quant_state is not None\n\n\ndef test_embedding_not_implemented_error():\n with pytest.raises(NotImplementedError):\n emb = bnb.nn.Embedding4bit(32, 32)\n emb.state_dict()\n\n with pytest.raises(NotImplementedError):\n emb = bnb.nn.Embedding8bit(32, 32)\n emb.state_dict()","source_hash":"3d6ec09b5a60bb5ac1a84e28c30d7dc58ee0ed8a6d9069d61b1c866feb6100b7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_modules.test_embedding_not_implemented_error","uri":"program://bitsandbytes/function/tests.test_modules.test_embedding_not_implemented_error#L532-L539","kind":"function","name":"test_embedding_not_implemented_error","path":"tests/test_modules.py","language":"python","start_line":532,"end_line":539,"context_start_line":512,"context_end_line":539,"code":" assert module.weight.quant_state is not None\n\n\ndef test_4bit_linear_weight_fsdp_fix(requires_cuda):\n inp_size = 64\n out_size = 32\n\n module = bnb.nn.Linear4bit(inp_size, out_size)\n\n module.cuda()\n\n module.weight.quant_state = None\n\n input_tensor = torch.randn((1, inp_size), device=\"cuda\")\n\n module(input_tensor)\n\n assert module.weight.quant_state is not None\n\n\ndef test_embedding_not_implemented_error():\n with pytest.raises(NotImplementedError):\n emb = bnb.nn.Embedding4bit(32, 32)\n emb.state_dict()\n\n with pytest.raises(NotImplementedError):\n emb = bnb.nn.Embedding8bit(32, 32)\n emb.state_dict()","source_hash":"3d6ec09b5a60bb5ac1a84e28c30d7dc58ee0ed8a6d9069d61b1c866feb6100b7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_modules.__init__","uri":"program://bitsandbytes/function/tests.test_modules.__init__#L18-L31","kind":"function","name":"__init__","path":"tests/test_modules.py","language":"python","start_line":18,"end_line":31,"context_start_line":1,"context_end_line":51,"code":"import 
inspect\n\nimport pytest\nimport torch\nfrom torch import nn\n\nimport bitsandbytes as bnb\nfrom tests.helpers import get_available_devices, id_formatter, is_supported_on_hpu\n\n\nclass MockArgs:\n def __init__(self, initial_data):\n for key in initial_data:\n setattr(self, key, initial_data[key])\n\n\nclass MLP8bit(torch.nn.Module):\n def __init__(self, dim1, dim2, has_fp16_weights=True, threshold=0.0):\n super().__init__()\n self.fc1 = bnb.nn.Linear8bitLt(\n dim1,\n dim2,\n has_fp16_weights=has_fp16_weights,\n threshold=threshold,\n )\n self.fc2 = bnb.nn.Linear8bitLt(\n dim2,\n dim1,\n has_fp16_weights=has_fp16_weights,\n threshold=threshold,\n )\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.fc2(x)\n return x\n\n\ndef get_args():\n args = MockArgs([])\n args.quant_type = \"vector\"\n args.use_8bit_training = \"full\"\n args.clip_freq = 9999\n return args\n\n\ndef assert_all_approx_close(a, b, atol=1e-8, rtol=1e-5, count=10):\n idx = torch.isclose(a, b, rtol=rtol, atol=atol)\n sumval = (idx == 0).sum().item()\n if sumval > count:\n print(f\"Too many values not close: assert {sumval} < {count}\")","source_hash":"3d6ec09b5a60bb5ac1a84e28c30d7dc58ee0ed8a6d9069d61b1c866feb6100b7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_modules.forward","uri":"program://bitsandbytes/function/tests.test_modules.forward#L33-L36","kind":"function","name":"forward","path":"tests/test_modules.py","language":"python","start_line":33,"end_line":36,"context_start_line":13,"context_end_line":56,"code":" for key in initial_data:\n setattr(self, key, initial_data[key])\n\n\nclass MLP8bit(torch.nn.Module):\n def __init__(self, dim1, dim2, has_fp16_weights=True, threshold=0.0):\n super().__init__()\n self.fc1 = bnb.nn.Linear8bitLt(\n dim1,\n dim2,\n has_fp16_weights=has_fp16_weights,\n threshold=threshold,\n )\n self.fc2 = bnb.nn.Linear8bitLt(\n dim2,\n dim1,\n has_fp16_weights=has_fp16_weights,\n threshold=threshold,\n )\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.fc2(x)\n return x\n\n\ndef get_args():\n args = MockArgs([])\n args.quant_type = \"vector\"\n args.use_8bit_training = \"full\"\n args.clip_freq = 9999\n return args\n\n\ndef assert_all_approx_close(a, b, atol=1e-8, rtol=1e-5, count=10):\n idx = torch.isclose(a, b, rtol=rtol, atol=atol)\n sumval = (idx == 0).sum().item()\n if sumval > count:\n print(f\"Too many values not close: assert {sumval} < {count}\")\n torch.testing.assert_close(a, b, rtol=rtol, atol=atol)\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"threshold\", [0.0, 3.0], ids=id_formatter(\"threshold\"))","source_hash":"3d6ec09b5a60bb5ac1a84e28c30d7dc58ee0ed8a6d9069d61b1c866feb6100b7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_ops","uri":"program://bitsandbytes/module/tests.test_ops#L1-L233","kind":"module","name":"tests.test_ops","path":"tests/test_ops.py","language":"python","start_line":1,"end_line":233,"context_start_line":1,"context_end_line":233,"code":"from math import prod\n\nimport pytest\nimport torch\n\nimport bitsandbytes\nfrom bitsandbytes.cextension import HIP_ENVIRONMENT\nfrom tests.helpers import TRUE_FALSE, get_available_devices, id_formatter, is_supported_on_hpu\n\n# torch.library.opcheck is only available in torch 2.4 and later.\n# When testing with older versions, we will skip it as a no-op.\nif torch.__version__ >= (2, 4):\n opcheck = torch.library.opcheck\nelse:\n opcheck = lambda *args, **kwargs: None\n\n\nclass TestLLMInt8Ops:\n 
@pytest.mark.parametrize(\"device\", get_available_devices())\n def test_int8_linear_matmul(self, device):\n A = torch.randint(-128, 127, (10, 20), dtype=torch.int8, device=device)\n B = torch.randint(-128, 127, (30, 20), dtype=torch.int8, device=device)\n out = torch.ops.bitsandbytes.int8_linear_matmul.default(A, B)\n\n assert out.shape == (10, 30)\n assert out.dtype == torch.int32\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.int8_linear_matmul.default, (A, B))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_int8_linear_matmul_out(self, device):\n A = torch.randint(-128, 127, (10, 20), dtype=torch.int8, device=device)\n B = torch.randint(-128, 127, (30, 20), dtype=torch.int8, device=device)\n\n out = torch.empty((10, 30), dtype=torch.int32, device=device)\n torch.ops.bitsandbytes.int8_linear_matmul.out(A, B, out)\n\n assert out.shape == (10, 30)\n assert out.dtype == torch.int32\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.int8_linear_matmul.out, (A, B, out))\n\n @pytest.mark.parametrize(\"threshold\", [0.0, 6.0])\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_int8_vectorwise_quant(self, threshold, device):\n A = torch.randn(10, 20, dtype=torch.float16, device=device)\n A[1][0] = 1000.0\n\n out_row, row_stats, outlier_cols = torch.ops.bitsandbytes.int8_vectorwise_quant(A, threshold=threshold)\n\n assert out_row.shape == (10, 20)\n assert out_row.dtype == torch.int8\n assert out_row.device == A.device\n assert row_stats.shape == (10,)\n assert row_stats.dtype == torch.float32\n assert row_stats.device == A.device\n\n if threshold > 0.0:\n assert outlier_cols is not None\n assert outlier_cols.dim() == 1\n assert outlier_cols.shape[0] <= A.shape[1]\n assert outlier_cols.device == A.device\n else:\n assert outlier_cols is None\n\n opcheck(torch.ops.bitsandbytes.int8_vectorwise_quant, (A,))\n opcheck(torch.ops.bitsandbytes.int8_vectorwise_quant, (A, threshold))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_int8_mm_dequant(self, device):\n A = torch.randint(-128, 127, (256, 256), dtype=torch.int32, device=device)\n row_stats = torch.randn(256, dtype=torch.float32, device=device)\n col_stats = torch.randn(256, dtype=torch.float32, device=device)\n out = torch.ops.bitsandbytes.int8_mm_dequant(A, row_stats, col_stats)\n\n assert out.shape == A.shape\n assert out.dtype == torch.float16\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.int8_mm_dequant, (A, row_stats, col_stats))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"has_bias\", TRUE_FALSE)\n def test_int8_scaled_mm(self, device, dtype, has_bias):\n A = torch.randint(-128, 127, (10, 20), dtype=torch.int8, device=device)\n B = torch.randint(-128, 127, (30, 20), dtype=torch.int8, device=device)\n row_stats = torch.randn(10, dtype=torch.float32, device=device)\n col_stats = torch.randn(30, dtype=torch.float32, device=device)\n bias = torch.randn(30, dtype=dtype, device=device) if has_bias else None\n out = torch.ops.bitsandbytes.int8_scaled_mm(A, B, row_stats, col_stats, bias=bias, dtype=dtype)\n\n assert out.shape == (10, 30)\n assert out.dtype == dtype\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.int8_scaled_mm, (A, B, row_stats, col_stats, bias, dtype))\n\n\nclass TestInt8BlockwiseQuantOps:\n 
@pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_quantize_blockwise(self, device, dtype, blocksize):\n if device == \"cpu\":\n if dtype != torch.float32:\n pytest.skip(\"CPU implementation is only available for float32\")\n\n if blocksize != 256:\n pytest.skip(\"CPU implementation is slow; only test blocksize=256\")\n\n code = bitsandbytes.functional.create_dynamic_map().to(device)\n A = torch.randn(1024, 1024, dtype=dtype, device=device)\n out, absmax = torch.ops.bitsandbytes.quantize_blockwise(A, code, blocksize)\n\n assert out.shape == A.shape\n assert out.dtype == torch.uint8\n assert out.device == A.device\n\n assert absmax.device == A.device\n assert absmax.dtype == torch.float32\n\n opcheck(torch.ops.bitsandbytes.quantize_blockwise, (A, code, blocksize))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_dequantize_blockwise(self, device, dtype, blocksize):\n if device == \"cpu\" and dtype != torch.float32:\n pytest.skip(\"CPU implementation is only available for float32\")\n\n A = torch.randint(0, 255, (1024, 1024), dtype=torch.uint8, device=device)\n code = bitsandbytes.functional.create_dynamic_map().to(device, dtype=torch.float32)\n\n n = A.numel()\n blocks = -(n // -blocksize)\n absmax = torch.randn((blocks,), device=device, dtype=torch.float32)\n\n out = torch.ops.bitsandbytes.dequantize_blockwise.default(A, absmax, code, blocksize, dtype)\n\n assert out.shape == A.shape\n assert out.dtype == dtype\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.dequantize_blockwise.default, (A, absmax, code, blocksize, dtype))\n\n\nclass Test4bitBlockwiseQuantOps:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"storage_dtype\", [torch.uint8, torch.bfloat16], ids=id_formatter(\"storage_dtype\"))\n @pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"])\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_quantize_4bit(self, device, dtype, storage_dtype, quant_type, blocksize):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype, storage_dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n A = torch.randn(1024, 1024, dtype=dtype, device=device)\n\n out, absmax = torch.ops.bitsandbytes.quantize_4bit.default(A, blocksize, quant_type, storage_dtype)\n\n assert out.device == A.device\n assert out.dtype == storage_dtype\n\n assert absmax.device == A.device\n assert absmax.dtype == torch.float32\n\n if storage_dtype != torch.uint8:\n pytest.xfail(\"opcheck fails for storage_dtype != torch.uint8\")\n\n opcheck(torch.ops.bitsandbytes.quantize_4bit.default, (A, blocksize, quant_type, storage_dtype))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"storage_dtype\", [torch.uint8, 
torch.bfloat16], ids=id_formatter(\"storage_dtype\"))\n @pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"])\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_dequantize_4bit(self, device, dtype, storage_dtype, quant_type, blocksize):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype, storage_dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n shape = (128, 128)\n\n n = prod(shape)\n blocks = -(n // -blocksize)\n quantized_shape = ((n + 1) // (storage_dtype.itemsize * 2), 1)\n\n A = (\n torch.randint(0, 255, ((n + 1) // 2,), dtype=torch.uint8, device=device)\n .view(storage_dtype)\n .reshape(quantized_shape)\n .contiguous()\n )\n\n absmax = torch.randn((blocks,), dtype=torch.float32, device=device)\n\n out = torch.ops.bitsandbytes.dequantize_4bit.default(A, absmax, blocksize, quant_type, shape, dtype)\n\n assert out.device == A.device\n assert out.shape == shape\n\n opcheck(\n torch.ops.bitsandbytes.dequantize_4bit.default,\n (A, absmax, blocksize, quant_type, shape, dtype),\n )\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"storage_dtype\", [torch.uint8, torch.bfloat16], ids=id_formatter(\"storage_dtype\"))\n @pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"])\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_gemv_4bit(self, device, dtype, storage_dtype, quant_type, blocksize):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype, storage_dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n out_features = 1024\n in_features = 256\n\n A = torch.randn((1, 1, in_features), dtype=dtype, device=device)\n B = torch.randn((out_features, in_features), dtype=dtype, device=A.device)\n B_q, absmax = torch.ops.bitsandbytes.quantize_4bit(B, blocksize, quant_type, storage_dtype)\n code = bitsandbytes.functional.get_4bit_type(quant_type, device=A.device, blocksize=blocksize)\n\n out = torch.ops.bitsandbytes.gemv_4bit.default(A, B_q, B.shape, absmax, code, blocksize)\n\n assert out.device == A.device\n assert out.dtype == dtype\n assert out.shape == (1, 1, out_features)\n assert out.isreal().all()\n\n opcheck(torch.ops.bitsandbytes.gemv_4bit.default, (A, B_q, B.shape, absmax, code, blocksize))","source_hash":"e8c6491c46599880959e5626bd0877e0b9427e63f53e514dedd0ceb866d8fe1d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_ops.TestLLMInt8Ops","uri":"program://bitsandbytes/class/tests.test_ops.TestLLMInt8Ops#L18-L99","kind":"class","name":"TestLLMInt8Ops","path":"tests/test_ops.py","language":"python","start_line":18,"end_line":99,"context_start_line":1,"context_end_line":119,"code":"from math import prod\n\nimport pytest\nimport torch\n\nimport bitsandbytes\nfrom bitsandbytes.cextension import HIP_ENVIRONMENT\nfrom tests.helpers import TRUE_FALSE, get_available_devices, id_formatter, is_supported_on_hpu\n\n# torch.library.opcheck is only available in torch 2.4 and later.\n# When testing with older versions, we will skip it as a no-op.\nif torch.__version__ >= (2, 4):\n opcheck = torch.library.opcheck\nelse:\n opcheck = lambda *args, **kwargs: None\n\n\nclass TestLLMInt8Ops:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_int8_linear_matmul(self, device):\n A = 
torch.randint(-128, 127, (10, 20), dtype=torch.int8, device=device)\n B = torch.randint(-128, 127, (30, 20), dtype=torch.int8, device=device)\n out = torch.ops.bitsandbytes.int8_linear_matmul.default(A, B)\n\n assert out.shape == (10, 30)\n assert out.dtype == torch.int32\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.int8_linear_matmul.default, (A, B))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_int8_linear_matmul_out(self, device):\n A = torch.randint(-128, 127, (10, 20), dtype=torch.int8, device=device)\n B = torch.randint(-128, 127, (30, 20), dtype=torch.int8, device=device)\n\n out = torch.empty((10, 30), dtype=torch.int32, device=device)\n torch.ops.bitsandbytes.int8_linear_matmul.out(A, B, out)\n\n assert out.shape == (10, 30)\n assert out.dtype == torch.int32\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.int8_linear_matmul.out, (A, B, out))\n\n @pytest.mark.parametrize(\"threshold\", [0.0, 6.0])\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_int8_vectorwise_quant(self, threshold, device):\n A = torch.randn(10, 20, dtype=torch.float16, device=device)\n A[1][0] = 1000.0\n\n out_row, row_stats, outlier_cols = torch.ops.bitsandbytes.int8_vectorwise_quant(A, threshold=threshold)\n\n assert out_row.shape == (10, 20)\n assert out_row.dtype == torch.int8\n assert out_row.device == A.device\n assert row_stats.shape == (10,)\n assert row_stats.dtype == torch.float32\n assert row_stats.device == A.device\n\n if threshold > 0.0:\n assert outlier_cols is not None\n assert outlier_cols.dim() == 1\n assert outlier_cols.shape[0] <= A.shape[1]\n assert outlier_cols.device == A.device\n else:\n assert outlier_cols is None\n\n opcheck(torch.ops.bitsandbytes.int8_vectorwise_quant, (A,))\n opcheck(torch.ops.bitsandbytes.int8_vectorwise_quant, (A, threshold))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_int8_mm_dequant(self, device):\n A = torch.randint(-128, 127, (256, 256), dtype=torch.int32, device=device)\n row_stats = torch.randn(256, dtype=torch.float32, device=device)\n col_stats = torch.randn(256, dtype=torch.float32, device=device)\n out = torch.ops.bitsandbytes.int8_mm_dequant(A, row_stats, col_stats)\n\n assert out.shape == A.shape\n assert out.dtype == torch.float16\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.int8_mm_dequant, (A, row_stats, col_stats))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"has_bias\", TRUE_FALSE)\n def test_int8_scaled_mm(self, device, dtype, has_bias):\n A = torch.randint(-128, 127, (10, 20), dtype=torch.int8, device=device)\n B = torch.randint(-128, 127, (30, 20), dtype=torch.int8, device=device)\n row_stats = torch.randn(10, dtype=torch.float32, device=device)\n col_stats = torch.randn(30, dtype=torch.float32, device=device)\n bias = torch.randn(30, dtype=dtype, device=device) if has_bias else None\n out = torch.ops.bitsandbytes.int8_scaled_mm(A, B, row_stats, col_stats, bias=bias, dtype=dtype)\n\n assert out.shape == (10, 30)\n assert out.dtype == dtype\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.int8_scaled_mm, (A, B, row_stats, col_stats, bias, dtype))\n\n\nclass TestInt8BlockwiseQuantOps:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, 
torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_quantize_blockwise(self, device, dtype, blocksize):\n if device == \"cpu\":\n if dtype != torch.float32:\n pytest.skip(\"CPU implementation is only available for float32\")\n\n if blocksize != 256:\n pytest.skip(\"CPU implementation is slow; only test blocksize=256\")\n\n code = bitsandbytes.functional.create_dynamic_map().to(device)\n A = torch.randn(1024, 1024, dtype=dtype, device=device)\n out, absmax = torch.ops.bitsandbytes.quantize_blockwise(A, code, blocksize)\n\n assert out.shape == A.shape\n assert out.dtype == torch.uint8","source_hash":"e8c6491c46599880959e5626bd0877e0b9427e63f53e514dedd0ceb866d8fe1d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_ops.TestInt8BlockwiseQuantOps","uri":"program://bitsandbytes/class/tests.test_ops.TestInt8BlockwiseQuantOps#L102-L147","kind":"class","name":"TestInt8BlockwiseQuantOps","path":"tests/test_ops.py","language":"python","start_line":102,"end_line":147,"context_start_line":82,"context_end_line":167,"code":" opcheck(torch.ops.bitsandbytes.int8_mm_dequant, (A, row_stats, col_stats))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"has_bias\", TRUE_FALSE)\n def test_int8_scaled_mm(self, device, dtype, has_bias):\n A = torch.randint(-128, 127, (10, 20), dtype=torch.int8, device=device)\n B = torch.randint(-128, 127, (30, 20), dtype=torch.int8, device=device)\n row_stats = torch.randn(10, dtype=torch.float32, device=device)\n col_stats = torch.randn(30, dtype=torch.float32, device=device)\n bias = torch.randn(30, dtype=dtype, device=device) if has_bias else None\n out = torch.ops.bitsandbytes.int8_scaled_mm(A, B, row_stats, col_stats, bias=bias, dtype=dtype)\n\n assert out.shape == (10, 30)\n assert out.dtype == dtype\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.int8_scaled_mm, (A, B, row_stats, col_stats, bias, dtype))\n\n\nclass TestInt8BlockwiseQuantOps:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_quantize_blockwise(self, device, dtype, blocksize):\n if device == \"cpu\":\n if dtype != torch.float32:\n pytest.skip(\"CPU implementation is only available for float32\")\n\n if blocksize != 256:\n pytest.skip(\"CPU implementation is slow; only test blocksize=256\")\n\n code = bitsandbytes.functional.create_dynamic_map().to(device)\n A = torch.randn(1024, 1024, dtype=dtype, device=device)\n out, absmax = torch.ops.bitsandbytes.quantize_blockwise(A, code, blocksize)\n\n assert out.shape == A.shape\n assert out.dtype == torch.uint8\n assert out.device == A.device\n\n assert absmax.device == A.device\n assert absmax.dtype == torch.float32\n\n opcheck(torch.ops.bitsandbytes.quantize_blockwise, (A, code, blocksize))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_dequantize_blockwise(self, 
device, dtype, blocksize):\n if device == \"cpu\" and dtype != torch.float32:\n pytest.skip(\"CPU implementation is only available for float32\")\n\n A = torch.randint(0, 255, (1024, 1024), dtype=torch.uint8, device=device)\n code = bitsandbytes.functional.create_dynamic_map().to(device, dtype=torch.float32)\n\n n = A.numel()\n blocks = -(n // -blocksize)\n absmax = torch.randn((blocks,), device=device, dtype=torch.float32)\n\n out = torch.ops.bitsandbytes.dequantize_blockwise.default(A, absmax, code, blocksize, dtype)\n\n assert out.shape == A.shape\n assert out.dtype == dtype\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.dequantize_blockwise.default, (A, absmax, code, blocksize, dtype))\n\n\nclass Test4bitBlockwiseQuantOps:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"storage_dtype\", [torch.uint8, torch.bfloat16], ids=id_formatter(\"storage_dtype\"))\n @pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"])\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_quantize_4bit(self, device, dtype, storage_dtype, quant_type, blocksize):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype, storage_dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n A = torch.randn(1024, 1024, dtype=dtype, device=device)\n\n out, absmax = torch.ops.bitsandbytes.quantize_4bit.default(A, blocksize, quant_type, storage_dtype)\n\n assert out.device == A.device\n assert out.dtype == storage_dtype\n\n assert absmax.device == A.device","source_hash":"e8c6491c46599880959e5626bd0877e0b9427e63f53e514dedd0ceb866d8fe1d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_ops.Test4bitBlockwiseQuantOps","uri":"program://bitsandbytes/class/tests.test_ops.Test4bitBlockwiseQuantOps#L150-L233","kind":"class","name":"Test4bitBlockwiseQuantOps","path":"tests/test_ops.py","language":"python","start_line":150,"end_line":233,"context_start_line":130,"context_end_line":233,"code":" def test_dequantize_blockwise(self, device, dtype, blocksize):\n if device == \"cpu\" and dtype != torch.float32:\n pytest.skip(\"CPU implementation is only available for float32\")\n\n A = torch.randint(0, 255, (1024, 1024), dtype=torch.uint8, device=device)\n code = bitsandbytes.functional.create_dynamic_map().to(device, dtype=torch.float32)\n\n n = A.numel()\n blocks = -(n // -blocksize)\n absmax = torch.randn((blocks,), device=device, dtype=torch.float32)\n\n out = torch.ops.bitsandbytes.dequantize_blockwise.default(A, absmax, code, blocksize, dtype)\n\n assert out.shape == A.shape\n assert out.dtype == dtype\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.dequantize_blockwise.default, (A, absmax, code, blocksize, dtype))\n\n\nclass Test4bitBlockwiseQuantOps:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"storage_dtype\", [torch.uint8, torch.bfloat16], ids=id_formatter(\"storage_dtype\"))\n @pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"])\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_quantize_4bit(self, device, dtype, storage_dtype, quant_type, blocksize):\n if device == \"hpu\" and 
not is_supported_on_hpu(quant_type, dtype, storage_dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n A = torch.randn(1024, 1024, dtype=dtype, device=device)\n\n out, absmax = torch.ops.bitsandbytes.quantize_4bit.default(A, blocksize, quant_type, storage_dtype)\n\n assert out.device == A.device\n assert out.dtype == storage_dtype\n\n assert absmax.device == A.device\n assert absmax.dtype == torch.float32\n\n if storage_dtype != torch.uint8:\n pytest.xfail(\"opcheck fails for storage_dtype != torch.uint8\")\n\n opcheck(torch.ops.bitsandbytes.quantize_4bit.default, (A, blocksize, quant_type, storage_dtype))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"storage_dtype\", [torch.uint8, torch.bfloat16], ids=id_formatter(\"storage_dtype\"))\n @pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"])\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_dequantize_4bit(self, device, dtype, storage_dtype, quant_type, blocksize):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype, storage_dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n shape = (128, 128)\n\n n = prod(shape)\n blocks = -(n // -blocksize)\n quantized_shape = ((n + 1) // (storage_dtype.itemsize * 2), 1)\n\n A = (\n torch.randint(0, 255, ((n + 1) // 2,), dtype=torch.uint8, device=device)\n .view(storage_dtype)\n .reshape(quantized_shape)\n .contiguous()\n )\n\n absmax = torch.randn((blocks,), dtype=torch.float32, device=device)\n\n out = torch.ops.bitsandbytes.dequantize_4bit.default(A, absmax, blocksize, quant_type, shape, dtype)\n\n assert out.device == A.device\n assert out.shape == shape\n\n opcheck(\n torch.ops.bitsandbytes.dequantize_4bit.default,\n (A, absmax, blocksize, quant_type, shape, dtype),\n )\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"storage_dtype\", [torch.uint8, torch.bfloat16], ids=id_formatter(\"storage_dtype\"))\n @pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"])\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_gemv_4bit(self, device, dtype, storage_dtype, quant_type, blocksize):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype, storage_dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n out_features = 1024\n in_features = 256\n\n A = torch.randn((1, 1, in_features), dtype=dtype, device=device)\n B = torch.randn((out_features, in_features), dtype=dtype, device=A.device)\n B_q, absmax = torch.ops.bitsandbytes.quantize_4bit(B, blocksize, quant_type, storage_dtype)\n code = bitsandbytes.functional.get_4bit_type(quant_type, device=A.device, blocksize=blocksize)\n\n out = torch.ops.bitsandbytes.gemv_4bit.default(A, B_q, B.shape, absmax, code, blocksize)\n\n assert out.device == A.device\n assert out.dtype == dtype\n assert out.shape == (1, 1, out_features)\n assert out.isreal().all()\n\n opcheck(torch.ops.bitsandbytes.gemv_4bit.default, (A, B_q, B.shape, absmax, code, blocksize))","source_hash":"e8c6491c46599880959e5626bd0877e0b9427e63f53e514dedd0ceb866d8fe1d","truncated":false} 
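The Test4bitBlockwiseQuantOps record above exercises quantize_4bit, dequantize_4bit, and gemv_4bit through the torch.ops.bitsandbytes namespace. A minimal round-trip sketch, assuming a bitsandbytes build whose import registers those ops and a device it supports (CUDA here; the 1024x1024 shape and blocksize 64 are illustrative, not prescribed by the tests):

import torch
import bitsandbytes  # noqa: F401 -- importing registers torch.ops.bitsandbytes

device = "cuda"
A = torch.randn(1024, 1024, dtype=torch.float16, device=device)

# Quantize to NF4 with uint8 storage; absmax holds one fp32 scale per block.
packed, absmax = torch.ops.bitsandbytes.quantize_4bit.default(A, 64, "nf4", torch.uint8)

# Dequantize back to the original shape/dtype; 4-bit is lossy, so expect a
# small but nonzero reconstruction error.
A_hat = torch.ops.bitsandbytes.dequantize_4bit.default(packed, absmax, 64, "nf4", A.shape, A.dtype)
print((A - A_hat).abs().mean().item())

Note that the tests size absmax with blocks = -(n // -blocksize), Python's idiom for ceiling division, so the last (possibly partial) block still gets its own scale.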
{"repo_id":"bitsandbytes","entity_id":"py:tests.test_ops.test_int8_linear_matmul","uri":"program://bitsandbytes/function/tests.test_ops.test_int8_linear_matmul#L20-L29","kind":"function","name":"test_int8_linear_matmul","path":"tests/test_ops.py","language":"python","start_line":20,"end_line":29,"context_start_line":1,"context_end_line":49,"code":"from math import prod\n\nimport pytest\nimport torch\n\nimport bitsandbytes\nfrom bitsandbytes.cextension import HIP_ENVIRONMENT\nfrom tests.helpers import TRUE_FALSE, get_available_devices, id_formatter, is_supported_on_hpu\n\n# torch.library.opcheck is only available in torch 2.4 and later.\n# When testing with older versions, we will skip it as a no-op.\nif torch.__version__ >= (2, 4):\n opcheck = torch.library.opcheck\nelse:\n opcheck = lambda *args, **kwargs: None\n\n\nclass TestLLMInt8Ops:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_int8_linear_matmul(self, device):\n A = torch.randint(-128, 127, (10, 20), dtype=torch.int8, device=device)\n B = torch.randint(-128, 127, (30, 20), dtype=torch.int8, device=device)\n out = torch.ops.bitsandbytes.int8_linear_matmul.default(A, B)\n\n assert out.shape == (10, 30)\n assert out.dtype == torch.int32\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.int8_linear_matmul.default, (A, B))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_int8_linear_matmul_out(self, device):\n A = torch.randint(-128, 127, (10, 20), dtype=torch.int8, device=device)\n B = torch.randint(-128, 127, (30, 20), dtype=torch.int8, device=device)\n\n out = torch.empty((10, 30), dtype=torch.int32, device=device)\n torch.ops.bitsandbytes.int8_linear_matmul.out(A, B, out)\n\n assert out.shape == (10, 30)\n assert out.dtype == torch.int32\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.int8_linear_matmul.out, (A, B, out))\n\n @pytest.mark.parametrize(\"threshold\", [0.0, 6.0])\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_int8_vectorwise_quant(self, threshold, device):\n A = torch.randn(10, 20, dtype=torch.float16, device=device)\n A[1][0] = 1000.0","source_hash":"e8c6491c46599880959e5626bd0877e0b9427e63f53e514dedd0ceb866d8fe1d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_ops.test_int8_linear_matmul_out","uri":"program://bitsandbytes/function/tests.test_ops.test_int8_linear_matmul_out#L32-L43","kind":"function","name":"test_int8_linear_matmul_out","path":"tests/test_ops.py","language":"python","start_line":32,"end_line":43,"context_start_line":12,"context_end_line":63,"code":"if torch.__version__ >= (2, 4):\n opcheck = torch.library.opcheck\nelse:\n opcheck = lambda *args, **kwargs: None\n\n\nclass TestLLMInt8Ops:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_int8_linear_matmul(self, device):\n A = torch.randint(-128, 127, (10, 20), dtype=torch.int8, device=device)\n B = torch.randint(-128, 127, (30, 20), dtype=torch.int8, device=device)\n out = torch.ops.bitsandbytes.int8_linear_matmul.default(A, B)\n\n assert out.shape == (10, 30)\n assert out.dtype == torch.int32\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.int8_linear_matmul.default, (A, B))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_int8_linear_matmul_out(self, device):\n A = torch.randint(-128, 127, (10, 20), dtype=torch.int8, device=device)\n B = torch.randint(-128, 127, (30, 20), dtype=torch.int8, device=device)\n\n out = torch.empty((10, 30), 
dtype=torch.int32, device=device)\n torch.ops.bitsandbytes.int8_linear_matmul.out(A, B, out)\n\n assert out.shape == (10, 30)\n assert out.dtype == torch.int32\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.int8_linear_matmul.out, (A, B, out))\n\n @pytest.mark.parametrize(\"threshold\", [0.0, 6.0])\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_int8_vectorwise_quant(self, threshold, device):\n A = torch.randn(10, 20, dtype=torch.float16, device=device)\n A[1][0] = 1000.0\n\n out_row, row_stats, outlier_cols = torch.ops.bitsandbytes.int8_vectorwise_quant(A, threshold=threshold)\n\n assert out_row.shape == (10, 20)\n assert out_row.dtype == torch.int8\n assert out_row.device == A.device\n assert row_stats.shape == (10,)\n assert row_stats.dtype == torch.float32\n assert row_stats.device == A.device\n\n if threshold > 0.0:\n assert outlier_cols is not None\n assert outlier_cols.dim() == 1\n assert outlier_cols.shape[0] <= A.shape[1]","source_hash":"e8c6491c46599880959e5626bd0877e0b9427e63f53e514dedd0ceb866d8fe1d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_ops.test_int8_vectorwise_quant","uri":"program://bitsandbytes/function/tests.test_ops.test_int8_vectorwise_quant#L47-L69","kind":"function","name":"test_int8_vectorwise_quant","path":"tests/test_ops.py","language":"python","start_line":47,"end_line":69,"context_start_line":27,"context_end_line":89,"code":" assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.int8_linear_matmul.default, (A, B))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_int8_linear_matmul_out(self, device):\n A = torch.randint(-128, 127, (10, 20), dtype=torch.int8, device=device)\n B = torch.randint(-128, 127, (30, 20), dtype=torch.int8, device=device)\n\n out = torch.empty((10, 30), dtype=torch.int32, device=device)\n torch.ops.bitsandbytes.int8_linear_matmul.out(A, B, out)\n\n assert out.shape == (10, 30)\n assert out.dtype == torch.int32\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.int8_linear_matmul.out, (A, B, out))\n\n @pytest.mark.parametrize(\"threshold\", [0.0, 6.0])\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_int8_vectorwise_quant(self, threshold, device):\n A = torch.randn(10, 20, dtype=torch.float16, device=device)\n A[1][0] = 1000.0\n\n out_row, row_stats, outlier_cols = torch.ops.bitsandbytes.int8_vectorwise_quant(A, threshold=threshold)\n\n assert out_row.shape == (10, 20)\n assert out_row.dtype == torch.int8\n assert out_row.device == A.device\n assert row_stats.shape == (10,)\n assert row_stats.dtype == torch.float32\n assert row_stats.device == A.device\n\n if threshold > 0.0:\n assert outlier_cols is not None\n assert outlier_cols.dim() == 1\n assert outlier_cols.shape[0] <= A.shape[1]\n assert outlier_cols.device == A.device\n else:\n assert outlier_cols is None\n\n opcheck(torch.ops.bitsandbytes.int8_vectorwise_quant, (A,))\n opcheck(torch.ops.bitsandbytes.int8_vectorwise_quant, (A, threshold))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_int8_mm_dequant(self, device):\n A = torch.randint(-128, 127, (256, 256), dtype=torch.int32, device=device)\n row_stats = torch.randn(256, dtype=torch.float32, device=device)\n col_stats = torch.randn(256, dtype=torch.float32, device=device)\n out = torch.ops.bitsandbytes.int8_mm_dequant(A, row_stats, col_stats)\n\n assert out.shape == A.shape\n assert out.dtype == torch.float16\n assert out.device == A.device\n\n 
opcheck(torch.ops.bitsandbytes.int8_mm_dequant, (A, row_stats, col_stats))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"has_bias\", TRUE_FALSE)\n def test_int8_scaled_mm(self, device, dtype, has_bias):\n A = torch.randint(-128, 127, (10, 20), dtype=torch.int8, device=device)\n B = torch.randint(-128, 127, (30, 20), dtype=torch.int8, device=device)","source_hash":"e8c6491c46599880959e5626bd0877e0b9427e63f53e514dedd0ceb866d8fe1d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_ops.test_int8_mm_dequant","uri":"program://bitsandbytes/function/tests.test_ops.test_int8_mm_dequant#L72-L82","kind":"function","name":"test_int8_mm_dequant","path":"tests/test_ops.py","language":"python","start_line":72,"end_line":82,"context_start_line":52,"context_end_line":102,"code":"\n assert out_row.shape == (10, 20)\n assert out_row.dtype == torch.int8\n assert out_row.device == A.device\n assert row_stats.shape == (10,)\n assert row_stats.dtype == torch.float32\n assert row_stats.device == A.device\n\n if threshold > 0.0:\n assert outlier_cols is not None\n assert outlier_cols.dim() == 1\n assert outlier_cols.shape[0] <= A.shape[1]\n assert outlier_cols.device == A.device\n else:\n assert outlier_cols is None\n\n opcheck(torch.ops.bitsandbytes.int8_vectorwise_quant, (A,))\n opcheck(torch.ops.bitsandbytes.int8_vectorwise_quant, (A, threshold))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_int8_mm_dequant(self, device):\n A = torch.randint(-128, 127, (256, 256), dtype=torch.int32, device=device)\n row_stats = torch.randn(256, dtype=torch.float32, device=device)\n col_stats = torch.randn(256, dtype=torch.float32, device=device)\n out = torch.ops.bitsandbytes.int8_mm_dequant(A, row_stats, col_stats)\n\n assert out.shape == A.shape\n assert out.dtype == torch.float16\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.int8_mm_dequant, (A, row_stats, col_stats))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"has_bias\", TRUE_FALSE)\n def test_int8_scaled_mm(self, device, dtype, has_bias):\n A = torch.randint(-128, 127, (10, 20), dtype=torch.int8, device=device)\n B = torch.randint(-128, 127, (30, 20), dtype=torch.int8, device=device)\n row_stats = torch.randn(10, dtype=torch.float32, device=device)\n col_stats = torch.randn(30, dtype=torch.float32, device=device)\n bias = torch.randn(30, dtype=dtype, device=device) if has_bias else None\n out = torch.ops.bitsandbytes.int8_scaled_mm(A, B, row_stats, col_stats, bias=bias, dtype=dtype)\n\n assert out.shape == (10, 30)\n assert out.dtype == dtype\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.int8_scaled_mm, (A, B, row_stats, col_stats, bias, dtype))\n\n\nclass TestInt8BlockwiseQuantOps:","source_hash":"e8c6491c46599880959e5626bd0877e0b9427e63f53e514dedd0ceb866d8fe1d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_ops.test_int8_scaled_mm","uri":"program://bitsandbytes/function/tests.test_ops.test_int8_scaled_mm#L87-L99","kind":"function","name":"test_int8_scaled_mm","path":"tests/test_ops.py","language":"python","start_line":87,"end_line":99,"context_start_line":67,"context_end_line":119,"code":"\n 
opcheck(torch.ops.bitsandbytes.int8_vectorwise_quant, (A,))\n opcheck(torch.ops.bitsandbytes.int8_vectorwise_quant, (A, threshold))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_int8_mm_dequant(self, device):\n A = torch.randint(-128, 127, (256, 256), dtype=torch.int32, device=device)\n row_stats = torch.randn(256, dtype=torch.float32, device=device)\n col_stats = torch.randn(256, dtype=torch.float32, device=device)\n out = torch.ops.bitsandbytes.int8_mm_dequant(A, row_stats, col_stats)\n\n assert out.shape == A.shape\n assert out.dtype == torch.float16\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.int8_mm_dequant, (A, row_stats, col_stats))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"has_bias\", TRUE_FALSE)\n def test_int8_scaled_mm(self, device, dtype, has_bias):\n A = torch.randint(-128, 127, (10, 20), dtype=torch.int8, device=device)\n B = torch.randint(-128, 127, (30, 20), dtype=torch.int8, device=device)\n row_stats = torch.randn(10, dtype=torch.float32, device=device)\n col_stats = torch.randn(30, dtype=torch.float32, device=device)\n bias = torch.randn(30, dtype=dtype, device=device) if has_bias else None\n out = torch.ops.bitsandbytes.int8_scaled_mm(A, B, row_stats, col_stats, bias=bias, dtype=dtype)\n\n assert out.shape == (10, 30)\n assert out.dtype == dtype\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.int8_scaled_mm, (A, B, row_stats, col_stats, bias, dtype))\n\n\nclass TestInt8BlockwiseQuantOps:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_quantize_blockwise(self, device, dtype, blocksize):\n if device == \"cpu\":\n if dtype != torch.float32:\n pytest.skip(\"CPU implementation is only available for float32\")\n\n if blocksize != 256:\n pytest.skip(\"CPU implementation is slow; only test blocksize=256\")\n\n code = bitsandbytes.functional.create_dynamic_map().to(device)\n A = torch.randn(1024, 1024, dtype=dtype, device=device)\n out, absmax = torch.ops.bitsandbytes.quantize_blockwise(A, code, blocksize)\n\n assert out.shape == A.shape\n assert out.dtype == torch.uint8","source_hash":"e8c6491c46599880959e5626bd0877e0b9427e63f53e514dedd0ceb866d8fe1d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_ops.test_quantize_blockwise","uri":"program://bitsandbytes/function/tests.test_ops.test_quantize_blockwise#L106-L125","kind":"function","name":"test_quantize_blockwise","path":"tests/test_ops.py","language":"python","start_line":106,"end_line":125,"context_start_line":86,"context_end_line":145,"code":" @pytest.mark.parametrize(\"has_bias\", TRUE_FALSE)\n def test_int8_scaled_mm(self, device, dtype, has_bias):\n A = torch.randint(-128, 127, (10, 20), dtype=torch.int8, device=device)\n B = torch.randint(-128, 127, (30, 20), dtype=torch.int8, device=device)\n row_stats = torch.randn(10, dtype=torch.float32, device=device)\n col_stats = torch.randn(30, dtype=torch.float32, device=device)\n bias = torch.randn(30, dtype=dtype, device=device) if has_bias else None\n out = torch.ops.bitsandbytes.int8_scaled_mm(A, B, row_stats, col_stats, bias=bias, dtype=dtype)\n\n assert out.shape == 
(10, 30)\n assert out.dtype == dtype\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.int8_scaled_mm, (A, B, row_stats, col_stats, bias, dtype))\n\n\nclass TestInt8BlockwiseQuantOps:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_quantize_blockwise(self, device, dtype, blocksize):\n if device == \"cpu\":\n if dtype != torch.float32:\n pytest.skip(\"CPU implementation is only available for float32\")\n\n if blocksize != 256:\n pytest.skip(\"CPU implementation is slow; only test blocksize=256\")\n\n code = bitsandbytes.functional.create_dynamic_map().to(device)\n A = torch.randn(1024, 1024, dtype=dtype, device=device)\n out, absmax = torch.ops.bitsandbytes.quantize_blockwise(A, code, blocksize)\n\n assert out.shape == A.shape\n assert out.dtype == torch.uint8\n assert out.device == A.device\n\n assert absmax.device == A.device\n assert absmax.dtype == torch.float32\n\n opcheck(torch.ops.bitsandbytes.quantize_blockwise, (A, code, blocksize))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_dequantize_blockwise(self, device, dtype, blocksize):\n if device == \"cpu\" and dtype != torch.float32:\n pytest.skip(\"CPU implementation is only available for float32\")\n\n A = torch.randint(0, 255, (1024, 1024), dtype=torch.uint8, device=device)\n code = bitsandbytes.functional.create_dynamic_map().to(device, dtype=torch.float32)\n\n n = A.numel()\n blocks = -(n // -blocksize)\n absmax = torch.randn((blocks,), device=device, dtype=torch.float32)\n\n out = torch.ops.bitsandbytes.dequantize_blockwise.default(A, absmax, code, blocksize, dtype)\n\n assert out.shape == A.shape\n assert out.dtype == dtype\n assert out.device == A.device","source_hash":"e8c6491c46599880959e5626bd0877e0b9427e63f53e514dedd0ceb866d8fe1d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_ops.test_dequantize_blockwise","uri":"program://bitsandbytes/function/tests.test_ops.test_dequantize_blockwise#L130-L147","kind":"function","name":"test_dequantize_blockwise","path":"tests/test_ops.py","language":"python","start_line":130,"end_line":147,"context_start_line":110,"context_end_line":167,"code":"\n if blocksize != 256:\n pytest.skip(\"CPU implementation is slow; only test blocksize=256\")\n\n code = bitsandbytes.functional.create_dynamic_map().to(device)\n A = torch.randn(1024, 1024, dtype=dtype, device=device)\n out, absmax = torch.ops.bitsandbytes.quantize_blockwise(A, code, blocksize)\n\n assert out.shape == A.shape\n assert out.dtype == torch.uint8\n assert out.device == A.device\n\n assert absmax.device == A.device\n assert absmax.dtype == torch.float32\n\n opcheck(torch.ops.bitsandbytes.quantize_blockwise, (A, code, blocksize))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_dequantize_blockwise(self, device, dtype, blocksize):\n if device == \"cpu\" 
and dtype != torch.float32:\n pytest.skip(\"CPU implementation is only available for float32\")\n\n A = torch.randint(0, 255, (1024, 1024), dtype=torch.uint8, device=device)\n code = bitsandbytes.functional.create_dynamic_map().to(device, dtype=torch.float32)\n\n n = A.numel()\n blocks = -(n // -blocksize)\n absmax = torch.randn((blocks,), device=device, dtype=torch.float32)\n\n out = torch.ops.bitsandbytes.dequantize_blockwise.default(A, absmax, code, blocksize, dtype)\n\n assert out.shape == A.shape\n assert out.dtype == dtype\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.dequantize_blockwise.default, (A, absmax, code, blocksize, dtype))\n\n\nclass Test4bitBlockwiseQuantOps:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"storage_dtype\", [torch.uint8, torch.bfloat16], ids=id_formatter(\"storage_dtype\"))\n @pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"])\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_quantize_4bit(self, device, dtype, storage_dtype, quant_type, blocksize):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype, storage_dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n A = torch.randn(1024, 1024, dtype=dtype, device=device)\n\n out, absmax = torch.ops.bitsandbytes.quantize_4bit.default(A, blocksize, quant_type, storage_dtype)\n\n assert out.device == A.device\n assert out.dtype == storage_dtype\n\n assert absmax.device == A.device","source_hash":"e8c6491c46599880959e5626bd0877e0b9427e63f53e514dedd0ceb866d8fe1d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_ops.test_quantize_4bit","uri":"program://bitsandbytes/function/tests.test_ops.test_quantize_4bit#L156-L173","kind":"function","name":"test_quantize_4bit","path":"tests/test_ops.py","language":"python","start_line":156,"end_line":173,"context_start_line":136,"context_end_line":193,"code":"\n n = A.numel()\n blocks = -(n // -blocksize)\n absmax = torch.randn((blocks,), device=device, dtype=torch.float32)\n\n out = torch.ops.bitsandbytes.dequantize_blockwise.default(A, absmax, code, blocksize, dtype)\n\n assert out.shape == A.shape\n assert out.dtype == dtype\n assert out.device == A.device\n\n opcheck(torch.ops.bitsandbytes.dequantize_blockwise.default, (A, absmax, code, blocksize, dtype))\n\n\nclass Test4bitBlockwiseQuantOps:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"storage_dtype\", [torch.uint8, torch.bfloat16], ids=id_formatter(\"storage_dtype\"))\n @pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"])\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_quantize_4bit(self, device, dtype, storage_dtype, quant_type, blocksize):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype, storage_dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n A = torch.randn(1024, 1024, dtype=dtype, device=device)\n\n out, absmax = torch.ops.bitsandbytes.quantize_4bit.default(A, blocksize, quant_type, storage_dtype)\n\n assert out.device == A.device\n assert out.dtype == storage_dtype\n\n assert absmax.device == A.device\n assert absmax.dtype 
== torch.float32\n\n if storage_dtype != torch.uint8:\n pytest.xfail(\"opcheck fails for storage_dtype != torch.uint8\")\n\n opcheck(torch.ops.bitsandbytes.quantize_4bit.default, (A, blocksize, quant_type, storage_dtype))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"storage_dtype\", [torch.uint8, torch.bfloat16], ids=id_formatter(\"storage_dtype\"))\n @pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"])\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_dequantize_4bit(self, device, dtype, storage_dtype, quant_type, blocksize):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype, storage_dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n shape = (128, 128)\n\n n = prod(shape)\n blocks = -(n // -blocksize)\n quantized_shape = ((n + 1) // (storage_dtype.itemsize * 2), 1)\n\n A = (\n torch.randint(0, 255, ((n + 1) // 2,), dtype=torch.uint8, device=device)\n .view(storage_dtype)\n .reshape(quantized_shape)","source_hash":"e8c6491c46599880959e5626bd0877e0b9427e63f53e514dedd0ceb866d8fe1d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_ops.test_dequantize_4bit","uri":"program://bitsandbytes/function/tests.test_ops.test_dequantize_4bit#L180-L207","kind":"function","name":"test_dequantize_4bit","path":"tests/test_ops.py","language":"python","start_line":180,"end_line":207,"context_start_line":160,"context_end_line":227,"code":" A = torch.randn(1024, 1024, dtype=dtype, device=device)\n\n out, absmax = torch.ops.bitsandbytes.quantize_4bit.default(A, blocksize, quant_type, storage_dtype)\n\n assert out.device == A.device\n assert out.dtype == storage_dtype\n\n assert absmax.device == A.device\n assert absmax.dtype == torch.float32\n\n if storage_dtype != torch.uint8:\n pytest.xfail(\"opcheck fails for storage_dtype != torch.uint8\")\n\n opcheck(torch.ops.bitsandbytes.quantize_4bit.default, (A, blocksize, quant_type, storage_dtype))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"storage_dtype\", [torch.uint8, torch.bfloat16], ids=id_formatter(\"storage_dtype\"))\n @pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"])\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_dequantize_4bit(self, device, dtype, storage_dtype, quant_type, blocksize):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype, storage_dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n shape = (128, 128)\n\n n = prod(shape)\n blocks = -(n // -blocksize)\n quantized_shape = ((n + 1) // (storage_dtype.itemsize * 2), 1)\n\n A = (\n torch.randint(0, 255, ((n + 1) // 2,), dtype=torch.uint8, device=device)\n .view(storage_dtype)\n .reshape(quantized_shape)\n .contiguous()\n )\n\n absmax = torch.randn((blocks,), dtype=torch.float32, device=device)\n\n out = torch.ops.bitsandbytes.dequantize_4bit.default(A, absmax, blocksize, quant_type, shape, dtype)\n\n assert out.device == A.device\n assert out.shape == shape\n\n opcheck(\n torch.ops.bitsandbytes.dequantize_4bit.default,\n (A, absmax, blocksize, quant_type, shape, dtype),\n )\n\n @pytest.mark.parametrize(\"device\", 
get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"storage_dtype\", [torch.uint8, torch.bfloat16], ids=id_formatter(\"storage_dtype\"))\n @pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"])\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_gemv_4bit(self, device, dtype, storage_dtype, quant_type, blocksize):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype, storage_dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n out_features = 1024\n in_features = 256\n\n A = torch.randn((1, 1, in_features), dtype=dtype, device=device)\n B = torch.randn((out_features, in_features), dtype=dtype, device=A.device)\n B_q, absmax = torch.ops.bitsandbytes.quantize_4bit(B, blocksize, quant_type, storage_dtype)\n code = bitsandbytes.functional.get_4bit_type(quant_type, device=A.device, blocksize=blocksize)\n\n out = torch.ops.bitsandbytes.gemv_4bit.default(A, B_q, B.shape, absmax, code, blocksize)\n","source_hash":"e8c6491c46599880959e5626bd0877e0b9427e63f53e514dedd0ceb866d8fe1d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_ops.test_gemv_4bit","uri":"program://bitsandbytes/function/tests.test_ops.test_gemv_4bit#L214-L233","kind":"function","name":"test_gemv_4bit","path":"tests/test_ops.py","language":"python","start_line":214,"end_line":233,"context_start_line":194,"context_end_line":233,"code":" .contiguous()\n )\n\n absmax = torch.randn((blocks,), dtype=torch.float32, device=device)\n\n out = torch.ops.bitsandbytes.dequantize_4bit.default(A, absmax, blocksize, quant_type, shape, dtype)\n\n assert out.device == A.device\n assert out.shape == shape\n\n opcheck(\n torch.ops.bitsandbytes.dequantize_4bit.default,\n (A, absmax, blocksize, quant_type, shape, dtype),\n )\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=id_formatter(\"dtype\"))\n @pytest.mark.parametrize(\"storage_dtype\", [torch.uint8, torch.bfloat16], ids=id_formatter(\"storage_dtype\"))\n @pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"])\n @pytest.mark.parametrize(\"blocksize\", [64, 128, 256, 512] if not HIP_ENVIRONMENT else [128, 256, 512])\n def test_gemv_4bit(self, device, dtype, storage_dtype, quant_type, blocksize):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype, storage_dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n out_features = 1024\n in_features = 256\n\n A = torch.randn((1, 1, in_features), dtype=dtype, device=device)\n B = torch.randn((out_features, in_features), dtype=dtype, device=A.device)\n B_q, absmax = torch.ops.bitsandbytes.quantize_4bit(B, blocksize, quant_type, storage_dtype)\n code = bitsandbytes.functional.get_4bit_type(quant_type, device=A.device, blocksize=blocksize)\n\n out = torch.ops.bitsandbytes.gemv_4bit.default(A, B_q, B.shape, absmax, code, blocksize)\n\n assert out.device == A.device\n assert out.dtype == dtype\n assert out.shape == (1, 1, out_features)\n assert out.isreal().all()\n\n opcheck(torch.ops.bitsandbytes.gemv_4bit.default, (A, B_q, B.shape, absmax, code, blocksize))","source_hash":"e8c6491c46599880959e5626bd0877e0b9427e63f53e514dedd0ceb866d8fe1d","truncated":false} 
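Note: for orientation, here is a standalone usage sketch of the op sequence that test_gemv_4bit exercises. It assumes a CUDA build of bitsandbytes with the custom ops registered; shapes and arguments mirror the test, and the comparison against a dense matmul is illustrative, not an exact-match check.

import torch
import bitsandbytes

in_features, out_features, blocksize = 256, 1024, 64
A = torch.randn(1, 1, in_features, dtype=torch.float16, device="cuda")
B = torch.randn(out_features, in_features, dtype=torch.float16, device="cuda")

# Blockwise NF4 quantization of the weight, with uint8 storage.
B_q, absmax = torch.ops.bitsandbytes.quantize_4bit(B, blocksize, "nf4", torch.uint8)
code = bitsandbytes.functional.get_4bit_type("nf4", device="cuda", blocksize=blocksize)

# Fused GEMV against the packed weight; output shape is (1, 1, out_features).
out = torch.ops.bitsandbytes.gemv_4bit.default(A, B_q, B.shape, absmax, code, blocksize)

# The quantized result should roughly track the dense reference matmul.
print((out - A @ B.t()).abs().mean().item())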
{"repo_id":"bitsandbytes","entity_id":"py:tests.test_autograd","uri":"program://bitsandbytes/module/tests.test_autograd#L1-L264","kind":"module","name":"tests.test_autograd","path":"tests/test_autograd.py","language":"python","start_line":1,"end_line":264,"context_start_line":1,"context_end_line":264,"code":"import pytest\nimport torch\n\nimport bitsandbytes as bnb\nfrom tests.helpers import (\n BOOLEAN_TRIPLES,\n TRUE_FALSE,\n describe_dtype,\n get_available_devices,\n id_formatter,\n is_supported_on_hpu,\n)\n\nTRANSPOSE_VALS = [(False, True), (False, False)]\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"dim1\", [40], ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"dim2\", [64, 0], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"dim3\", [32], ids=id_formatter(\"dim3\"))\n@pytest.mark.parametrize(\"dim4\", [48], ids=id_formatter(\"dim4\"))\n@pytest.mark.parametrize(\"decomp\", [0.0, 6.0], ids=id_formatter(\"decomp\"))\n@pytest.mark.parametrize(\n \"funcs\",\n [(torch.matmul, bnb.matmul), (torch.matmul, bnb.research.switchback_bnb)],\n ids=[\"func=matmul\", \"func=switchback_bnb\"],\n)\n@pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=describe_dtype)\n@pytest.mark.parametrize(\"req_grad\", BOOLEAN_TRIPLES, ids=id_formatter(\"req_grad\"))\n@pytest.mark.parametrize(\"transpose\", TRANSPOSE_VALS, ids=id_formatter(\"transpose\"))\n@pytest.mark.parametrize(\"has_fp16_weights\", TRUE_FALSE, ids=id_formatter(\"has_fp16_weights\"))\n@pytest.mark.parametrize(\"has_bias\", TRUE_FALSE, ids=id_formatter(\"has_bias\"))\ndef test_matmullt(\n device, dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, decomp, has_fp16_weights, has_bias\n):\n if device != \"cuda\":\n if funcs[1] == bnb.research.switchback_bnb:\n # TODO: Deprecate/remove?\n pytest.skip(\"switchback_bnb only works on CUDA.\")\n\n if req_grad[1]:\n # This will be deprecated for CUDA in the future. 
We don't expect\n # this to work on any other device.\n pytest.skip(\"Deprecated feature with CUDA support only.\")\n\n dimA = (dim2, dim3) if not transpose[0] else (dim3, dim2)\n dimB = (dim3, dim4) if not transpose[1] else (dim4, dim3)\n outlier_dim = torch.randint(0, dimA[1], size=(dimA[1] // 8,), device=device)\n if has_bias == False:\n req_grad = list(req_grad)\n req_grad[2] = False\n\n if device == \"cpu\" and dtype != torch.float32 and has_fp16_weights and any(req_grad):\n if torch.__version__ < (2, 6):\n pytest.xfail(\"mse_loss bf16/fp16 on CPU is not supported in torch < 2.6\")\n\n for i in range(3):\n # normal multiply\n if funcs[0] in [torch.mm, torch.matmul]:\n A = torch.randn(size=dimA, device=device, requires_grad=req_grad[0], dtype=dtype)\n if decomp == 6.0:\n with torch.no_grad():\n A[:, outlier_dim] = 6.0\n B = torch.randn(size=dimB, device=device, requires_grad=req_grad[1], dtype=dtype)\n target = torch.randn(\n size=(dim2, dim4),\n device=device,\n requires_grad=req_grad[1],\n dtype=dtype,\n )\n bias = None\n bias2 = None\n if has_bias:\n bias = torch.randn(dim4, device=device, dtype=dtype, requires_grad=req_grad[2])\n bias2 = bias.clone()\n torch.nn.init.xavier_uniform_(B)\n B2 = B.clone()\n\n state = bnb.MatmulLtState()\n state.threshold = decomp\n state.has_fp16_weights = has_fp16_weights\n if not has_fp16_weights:\n if not transpose[0] and not transpose[1]:\n B2 = B2.t().contiguous()\n\n state.CB, state.SCB, _ = bnb.functional.int8_vectorwise_quant(B2.to(torch.float16))\n B2 = state.CB\n\n if not transpose[0] and transpose[1]:\n out_torch = funcs[0](A, B.t())\n out_bnb = funcs[1](A, B2, state=state, bias=bias2)\n elif not transpose[0] and not transpose[1]:\n out_torch = funcs[0](A, B)\n out_bnb = funcs[1](A, B2.t(), state=state, bias=bias2)\n\n if has_bias:\n out_torch += bias\n\n assert out_bnb.dtype == A.dtype, f\"bnb matmullt received {A.dtype} but returned {out_bnb.dtype}\"\n\n n = out_bnb.numel()\n err = torch.abs(out_bnb - out_torch).mean().item()\n # print(f'abs error {err:.4f}')\n\n idx = torch.isclose(out_bnb, out_torch, atol=0.01, rtol=0.1)\n assert (idx == 0).sum().item() <= n * (0.0175 if dtype == torch.float16 else 0.021)\n idx = torch.isclose(out_bnb, out_torch, atol=0.035, rtol=0.2)\n assert (idx == 0).sum().item() <= n * 0.001\n\n if has_fp16_weights:\n if any(req_grad):\n out_bnb.data.copy_(out_torch)\n if device == \"cuda\":\n torch.cuda.synchronize()\n loss_bnb = torch.nn.functional.mse_loss(out_bnb, target).mean()\n loss_bnb.backward()\n gradA1 = A.grad\n gradB1 = B.grad\n A.grad = None\n B.grad = None\n if has_bias:\n gradBias1 = bias.grad\n bias.grad = None\n\n loss_torch = torch.nn.functional.mse_loss(out_torch, target).mean()\n loss_torch.backward()\n gradA2 = A.grad\n gradB2 = B.grad\n A.grad = None\n B.grad = None\n if has_bias:\n gradBias2 = bias.grad\n bias.grad = None\n\n if req_grad[0]:\n torch.testing.assert_close(gradA1, gradA2, atol=0.015, rtol=0.1)\n if req_grad[1]:\n n = gradB1.numel()\n if dim2 > 0:\n assert torch.abs(gradB1).sum() > 0.0\n assert torch.abs(gradB2).sum() > 0.0\n else:\n assert torch.abs(gradB1).sum() == 0.0\n assert torch.abs(gradB2).sum() == 0.0\n\n idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3)\n assert (idx == 0).sum().item() <= n * 0.10\n\n idx = torch.isclose(gradB1, gradB2, atol=0.10, rtol=0.3)\n assert (idx == 0).sum().item() <= n * 0.02\n\n torch.testing.assert_close(gradB1, gradB2, atol=0.18, rtol=0.3)\n\n if req_grad[2]:\n torch.testing.assert_close(gradBias1, 
gradBias2)\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"dim1\", [48], ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"dim2\", [64, 0], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"dim3\", [64], ids=id_formatter(\"dim3\"))\n@pytest.mark.parametrize(\"dim4\", [96], ids=id_formatter(\"dim4\"))\n@pytest.mark.parametrize(\"funcs\", [(torch.matmul, bnb.matmul_4bit)], ids=[\"func=matmul\"])\n@pytest.mark.parametrize(\"req_grad\", BOOLEAN_TRIPLES, ids=id_formatter(\"req_grad\"))\n@pytest.mark.parametrize(\"transpose\", TRANSPOSE_VALS, ids=id_formatter(\"transpose\"))\n@pytest.mark.parametrize(\"has_bias\", TRUE_FALSE, ids=id_formatter(\"has_bias\"))\n@pytest.mark.parametrize(\"dtype\", [torch.float16, torch.float32], ids=describe_dtype)\n@pytest.mark.parametrize(\"compress_statistics\", TRUE_FALSE, ids=id_formatter(\"compress_statistics\"))\n@pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"], ids=id_formatter(\"quant_type\"))\ndef test_matmul_4bit(\n device,\n dim1,\n dim2,\n dim3,\n dim4,\n funcs,\n dtype,\n req_grad,\n transpose,\n has_bias,\n compress_statistics,\n quant_type,\n):\n dimA = (dim2, dim3) if not transpose[0] else (dim3, dim2)\n dimB = (dim3, dim4) if not transpose[1] else (dim4, dim3)\n if has_bias == False:\n req_grad = list(req_grad)\n req_grad[2] = False\n\n if device == \"cpu\" and dtype != torch.float32 and any(req_grad) and torch.__version__ < (2, 6):\n pytest.xfail(\"mse_loss fp16 on CPU is not supported in torch < 2.6\")\n\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n for i in range(3):\n # normal multiply\n if funcs[0] in [torch.mm, torch.matmul]:\n A = torch.randn(size=dimA, device=device, requires_grad=req_grad[0], dtype=dtype)\n B = torch.randn(size=dimB, device=device, requires_grad=req_grad[1], dtype=dtype)\n target = torch.randn(size=(dim2, dim4), device=device, requires_grad=req_grad[1], dtype=dtype)\n bias = None\n bias2 = None\n if has_bias:\n bias = torch.randn(dim4, device=device, dtype=dtype, requires_grad=req_grad[2])\n bias2 = bias.clone()\n torch.nn.init.xavier_uniform_(B)\n\n B2, quant_state = bnb.functional.quantize_4bit(\n B,\n compress_statistics=compress_statistics,\n quant_type=quant_type,\n )\n\n if not transpose[0] and transpose[1]:\n out_torch = funcs[0](A, B.t())\n out_bnb = funcs[1](A, B2.t(), quant_state, bias=bias2)\n elif not transpose[0] and not transpose[1]:\n out_torch = funcs[0](A, B)\n out_bnb = funcs[1](A, B2, quant_state, bias=bias2)\n\n if has_bias:\n out_torch += bias\n\n assert out_bnb.dtype == A.dtype, f\"bnb matmullt received {A.dtype} but returned {out_bnb.dtype}\"\n\n n = out_bnb.numel()\n err = torch.abs(out_bnb - out_torch).float().mean().item()\n if n > 0:\n assert err < 0.115\n\n # assert err < 0.20\n if any(req_grad):\n out_bnb.data.copy_(out_torch)\n if device == \"cuda\":\n torch.cuda.synchronize()\n elif device == \"hpu\":\n torch.hpu.synchronize()\n\n loss_bnb = torch.nn.functional.mse_loss(out_bnb, target).mean()\n loss_bnb.backward()\n gradA1 = A.grad\n gradB1 = B.grad\n A.grad = None\n B.grad = None\n if has_bias:\n gradBias1 = bias.grad\n bias.grad = None\n\n loss_torch = torch.nn.functional.mse_loss(out_torch, target).mean()\n loss_torch.backward()\n gradA2 = A.grad\n gradB2 = B.grad\n A.grad = None\n B.grad = None\n if has_bias:\n gradBias2 = bias.grad\n bias.grad = None\n\n if req_grad[0]:\n torch.testing.assert_close(gradA1, gradA2, atol=0.015, 
rtol=0.1)\n\n if req_grad[2]:\n torch.testing.assert_close(gradBias1, gradBias2)","source_hash":"f2957e3b46365e7265c162ce92a8d1ec149cc1c78e43e55cab7625afb5c767c1","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_autograd.test_matmullt","uri":"program://bitsandbytes/function/tests.test_autograd.test_matmullt#L33-L155","kind":"function","name":"test_matmullt","path":"tests/test_autograd.py","language":"python","start_line":33,"end_line":155,"context_start_line":13,"context_end_line":175,"code":"\nTRANSPOSE_VALS = [(False, True), (False, False)]\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"dim1\", [40], ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"dim2\", [64, 0], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"dim3\", [32], ids=id_formatter(\"dim3\"))\n@pytest.mark.parametrize(\"dim4\", [48], ids=id_formatter(\"dim4\"))\n@pytest.mark.parametrize(\"decomp\", [0.0, 6.0], ids=id_formatter(\"decomp\"))\n@pytest.mark.parametrize(\n \"funcs\",\n [(torch.matmul, bnb.matmul), (torch.matmul, bnb.research.switchback_bnb)],\n ids=[\"func=matmul\", \"func=switchback_bnb\"],\n)\n@pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=describe_dtype)\n@pytest.mark.parametrize(\"req_grad\", BOOLEAN_TRIPLES, ids=id_formatter(\"req_grad\"))\n@pytest.mark.parametrize(\"transpose\", TRANSPOSE_VALS, ids=id_formatter(\"transpose\"))\n@pytest.mark.parametrize(\"has_fp16_weights\", TRUE_FALSE, ids=id_formatter(\"has_fp16_weights\"))\n@pytest.mark.parametrize(\"has_bias\", TRUE_FALSE, ids=id_formatter(\"has_bias\"))\ndef test_matmullt(\n device, dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose, decomp, has_fp16_weights, has_bias\n):\n if device != \"cuda\":\n if funcs[1] == bnb.research.switchback_bnb:\n # TODO: Deprecate/remove?\n pytest.skip(\"switchback_bnb only works on CUDA.\")\n\n if req_grad[1]:\n # This will be deprecated for CUDA in the future. 
We don't expect\n # this to work on any other device.\n pytest.skip(\"Deprecated feature with CUDA support only.\")\n\n dimA = (dim2, dim3) if not transpose[0] else (dim3, dim2)\n dimB = (dim3, dim4) if not transpose[1] else (dim4, dim3)\n outlier_dim = torch.randint(0, dimA[1], size=(dimA[1] // 8,), device=device)\n if has_bias == False:\n req_grad = list(req_grad)\n req_grad[2] = False\n\n if device == \"cpu\" and dtype != torch.float32 and has_fp16_weights and any(req_grad):\n if torch.__version__ < (2, 6):\n pytest.xfail(\"mse_loss bf16/fp16 on CPU is not supported in torch < 2.6\")\n\n for i in range(3):\n # normal multiply\n if funcs[0] in [torch.mm, torch.matmul]:\n A = torch.randn(size=dimA, device=device, requires_grad=req_grad[0], dtype=dtype)\n if decomp == 6.0:\n with torch.no_grad():\n A[:, outlier_dim] = 6.0\n B = torch.randn(size=dimB, device=device, requires_grad=req_grad[1], dtype=dtype)\n target = torch.randn(\n size=(dim2, dim4),\n device=device,\n requires_grad=req_grad[1],\n dtype=dtype,\n )\n bias = None\n bias2 = None\n if has_bias:\n bias = torch.randn(dim4, device=device, dtype=dtype, requires_grad=req_grad[2])\n bias2 = bias.clone()\n torch.nn.init.xavier_uniform_(B)\n B2 = B.clone()\n\n state = bnb.MatmulLtState()\n state.threshold = decomp\n state.has_fp16_weights = has_fp16_weights\n if not has_fp16_weights:\n if not transpose[0] and not transpose[1]:\n B2 = B2.t().contiguous()\n\n state.CB, state.SCB, _ = bnb.functional.int8_vectorwise_quant(B2.to(torch.float16))\n B2 = state.CB\n\n if not transpose[0] and transpose[1]:\n out_torch = funcs[0](A, B.t())\n out_bnb = funcs[1](A, B2, state=state, bias=bias2)\n elif not transpose[0] and not transpose[1]:\n out_torch = funcs[0](A, B)\n out_bnb = funcs[1](A, B2.t(), state=state, bias=bias2)\n\n if has_bias:\n out_torch += bias\n\n assert out_bnb.dtype == A.dtype, f\"bnb matmullt received {A.dtype} but returned {out_bnb.dtype}\"\n\n n = out_bnb.numel()\n err = torch.abs(out_bnb - out_torch).mean().item()\n # print(f'abs error {err:.4f}')\n\n idx = torch.isclose(out_bnb, out_torch, atol=0.01, rtol=0.1)\n assert (idx == 0).sum().item() <= n * (0.0175 if dtype == torch.float16 else 0.021)\n idx = torch.isclose(out_bnb, out_torch, atol=0.035, rtol=0.2)\n assert (idx == 0).sum().item() <= n * 0.001\n\n if has_fp16_weights:\n if any(req_grad):\n out_bnb.data.copy_(out_torch)\n if device == \"cuda\":\n torch.cuda.synchronize()\n loss_bnb = torch.nn.functional.mse_loss(out_bnb, target).mean()\n loss_bnb.backward()\n gradA1 = A.grad\n gradB1 = B.grad\n A.grad = None\n B.grad = None\n if has_bias:\n gradBias1 = bias.grad\n bias.grad = None\n\n loss_torch = torch.nn.functional.mse_loss(out_torch, target).mean()\n loss_torch.backward()\n gradA2 = A.grad\n gradB2 = B.grad\n A.grad = None\n B.grad = None\n if has_bias:\n gradBias2 = bias.grad\n bias.grad = None\n\n if req_grad[0]:\n torch.testing.assert_close(gradA1, gradA2, atol=0.015, rtol=0.1)\n if req_grad[1]:\n n = gradB1.numel()\n if dim2 > 0:\n assert torch.abs(gradB1).sum() > 0.0\n assert torch.abs(gradB2).sum() > 0.0\n else:\n assert torch.abs(gradB1).sum() == 0.0\n assert torch.abs(gradB2).sum() == 0.0\n\n idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3)\n assert (idx == 0).sum().item() <= n * 0.10\n\n idx = torch.isclose(gradB1, gradB2, atol=0.10, rtol=0.3)\n assert (idx == 0).sum().item() <= n * 0.02\n\n torch.testing.assert_close(gradB1, gradB2, atol=0.18, rtol=0.3)\n\n if req_grad[2]:\n torch.testing.assert_close(gradBias1, 
gradBias2)\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"dim1\", [48], ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"dim2\", [64, 0], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"dim3\", [64], ids=id_formatter(\"dim3\"))\n@pytest.mark.parametrize(\"dim4\", [96], ids=id_formatter(\"dim4\"))\n@pytest.mark.parametrize(\"funcs\", [(torch.matmul, bnb.matmul_4bit)], ids=[\"func=matmul\"])\n@pytest.mark.parametrize(\"req_grad\", BOOLEAN_TRIPLES, ids=id_formatter(\"req_grad\"))\n@pytest.mark.parametrize(\"transpose\", TRANSPOSE_VALS, ids=id_formatter(\"transpose\"))\n@pytest.mark.parametrize(\"has_bias\", TRUE_FALSE, ids=id_formatter(\"has_bias\"))\n@pytest.mark.parametrize(\"dtype\", [torch.float16, torch.float32], ids=describe_dtype)\n@pytest.mark.parametrize(\"compress_statistics\", TRUE_FALSE, ids=id_formatter(\"compress_statistics\"))\n@pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"], ids=id_formatter(\"quant_type\"))\ndef test_matmul_4bit(\n device,\n dim1,\n dim2,\n dim3,\n dim4,","source_hash":"f2957e3b46365e7265c162ce92a8d1ec149cc1c78e43e55cab7625afb5c767c1","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_autograd.test_matmul_4bit","uri":"program://bitsandbytes/function/tests.test_autograd.test_matmul_4bit#L170-L264","kind":"function","name":"test_matmul_4bit","path":"tests/test_autograd.py","language":"python","start_line":170,"end_line":264,"context_start_line":150,"context_end_line":264,"code":" assert (idx == 0).sum().item() <= n * 0.02\n\n torch.testing.assert_close(gradB1, gradB2, atol=0.18, rtol=0.3)\n\n if req_grad[2]:\n torch.testing.assert_close(gradBias1, gradBias2)\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"dim1\", [48], ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"dim2\", [64, 0], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"dim3\", [64], ids=id_formatter(\"dim3\"))\n@pytest.mark.parametrize(\"dim4\", [96], ids=id_formatter(\"dim4\"))\n@pytest.mark.parametrize(\"funcs\", [(torch.matmul, bnb.matmul_4bit)], ids=[\"func=matmul\"])\n@pytest.mark.parametrize(\"req_grad\", BOOLEAN_TRIPLES, ids=id_formatter(\"req_grad\"))\n@pytest.mark.parametrize(\"transpose\", TRANSPOSE_VALS, ids=id_formatter(\"transpose\"))\n@pytest.mark.parametrize(\"has_bias\", TRUE_FALSE, ids=id_formatter(\"has_bias\"))\n@pytest.mark.parametrize(\"dtype\", [torch.float16, torch.float32], ids=describe_dtype)\n@pytest.mark.parametrize(\"compress_statistics\", TRUE_FALSE, ids=id_formatter(\"compress_statistics\"))\n@pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"], ids=id_formatter(\"quant_type\"))\ndef test_matmul_4bit(\n device,\n dim1,\n dim2,\n dim3,\n dim4,\n funcs,\n dtype,\n req_grad,\n transpose,\n has_bias,\n compress_statistics,\n quant_type,\n):\n dimA = (dim2, dim3) if not transpose[0] else (dim3, dim2)\n dimB = (dim3, dim4) if not transpose[1] else (dim4, dim3)\n if has_bias == False:\n req_grad = list(req_grad)\n req_grad[2] = False\n\n if device == \"cpu\" and dtype != torch.float32 and any(req_grad) and torch.__version__ < (2, 6):\n pytest.xfail(\"mse_loss fp16 on CPU is not supported in torch < 2.6\")\n\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n for i in range(3):\n # normal multiply\n if funcs[0] in [torch.mm, torch.matmul]:\n A = torch.randn(size=dimA, device=device, requires_grad=req_grad[0], dtype=dtype)\n B = 
torch.randn(size=dimB, device=device, requires_grad=req_grad[1], dtype=dtype)\n target = torch.randn(size=(dim2, dim4), device=device, requires_grad=req_grad[1], dtype=dtype)\n bias = None\n bias2 = None\n if has_bias:\n bias = torch.randn(dim4, device=device, dtype=dtype, requires_grad=req_grad[2])\n bias2 = bias.clone()\n torch.nn.init.xavier_uniform_(B)\n\n B2, quant_state = bnb.functional.quantize_4bit(\n B,\n compress_statistics=compress_statistics,\n quant_type=quant_type,\n )\n\n if not transpose[0] and transpose[1]:\n out_torch = funcs[0](A, B.t())\n out_bnb = funcs[1](A, B2.t(), quant_state, bias=bias2)\n elif not transpose[0] and not transpose[1]:\n out_torch = funcs[0](A, B)\n out_bnb = funcs[1](A, B2, quant_state, bias=bias2)\n\n if has_bias:\n out_torch += bias\n\n assert out_bnb.dtype == A.dtype, f\"bnb matmullt received {A.dtype} but returned {out_bnb.dtype}\"\n\n n = out_bnb.numel()\n err = torch.abs(out_bnb - out_torch).float().mean().item()\n if n > 0:\n assert err < 0.115\n\n # assert err < 0.20\n if any(req_grad):\n out_bnb.data.copy_(out_torch)\n if device == \"cuda\":\n torch.cuda.synchronize()\n elif device == \"hpu\":\n torch.hpu.synchronize()\n\n loss_bnb = torch.nn.functional.mse_loss(out_bnb, target).mean()\n loss_bnb.backward()\n gradA1 = A.grad\n gradB1 = B.grad\n A.grad = None\n B.grad = None\n if has_bias:\n gradBias1 = bias.grad\n bias.grad = None\n\n loss_torch = torch.nn.functional.mse_loss(out_torch, target).mean()\n loss_torch.backward()\n gradA2 = A.grad\n gradB2 = B.grad\n A.grad = None\n B.grad = None\n if has_bias:\n gradBias2 = bias.grad\n bias.grad = None\n\n if req_grad[0]:\n torch.testing.assert_close(gradA1, gradA2, atol=0.015, rtol=0.1)\n\n if req_grad[2]:\n torch.testing.assert_close(gradBias1, gradBias2)","source_hash":"f2957e3b46365e7265c162ce92a8d1ec149cc1c78e43e55cab7625afb5c767c1","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_cuda_setup_evaluator","uri":"program://bitsandbytes/module/tests.test_cuda_setup_evaluator#L1-L26","kind":"module","name":"tests.test_cuda_setup_evaluator","path":"tests/test_cuda_setup_evaluator.py","language":"python","start_line":1,"end_line":26,"context_start_line":1,"context_end_line":26,"code":"import pytest\n\nfrom bitsandbytes.cextension import HIP_ENVIRONMENT, get_cuda_bnb_library_path\nfrom bitsandbytes.cuda_specs import CUDASpecs\n\n\n@pytest.fixture\ndef cuda120_spec() -> CUDASpecs:\n return CUDASpecs(\n cuda_version_string=\"120\",\n highest_compute_capability=(8, 6),\n cuda_version_tuple=(12, 0),\n )\n\n\n@pytest.mark.skipif(HIP_ENVIRONMENT, reason=\"this test is not supported on ROCm\")\ndef test_get_cuda_bnb_library_path(monkeypatch, cuda120_spec):\n monkeypatch.delenv(\"BNB_CUDA_VERSION\", raising=False)\n assert get_cuda_bnb_library_path(cuda120_spec).stem == \"libbitsandbytes_cuda120\"\n\n\n@pytest.mark.skipif(HIP_ENVIRONMENT, reason=\"this test is not supported on ROCm\")\ndef test_get_cuda_bnb_library_path_override(monkeypatch, cuda120_spec, caplog):\n monkeypatch.setenv(\"BNB_CUDA_VERSION\", \"110\")\n assert get_cuda_bnb_library_path(cuda120_spec).stem == \"libbitsandbytes_cuda110\"\n assert \"BNB_CUDA_VERSION\" in caplog.text # did we get the warning?","source_hash":"7ae884114f29cbfc9a69a6b7509832c75cf4093a069a6ed5f6f0cf141e807535","truncated":false} 
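Note: the tests in tests/test_cuda_setup_evaluator.py above pin down a simple precedence rule: an explicit BNB_CUDA_VERSION in the environment beats the detected CUDA version when the library stem is formed. A minimal sketch of that rule, with resolve_stem as a hypothetical stand-in for the naming logic inside get_cuda_bnb_library_path:

import os

def resolve_stem(detected_version: str) -> str:
    # Hypothetical stand-in: BNB_CUDA_VERSION, if set, overrides the detected
    # CUDA version in the "libbitsandbytes_cuda<ver>" stem.
    override = os.environ.get("BNB_CUDA_VERSION")
    return f"libbitsandbytes_cuda{override or detected_version}"

os.environ.pop("BNB_CUDA_VERSION", None)
assert resolve_stem("120") == "libbitsandbytes_cuda120"

os.environ["BNB_CUDA_VERSION"] = "110"
assert resolve_stem("120") == "libbitsandbytes_cuda110"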
{"repo_id":"bitsandbytes","entity_id":"py:tests.test_cuda_setup_evaluator.cuda120_spec","uri":"program://bitsandbytes/function/tests.test_cuda_setup_evaluator.cuda120_spec#L8-L13","kind":"function","name":"cuda120_spec","path":"tests/test_cuda_setup_evaluator.py","language":"python","start_line":8,"end_line":13,"context_start_line":1,"context_end_line":26,"code":"import pytest\n\nfrom bitsandbytes.cextension import HIP_ENVIRONMENT, get_cuda_bnb_library_path\nfrom bitsandbytes.cuda_specs import CUDASpecs\n\n\n@pytest.fixture\ndef cuda120_spec() -> CUDASpecs:\n return CUDASpecs(\n cuda_version_string=\"120\",\n highest_compute_capability=(8, 6),\n cuda_version_tuple=(12, 0),\n )\n\n\n@pytest.mark.skipif(HIP_ENVIRONMENT, reason=\"this test is not supported on ROCm\")\ndef test_get_cuda_bnb_library_path(monkeypatch, cuda120_spec):\n monkeypatch.delenv(\"BNB_CUDA_VERSION\", raising=False)\n assert get_cuda_bnb_library_path(cuda120_spec).stem == \"libbitsandbytes_cuda120\"\n\n\n@pytest.mark.skipif(HIP_ENVIRONMENT, reason=\"this test is not supported on ROCm\")\ndef test_get_cuda_bnb_library_path_override(monkeypatch, cuda120_spec, caplog):\n monkeypatch.setenv(\"BNB_CUDA_VERSION\", \"110\")\n assert get_cuda_bnb_library_path(cuda120_spec).stem == \"libbitsandbytes_cuda110\"\n assert \"BNB_CUDA_VERSION\" in caplog.text # did we get the warning?","source_hash":"7ae884114f29cbfc9a69a6b7509832c75cf4093a069a6ed5f6f0cf141e807535","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_cuda_setup_evaluator.test_get_cuda_bnb_library_path","uri":"program://bitsandbytes/function/tests.test_cuda_setup_evaluator.test_get_cuda_bnb_library_path#L17-L19","kind":"function","name":"test_get_cuda_bnb_library_path","path":"tests/test_cuda_setup_evaluator.py","language":"python","start_line":17,"end_line":19,"context_start_line":1,"context_end_line":26,"code":"import pytest\n\nfrom bitsandbytes.cextension import HIP_ENVIRONMENT, get_cuda_bnb_library_path\nfrom bitsandbytes.cuda_specs import CUDASpecs\n\n\n@pytest.fixture\ndef cuda120_spec() -> CUDASpecs:\n return CUDASpecs(\n cuda_version_string=\"120\",\n highest_compute_capability=(8, 6),\n cuda_version_tuple=(12, 0),\n )\n\n\n@pytest.mark.skipif(HIP_ENVIRONMENT, reason=\"this test is not supported on ROCm\")\ndef test_get_cuda_bnb_library_path(monkeypatch, cuda120_spec):\n monkeypatch.delenv(\"BNB_CUDA_VERSION\", raising=False)\n assert get_cuda_bnb_library_path(cuda120_spec).stem == \"libbitsandbytes_cuda120\"\n\n\n@pytest.mark.skipif(HIP_ENVIRONMENT, reason=\"this test is not supported on ROCm\")\ndef test_get_cuda_bnb_library_path_override(monkeypatch, cuda120_spec, caplog):\n monkeypatch.setenv(\"BNB_CUDA_VERSION\", \"110\")\n assert get_cuda_bnb_library_path(cuda120_spec).stem == \"libbitsandbytes_cuda110\"\n assert \"BNB_CUDA_VERSION\" in caplog.text # did we get the warning?","source_hash":"7ae884114f29cbfc9a69a6b7509832c75cf4093a069a6ed5f6f0cf141e807535","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_cuda_setup_evaluator.test_get_cuda_bnb_library_path_override","uri":"program://bitsandbytes/function/tests.test_cuda_setup_evaluator.test_get_cuda_bnb_library_path_override#L23-L26","kind":"function","name":"test_get_cuda_bnb_library_path_override","path":"tests/test_cuda_setup_evaluator.py","language":"python","start_line":23,"end_line":26,"context_start_line":3,"context_end_line":26,"code":"from bitsandbytes.cextension import HIP_ENVIRONMENT, get_cuda_bnb_library_path\nfrom bitsandbytes.cuda_specs import 
CUDASpecs\n\n\n@pytest.fixture\ndef cuda120_spec() -> CUDASpecs:\n return CUDASpecs(\n cuda_version_string=\"120\",\n highest_compute_capability=(8, 6),\n cuda_version_tuple=(12, 0),\n )\n\n\n@pytest.mark.skipif(HIP_ENVIRONMENT, reason=\"this test is not supported on ROCm\")\ndef test_get_cuda_bnb_library_path(monkeypatch, cuda120_spec):\n monkeypatch.delenv(\"BNB_CUDA_VERSION\", raising=False)\n assert get_cuda_bnb_library_path(cuda120_spec).stem == \"libbitsandbytes_cuda120\"\n\n\n@pytest.mark.skipif(HIP_ENVIRONMENT, reason=\"this test is not supported on ROCm\")\ndef test_get_cuda_bnb_library_path_override(monkeypatch, cuda120_spec, caplog):\n monkeypatch.setenv(\"BNB_CUDA_VERSION\", \"110\")\n assert get_cuda_bnb_library_path(cuda120_spec).stem == \"libbitsandbytes_cuda110\"\n assert \"BNB_CUDA_VERSION\" in caplog.text # did we get the warning?","source_hash":"7ae884114f29cbfc9a69a6b7509832c75cf4093a069a6ed5f6f0cf141e807535","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_optim","uri":"program://bitsandbytes/module/tests.test_optim#L1-L594","kind":"module","name":"tests.test_optim","path":"tests/test_optim.py","language":"python","start_line":1,"end_line":594,"context_start_line":1,"context_end_line":594,"code":"import os\nfrom os.path import join\nimport shutil\nimport sys\nimport time\nimport uuid\n\nfrom lion_pytorch import Lion\nimport pytest\nimport torch\n\nimport bitsandbytes as bnb\nimport bitsandbytes.functional as F\nfrom bitsandbytes.utils import sync_gpu\nfrom tests.helpers import describe_dtype, get_available_devices, id_formatter\n\n# import apex\n\nk = 20\n\n\ndef assert_most_approx_close(a, b, rtol=1e-3, atol=1e-3, max_error_count=0):\n idx = torch.isclose(a, b, rtol=rtol, atol=atol)\n error_count = (idx == 0).sum().item()\n if error_count > max_error_count:\n print(f\"Too many values not close: assert {error_count} < {max_error_count}\")\n torch.testing.assert_close(a, b, rtol=rtol, atol=atol)\n\n\ndef get_temp_dir():\n path = f\"/tmp/autoswap/{uuid.uuid4()}\"\n os.makedirs(path, exist_ok=True)\n return path\n\n\ndef rm_path(path):\n shutil.rmtree(path)\n\n\nstr2optimizers = {}\n\n## TODO: maybe remove these three.\nstr2optimizers[\"adam_pytorch\"] = (None, torch.optim.Adam, bnb.optim.Adam)\nstr2optimizers[\"lion_pytorch\"] = (None, Lion, bnb.optim.Lion)\nstr2optimizers[\"momentum_pytorch\"] = (\n None,\n lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9),\n bnb.optim.Adam,\n)\n\nstr2optimizers[\"adam\"] = (torch.optim.Adam, bnb.optim.Adam)\nstr2optimizers[\"adam8bit_blockwise\"] = (torch.optim.Adam, lambda pxx: bnb.optim.Adam8bit(pxx, block_wise=True))\nstr2optimizers[\"paged_adam\"] = (torch.optim.Adam, bnb.optim.PagedAdam)\nstr2optimizers[\"paged_adamw\"] = (torch.optim.AdamW, bnb.optim.PagedAdamW)\nstr2optimizers[\"paged_adam8bit_blockwise\"] = (\n torch.optim.Adam,\n lambda pxx: bnb.optim.PagedAdam8bit(pxx, block_wise=True),\n)\nstr2optimizers[\"paged_adamw8bit_blockwise\"] = (\n torch.optim.AdamW,\n lambda pxx: bnb.optim.PagedAdamW8bit(pxx, block_wise=True),\n)\n\nstr2optimizers[\"ademamix\"] = (bnb.optim.ademamix._ReferenceAdEMAMix, bnb.optim.AdEMAMix)\nstr2optimizers[\"ademamix8bit_blockwise\"] = (\n bnb.optim.ademamix._ReferenceAdEMAMix,\n lambda pxx: bnb.optim.AdEMAMix8bit(pxx),\n)\nstr2optimizers[\"paged_ademamix\"] = (bnb.optim.ademamix._ReferenceAdEMAMix, bnb.optim.PagedAdEMAMix)\nstr2optimizers[\"paged_ademamix8bit_blockwise\"] = (\n bnb.optim.ademamix._ReferenceAdEMAMix,\n lambda pxx: 
bnb.optim.PagedAdEMAMix8bit(pxx),\n)\nstr2optimizers[\"ademamix_scheduled\"] = (\n lambda pxx: bnb.optim.ademamix._ReferenceAdEMAMix(pxx, t_alpha=k, t_beta3=k),\n lambda pxx: bnb.optim.AdEMAMix(pxx, t_alpha=k, t_beta3=k),\n)\nstr2optimizers[\"paged_ademamix_scheduled\"] = (\n lambda pxx: bnb.optim.ademamix._ReferenceAdEMAMix(pxx, t_alpha=k, t_beta3=k),\n lambda pxx: bnb.optim.PagedAdEMAMix(pxx, t_alpha=k, t_beta3=k),\n)\nstr2optimizers[\"ademamix8bit_blockwise_scheduled\"] = (\n lambda pxx: bnb.optim.ademamix._ReferenceAdEMAMix(pxx, t_alpha=100, t_beta3=100),\n lambda pxx: bnb.optim.AdEMAMix8bit(pxx, t_alpha=100, t_beta3=100),\n)\nstr2optimizers[\"paged_ademamix8bit_blockwise_scheduled\"] = (\n lambda pxx: bnb.optim.ademamix._ReferenceAdEMAMix(pxx, t_alpha=100, t_beta3=100),\n lambda pxx: bnb.optim.PagedAdEMAMix8bit(pxx, t_alpha=100, t_beta3=100),\n)\n\nstr2optimizers[\"lion\"] = (Lion, bnb.optim.Lion)\nstr2optimizers[\"paged_lion\"] = (Lion, bnb.optim.PagedLion)\nstr2optimizers[\"lion8bit_blockwise\"] = (Lion, lambda pxx: bnb.optim.Lion8bit(pxx, block_wise=True))\nstr2optimizers[\"paged_lion8bit_blockwise\"] = (Lion, lambda pxx: bnb.optim.PagedLion8bit(pxx, block_wise=True))\n\nstr2optimizers[\"momentum\"] = (\n lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9),\n lambda pxx: bnb.optim.SGD(pxx, 0.01, 0.9, block_wise=False),\n)\nstr2optimizers[\"momentum8bit_blockwise\"] = (\n lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9),\n lambda pxx: bnb.optim.SGD8bit(pxx, 0.01, 0.9, block_wise=True),\n)\n\nstr2optimizers[\"rmsprop\"] = (\n lambda pxx: torch.optim.RMSprop(pxx, 0.01, 0.9),\n lambda pxx: bnb.optim.RMSprop(pxx, 0.01, 0.9, block_wise=False),\n)\nstr2optimizers[\"rmsprop8bit_blockwise\"] = (\n lambda pxx: torch.optim.RMSprop(pxx, 0.01, 0.9),\n lambda pxx: bnb.optim.RMSprop8bit(pxx, 0.01, 0.9, block_wise=True),\n)\n\nstr2statenames = {}\nstr2statenames[\"adam\"] = [(\"exp_avg\", \"state1\"), (\"exp_avg_sq\", \"state2\")]\nstr2statenames[\"paged_adamw\"] = [(\"exp_avg\", \"state1\"), (\"exp_avg_sq\", \"state2\")]\nstr2statenames[\"paged_adam\"] = [(\"exp_avg\", \"state1\"), (\"exp_avg_sq\", \"state2\")]\nstr2statenames[\"lion\"] = [(\"exp_avg\", \"state1\")]\nstr2statenames[\"paged_lion\"] = [(\"exp_avg\", \"state1\")]\nstr2statenames[\"momentum\"] = [(\"momentum_buffer\", \"state1\")]\nstr2statenames[\"lamb\"] = [(\"exp_avg\", \"state1\"), (\"exp_avg_sq\", \"state2\")]\nstr2statenames[\"rmsprop\"] = [(\"square_avg\", \"state1\")]\n\nstr2statenames[\"adam8bit_blockwise\"] = [\n (\"exp_avg\", \"state1\", \"qmap1\", \"absmax1\"),\n (\"exp_avg_sq\", \"state2\", \"qmap2\", \"absmax2\"),\n]\nstr2statenames[\"paged_adam8bit_blockwise\"] = [\n (\"exp_avg\", \"state1\", \"qmap1\", \"absmax1\"),\n (\"exp_avg_sq\", \"state2\", \"qmap2\", \"absmax2\"),\n]\nstr2statenames[\"paged_adamw8bit_blockwise\"] = [\n (\"exp_avg\", \"state1\", \"qmap1\", \"absmax1\"),\n (\"exp_avg_sq\", \"state2\", \"qmap2\", \"absmax2\"),\n]\n\nstr2statenames[\"momentum8bit_blockwise\"] = [(\"momentum_buffer\", \"state1\", \"qmap1\", \"absmax1\")]\nstr2statenames[\"rmsprop8bit_blockwise\"] = [(\"square_avg\", \"state1\", \"qmap1\", \"absmax1\")]\nstr2statenames[\"lion8bit_blockwise\"] = [(\"exp_avg\", \"state1\", \"qmap1\", \"absmax1\")]\nstr2statenames[\"paged_lion8bit_blockwise\"] = [(\"exp_avg\", \"state1\", \"qmap1\", \"absmax1\")]\n\nstr2statenames[\"ademamix\"] = str2statenames[\"ademamix_scheduled\"] = [(\"m1_m2\", \"state1\"), (\"nu\", \"state2\")]\nstr2statenames[\"paged_ademamix\"] = str2statenames[\"paged_ademamix_scheduled\"] 
= [(\"m1_m2\", \"state1\"), (\"nu\", \"state2\")]\nstr2statenames[\"ademamix8bit_blockwise\"] = str2statenames[\"ademamix8bit_blockwise_scheduled\"] = [\n (\"m1_m2\", \"state1\", \"qmap1\", \"absmax1\"),\n (\"nu\", \"state2\", \"qmap2\", \"absmax2\"),\n]\nstr2statenames[\"paged_ademamix8bit_blockwise\"] = [\n (\"m1_m2\", \"state1\", \"qmap1\", \"absmax1\"),\n (\"nu\", \"state2\", \"qmap2\", \"absmax2\"),\n]\n\noptimizer_names_32bit = [\n \"adam\",\n \"paged_adamw\",\n \"paged_adam\",\n \"momentum\",\n \"rmsprop\",\n \"lion\",\n \"paged_lion\",\n \"ademamix\",\n \"ademamix_scheduled\",\n \"paged_ademamix\",\n \"paged_ademamix_scheduled\",\n]\n\n\n@pytest.mark.parametrize(\"optim_name\", optimizer_names_32bit, ids=id_formatter(\"opt\"))\n@pytest.mark.parametrize(\"gtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n@pytest.mark.parametrize(\"dim1\", [1024], ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"dim2\", [32, 1024, 4097, 1], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"device\", get_available_devices(no_cpu=True), ids=id_formatter(\"device\"))\n@pytest.mark.skipif(not get_available_devices(no_cpu=True), reason=\"No device\")\ndef test_optimizer32bit(dim1, dim2, gtype, optim_name, device):\n if device not in [\"cuda\", \"xpu\"]:\n pytest.skip(\"Optimizers are only supported on CUDA and XPU\")\n\n if optim_name.startswith(\"paged_\") and sys.platform == \"win32\":\n pytest.skip(\"Paged optimizers can have issues on Windows.\")\n\n if optim_name.startswith(\"paged_\") and device == \"xpu\":\n pytest.skip(\"Paged optimizers are not supported on XPU currently.\")\n\n if gtype == torch.bfloat16 and optim_name in [\"momentum\", \"rmsprop\"]:\n pytest.skip()\n if dim1 == 1 and dim2 == 1:\n return\n p1 = torch.randn(dim1, dim2, device=device, dtype=gtype) * 0.1\n p2 = p1.clone()\n p1 = p1.float()\n\n torch_optimizer = str2optimizers[optim_name][0]([p1])\n bnb_optimizer = str2optimizers[optim_name][1]([p2])\n\n if gtype == torch.float32:\n atol, rtol = 1e-6, 1e-5\n elif gtype == torch.bfloat16:\n atol, rtol = 1e-3, 1e-2\n else:\n atol, rtol = 1e-4, 1e-3\n\n for i in range(k):\n g = torch.randn(dim1, dim2, device=device, dtype=gtype) * 0.01\n p1.grad = g.clone().float()\n p2.grad = g.clone()\n\n bnb_optimizer.step()\n torch_optimizer.step()\n\n for name1, name2 in str2statenames[optim_name]:\n torch.testing.assert_close(\n torch_optimizer.state[p1][name1],\n bnb_optimizer.state[p2][name2].to(device),\n atol=atol,\n rtol=rtol,\n )\n\n # since Lion can have pretty noisy updates where things lie at the boundary\n # allow up to 15 errors for Lion\n assert_most_approx_close(p1, p2.float(), atol=atol, rtol=rtol, max_error_count=15)\n\n if i % (k // 5) == 0 and i > 0:\n path = get_temp_dir()\n torch.save(bnb_optimizer.state_dict(), join(path, \"opt.pt\"))\n del bnb_optimizer\n bnb_optimizer = None\n bnb_optimizer = str2optimizers[optim_name][1]([p2])\n bnb_optimizer.load_state_dict(torch.load(join(path, \"opt.pt\")))\n rm_path(path)\n # since Lion can have pretty noisy updates where things lie at the boundary\n # allow up to 10 errors for Lion\n assert_most_approx_close(p1, p2.float(), atol=atol, rtol=rtol, max_error_count=10)\n for name1, name2 in str2statenames[optim_name]:\n # since Lion can have pretty noisy updates where things lie at the boundary\n # allow up to 10 errors for Lion\n assert_most_approx_close(\n torch_optimizer.state[p1][name1],\n bnb_optimizer.state[p2][name2],\n atol=atol,\n rtol=rtol,\n max_error_count=10,\n )\n\n if gtype != 
torch.float32:\n            # the adam buffers should also be close because they are 32-bit\n            # but the parameters can diverge because they are 16-bit\n            # the differences grow larger and larger with each update\n            # --> copy the state to keep weights close\n            p1.data = p1.data.to(p2.dtype).float()\n            p2.copy_(p1.data)\n            torch.testing.assert_close(p1.to(p2.dtype), p2)\n    if optim_name in [\"lars\", \"lamb\"]:\n        assert bnb_optimizer.state[p2][\"unorm_vec\"] > 0.0\n\n\n@pytest.mark.parametrize(\"dim1\", [1024], ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"dim2\", [32, 1024, 4097], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"gtype\", [torch.float32, torch.float16], ids=describe_dtype)\n@pytest.mark.parametrize(\"device\", get_available_devices(no_cpu=True))\n@pytest.mark.skipif(not get_available_devices(no_cpu=True), reason=\"No device\")\ndef test_global_config(dim1, dim2, gtype, device):\n    if device not in [\"cuda\", \"xpu\"]:\n        pytest.skip(\"Optimizers are only supported on CUDA and XPU\")\n\n    if dim1 == 1 and dim2 == 1:\n        return\n    p1 = torch.randn(dim1, dim2, device=\"cpu\", dtype=gtype) * 0.1\n    p2 = torch.randn(dim1, dim2, device=\"cpu\", dtype=gtype) * 0.1\n    p3 = torch.randn(dim1, dim2, device=\"cpu\", dtype=gtype) * 0.1\n    mask = torch.rand_like(p2) < 0.1\n    beta1 = 0.9\n    beta2 = 0.999\n    lr = 0.001\n    eps = 1e-8\n\n    bnb.optim.GlobalOptimManager.get_instance().initialize()\n    bnb.optim.GlobalOptimManager.get_instance().override_config(p3, \"optim_bits\", 8)\n\n    bnb.optim.GlobalOptimManager.get_instance().register_parameters([p1, p2, p3])\n    p1 = p1.to(device)\n    p2 = p2.to(device)\n    p3 = p3.to(device)\n\n    adam2 = bnb.optim.Adam([p1, p2, p3], lr, (beta1, beta2), eps)\n\n    if gtype == torch.float32:\n        atol, rtol = 1e-6, 1e-5\n    else:\n        atol, rtol = 1e-4, 1e-3\n\n    for i in range(50):\n        g1 = torch.randn(dim1, dim2, device=device, dtype=gtype) * 0.1 + 0.001\n        g2 = torch.randn(dim1, dim2, device=device, dtype=gtype) * 0.1 + 0.001\n        g3 = torch.randn(dim1, dim2, device=device, dtype=gtype) * 0.1 + 0.001\n        p1.grad = g1\n        p2.grad = g2\n        p3.grad = g3\n\n        adam2.step()\n\n        assert adam2.state[p3][\"state1\"].dtype == torch.uint8\n        assert adam2.state[p3][\"state2\"].dtype == torch.uint8\n\n\noptimizer_names_8bit = [\n    \"adam8bit_blockwise\",\n    \"lion8bit_blockwise\",\n    \"momentum8bit_blockwise\",\n    \"rmsprop8bit_blockwise\",\n    \"ademamix8bit_blockwise\",\n    \"ademamix8bit_blockwise_scheduled\",\n]\n\n\n@pytest.mark.parametrize(\"optim_name\", optimizer_names_8bit, ids=id_formatter(\"opt\"))\n@pytest.mark.parametrize(\"gtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n@pytest.mark.parametrize(\"dim2\", [32, 1024, 4097], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"dim1\", [1024], ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"device\", get_available_devices(no_cpu=True))\n@pytest.mark.skipif(not get_available_devices(no_cpu=True), reason=\"No device\")\ndef test_optimizer8bit(dim1, dim2, gtype, optim_name, device):\n    if device not in [\"cuda\", \"xpu\"]:\n        pytest.skip(\"8-bit optimizers are only supported on CUDA and XPU\")\n\n    torch.set_printoptions(precision=6)\n\n    if dim1 == 1 and dim2 == 1:\n        return\n\n    p1 = torch.randn(dim1, dim2, device=device, dtype=gtype) * 0.1\n    p2 = p1.clone()\n    p1 = p1.float()\n    blocksize = 256\n\n    torch_optimizer = str2optimizers[optim_name][0]([p1])\n    bnb_optimizer = str2optimizers[optim_name][1]([p2])\n\n    if gtype == torch.float32:\n        atol, rtol = 3e-3, 1e-3\n        patol, prtol = 1e-5, 1e-3\n    elif gtype == torch.bfloat16:\n        atol, 
rtol = 3e-3, 1e-3\n patol, prtol = 1e-4, 1e-2\n else:\n atol, rtol = 3e-3, 1e-3\n patol, prtol = 1e-5, 1e-3\n\n errors = []\n relerrors = []\n\n for i in range(50):\n g = torch.randn(dim1, dim2, device=device, dtype=gtype) * 0.01\n p1.grad = g.clone().float()\n p2.grad = g.clone()\n\n torch_optimizer.step()\n bnb_optimizer.step()\n\n # since Lion can have pretty noisy updates where things lie at the boundary\n # assert_most_approx_close(p1, p2.float(), patol, prtol, max_error_count=0)\n\n dequant_states = []\n for name1, name2, qmap, max_val in str2statenames[optim_name]:\n ## For AdEMAMix, we need to dequantize [p2][name2][0] and [p2][name2][1]\n ## separately and then stack them. The qmap is shared, but absmax is also stacked.\n if optim_name == \"ademamix8bit_blockwise\" and name1 == \"m1_m2\":\n m1 = F.dequantize_blockwise(\n code=bnb_optimizer.state[p2][qmap],\n absmax=bnb_optimizer.state[p2][max_val][0],\n A=bnb_optimizer.state[p2][name2][0],\n blocksize=blocksize,\n )\n m2 = F.dequantize_blockwise(\n code=bnb_optimizer.state[p2][qmap],\n absmax=bnb_optimizer.state[p2][max_val][1],\n A=bnb_optimizer.state[p2][name2][1],\n blocksize=blocksize,\n )\n\n s1 = torch.stack((m1, m2))\n else:\n s1 = F.dequantize_blockwise(\n code=bnb_optimizer.state[p2][qmap],\n absmax=bnb_optimizer.state[p2][max_val],\n A=bnb_optimizer.state[p2][name2],\n blocksize=blocksize,\n )\n\n num_not_close = torch.isclose(torch_optimizer.state[p1][name1], s1, atol=atol, rtol=rtol) == 0\n assert num_not_close.sum().item() < 20\n dequant_states.append(s1.clone())\n\n err = torch.abs(p1 - p2)\n relerr = err / (torch.abs(p1) + 1e-9)\n if g.dtype == torch.bfloat16:\n assert err.mean() <= 0.00017\n assert relerr.mean() <= 0.0016\n else:\n assert err.mean() < 0.00006\n assert relerr.mean() < 0.0006\n\n errors.append(err.mean().item())\n relerrors.append(relerr.mean().item())\n\n if i % 10 == 0 and i > 0:\n for (name1, name2, qmap, max_val), s in zip(str2statenames[optim_name], dequant_states):\n s1cpy = s.clone()\n raws1cpy = bnb_optimizer.state[p2][name2].clone()\n qmap1 = bnb_optimizer.state[p2][qmap].clone()\n\n path = get_temp_dir()\n torch.save(bnb_optimizer.state_dict(), join(path, \"opt.pt\"))\n del bnb_optimizer\n bnb_optimizer = None\n bnb_optimizer = str2optimizers[optim_name][1]([p2])\n bnb_optimizer.load_state_dict(torch.load(join(path, \"opt.pt\")))\n rm_path(path)\n torch.testing.assert_close(raws1cpy, bnb_optimizer.state[p2][name2])\n torch.testing.assert_close(qmap1, bnb_optimizer.state[p2][qmap])\n\n ## For AdEMAMix, we need to dequantize [p2][name2][0] and [p2][name2][1]\n ## separately and then stack them. 
The qmap is shared, but absmax is also stacked.\n if optim_name == \"ademamix8bit_blockwise\" and name1 == \"m1_m2\":\n s1 = torch.stack(\n (\n F.dequantize_blockwise(\n code=bnb_optimizer.state[p2][qmap],\n absmax=bnb_optimizer.state[p2][max_val][0],\n A=bnb_optimizer.state[p2][name2][0],\n blocksize=blocksize,\n ),\n F.dequantize_blockwise(\n code=bnb_optimizer.state[p2][qmap],\n absmax=bnb_optimizer.state[p2][max_val][1],\n A=bnb_optimizer.state[p2][name2][1],\n blocksize=blocksize,\n ),\n )\n )\n else:\n s1 = F.dequantize_blockwise(\n code=bnb_optimizer.state[p2][qmap],\n absmax=bnb_optimizer.state[p2][max_val],\n A=bnb_optimizer.state[p2][name2],\n blocksize=blocksize,\n )\n\n torch.testing.assert_close(s1cpy, s1)\n\n num_not_close = torch.isclose(torch_optimizer.state[p1][name1], s1, atol=atol, rtol=rtol) == 0\n assert num_not_close.sum().item() < 20\n\n # Lion can have pretty noisy updates where things lie at the boundary\n assert_most_approx_close(p1, p2.float(), patol, prtol, max_error_count=0)\n\n # the parameters diverge quickly. Here we keep them close\n # together so we can test against the Adam error\n p1.data = p1.data.to(gtype).float()\n p2.copy_(p1.data)\n torch.testing.assert_close(p1.to(gtype), p2)\n for (name1, name2, qmap, max_val), s in zip(str2statenames[optim_name], dequant_states):\n torch_optimizer.state[p1][name1].copy_(s.data)\n\n\n@pytest.mark.parametrize(\"optim_bits\", [32, 8], ids=id_formatter(\"optim_bits\"))\n@pytest.mark.parametrize(\"gtype\", [torch.float32], ids=describe_dtype)\n@pytest.mark.parametrize(\"dim2\", [32, 1024, 4097], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"dim1\", [1024], ids=id_formatter(\"dim1\"))\n@pytest.mark.deprecated\ndef test_adam_percentile_clipping(requires_cuda, dim1, dim2, gtype, optim_bits):\n if dim1 == 1 and dim2 == 1:\n return\n p1 = torch.randn(dim1, dim2, device=\"cpu\", dtype=gtype) * 0.1\n beta1 = 0.9\n beta2 = 0.999\n lr = 0.001\n eps = 1e-8\n p1 = p1.cuda()\n p2 = p1.clone()\n adam1 = bnb.optim.Adam([p1], lr, (beta1, beta2), eps, optim_bits=optim_bits)\n adam2 = bnb.optim.Adam(\n [p2],\n lr,\n (beta1, beta2),\n eps,\n optim_bits=optim_bits,\n percentile_clipping=5,\n )\n\n gnorm_vec = torch.zeros(100).cuda()\n step = 0\n\n for i in range(50):\n step += 1\n g1 = torch.randn(dim1, dim2, device=\"cuda\", dtype=gtype) * 0.1 + (0.01 * i)\n g2 = g1.clone()\n p2.grad = g2\n\n current_gnorm, clip_val, gnorm_scale = F.percentile_clipping(g1, gnorm_vec, step, 5)\n g1 = (g1.float() * gnorm_scale).to(gtype)\n p1.grad = g1\n\n adam1.step()\n adam2.step()\n\n # gnorm_scale is not deterministic (warp reductions), as such there can be slight differences i\n# ... 
truncated ...","source_hash":"1685a1d8bbbaa8e5bad37417270e4335ad562a308d71beda23054ef9379cc808","truncated":true} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_optim.assert_most_approx_close","uri":"program://bitsandbytes/function/tests.test_optim.assert_most_approx_close#L22-L27","kind":"function","name":"assert_most_approx_close","path":"tests/test_optim.py","language":"python","start_line":22,"end_line":27,"context_start_line":2,"context_end_line":47,"code":"from os.path import join\nimport shutil\nimport sys\nimport time\nimport uuid\n\nfrom lion_pytorch import Lion\nimport pytest\nimport torch\n\nimport bitsandbytes as bnb\nimport bitsandbytes.functional as F\nfrom bitsandbytes.utils import sync_gpu\nfrom tests.helpers import describe_dtype, get_available_devices, id_formatter\n\n# import apex\n\nk = 20\n\n\ndef assert_most_approx_close(a, b, rtol=1e-3, atol=1e-3, max_error_count=0):\n idx = torch.isclose(a, b, rtol=rtol, atol=atol)\n error_count = (idx == 0).sum().item()\n if error_count > max_error_count:\n print(f\"Too many values not close: assert {error_count} < {max_error_count}\")\n torch.testing.assert_close(a, b, rtol=rtol, atol=atol)\n\n\ndef get_temp_dir():\n path = f\"/tmp/autoswap/{uuid.uuid4()}\"\n os.makedirs(path, exist_ok=True)\n return path\n\n\ndef rm_path(path):\n shutil.rmtree(path)\n\n\nstr2optimizers = {}\n\n## TODO: maybe remove these three.\nstr2optimizers[\"adam_pytorch\"] = (None, torch.optim.Adam, bnb.optim.Adam)\nstr2optimizers[\"lion_pytorch\"] = (None, Lion, bnb.optim.Lion)\nstr2optimizers[\"momentum_pytorch\"] = (\n None,\n lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9),","source_hash":"1685a1d8bbbaa8e5bad37417270e4335ad562a308d71beda23054ef9379cc808","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_optim.get_temp_dir","uri":"program://bitsandbytes/function/tests.test_optim.get_temp_dir#L30-L33","kind":"function","name":"get_temp_dir","path":"tests/test_optim.py","language":"python","start_line":30,"end_line":33,"context_start_line":10,"context_end_line":53,"code":"import torch\n\nimport bitsandbytes as bnb\nimport bitsandbytes.functional as F\nfrom bitsandbytes.utils import sync_gpu\nfrom tests.helpers import describe_dtype, get_available_devices, id_formatter\n\n# import apex\n\nk = 20\n\n\ndef assert_most_approx_close(a, b, rtol=1e-3, atol=1e-3, max_error_count=0):\n idx = torch.isclose(a, b, rtol=rtol, atol=atol)\n error_count = (idx == 0).sum().item()\n if error_count > max_error_count:\n print(f\"Too many values not close: assert {error_count} < {max_error_count}\")\n torch.testing.assert_close(a, b, rtol=rtol, atol=atol)\n\n\ndef get_temp_dir():\n path = f\"/tmp/autoswap/{uuid.uuid4()}\"\n os.makedirs(path, exist_ok=True)\n return path\n\n\ndef rm_path(path):\n shutil.rmtree(path)\n\n\nstr2optimizers = {}\n\n## TODO: maybe remove these three.\nstr2optimizers[\"adam_pytorch\"] = (None, torch.optim.Adam, bnb.optim.Adam)\nstr2optimizers[\"lion_pytorch\"] = (None, Lion, bnb.optim.Lion)\nstr2optimizers[\"momentum_pytorch\"] = (\n None,\n lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9),\n bnb.optim.Adam,\n)\n\nstr2optimizers[\"adam\"] = (torch.optim.Adam, bnb.optim.Adam)\nstr2optimizers[\"adam8bit_blockwise\"] = (torch.optim.Adam, lambda pxx: bnb.optim.Adam8bit(pxx, block_wise=True))\nstr2optimizers[\"paged_adam\"] = (torch.optim.Adam, bnb.optim.PagedAdam)","source_hash":"1685a1d8bbbaa8e5bad37417270e4335ad562a308d71beda23054ef9379cc808","truncated":false} 
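The optimizer tests above all reduce to one dequantize-and-compare pattern: step a 32-bit torch.optim reference and a bitsandbytes optimizer on clones of the same parameter, reconstruct each quantized state with F.dequantize_blockwise from its shared quantization map ("qmap") and per-block "absmax" scales, and compare while tolerating a bounded number of outliers at quantization-bin boundaries. The sketch below illustrates that pattern for the adam8bit_blockwise case; it is a minimal, hypothetical example assuming a CUDA device, not part of the test suite, and names such as p_ref and bnb_opt are illustrative.

import torch

import bitsandbytes as bnb
import bitsandbytes.functional as F

# Step a 32-bit reference and the 8-bit blockwise Adam on clones of one parameter.
p_ref = torch.randn(1024, 1024, device="cuda") * 0.1
p_bnb = p_ref.clone()
ref_opt = torch.optim.Adam([p_ref])
bnb_opt = bnb.optim.Adam8bit([p_bnb], block_wise=True)

g = torch.randn_like(p_ref) * 0.01
p_ref.grad = g.clone()
p_bnb.grad = g.clone()
ref_opt.step()
bnb_opt.step()

# "state1" holds the quantized exp_avg; "qmap1" is the shared 8-bit codebook and
# "absmax1" the per-block scales (the tests above use blocksize 256).
state = bnb_opt.state[p_bnb]
exp_avg = F.dequantize_blockwise(
    code=state["qmap1"],
    absmax=state["absmax1"],
    A=state["state1"],
    blocksize=256,
)

# Allow a bounded number of values at quantization-bin boundaries to differ,
# in the spirit of assert_most_approx_close.
not_close = ~torch.isclose(ref_opt.state[p_ref]["exp_avg"], exp_avg, atol=3e-3, rtol=1e-3)
assert not_close.sum().item() < 20

For the AdEMAMix variants the same pattern applies, except that state1 stacks two EMAs (m1_m2) which share one qmap but carry stacked absmax values, so each half is dequantized separately and re-stacked, as the tests above do.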
{"repo_id":"bitsandbytes","entity_id":"py:tests.test_optim.rm_path","uri":"program://bitsandbytes/function/tests.test_optim.rm_path#L36-L37","kind":"function","name":"rm_path","path":"tests/test_optim.py","language":"python","start_line":36,"end_line":37,"context_start_line":16,"context_end_line":57,"code":"\n# import apex\n\nk = 20\n\n\ndef assert_most_approx_close(a, b, rtol=1e-3, atol=1e-3, max_error_count=0):\n idx = torch.isclose(a, b, rtol=rtol, atol=atol)\n error_count = (idx == 0).sum().item()\n if error_count > max_error_count:\n print(f\"Too many values not close: assert {error_count} < {max_error_count}\")\n torch.testing.assert_close(a, b, rtol=rtol, atol=atol)\n\n\ndef get_temp_dir():\n path = f\"/tmp/autoswap/{uuid.uuid4()}\"\n os.makedirs(path, exist_ok=True)\n return path\n\n\ndef rm_path(path):\n shutil.rmtree(path)\n\n\nstr2optimizers = {}\n\n## TODO: maybe remove these three.\nstr2optimizers[\"adam_pytorch\"] = (None, torch.optim.Adam, bnb.optim.Adam)\nstr2optimizers[\"lion_pytorch\"] = (None, Lion, bnb.optim.Lion)\nstr2optimizers[\"momentum_pytorch\"] = (\n None,\n lambda pxx: torch.optim.SGD(pxx, 0.01, 0.9),\n bnb.optim.Adam,\n)\n\nstr2optimizers[\"adam\"] = (torch.optim.Adam, bnb.optim.Adam)\nstr2optimizers[\"adam8bit_blockwise\"] = (torch.optim.Adam, lambda pxx: bnb.optim.Adam8bit(pxx, block_wise=True))\nstr2optimizers[\"paged_adam\"] = (torch.optim.Adam, bnb.optim.PagedAdam)\nstr2optimizers[\"paged_adamw\"] = (torch.optim.AdamW, bnb.optim.PagedAdamW)\nstr2optimizers[\"paged_adam8bit_blockwise\"] = (\n torch.optim.Adam,\n lambda pxx: bnb.optim.PagedAdam8bit(pxx, block_wise=True),","source_hash":"1685a1d8bbbaa8e5bad37417270e4335ad562a308d71beda23054ef9379cc808","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_optim.test_optimizer32bit","uri":"program://bitsandbytes/function/tests.test_optim.test_optimizer32bit#L174-L253","kind":"function","name":"test_optimizer32bit","path":"tests/test_optim.py","language":"python","start_line":174,"end_line":253,"context_start_line":154,"context_end_line":273,"code":" \"adam\",\n \"paged_adamw\",\n \"paged_adam\",\n \"momentum\",\n \"rmsprop\",\n \"lion\",\n \"paged_lion\",\n \"ademamix\",\n \"ademamix_scheduled\",\n \"paged_ademamix\",\n \"paged_ademamix_scheduled\",\n]\n\n\n@pytest.mark.parametrize(\"optim_name\", optimizer_names_32bit, ids=id_formatter(\"opt\"))\n@pytest.mark.parametrize(\"gtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n@pytest.mark.parametrize(\"dim1\", [1024], ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"dim2\", [32, 1024, 4097, 1], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"device\", get_available_devices(no_cpu=True), ids=id_formatter(\"device\"))\n@pytest.mark.skipif(not get_available_devices(no_cpu=True), reason=\"No device\")\ndef test_optimizer32bit(dim1, dim2, gtype, optim_name, device):\n if device not in [\"cuda\", \"xpu\"]:\n pytest.skip(\"Optimizers are only supported on CUDA and XPU\")\n\n if optim_name.startswith(\"paged_\") and sys.platform == \"win32\":\n pytest.skip(\"Paged optimizers can have issues on Windows.\")\n\n if optim_name.startswith(\"paged_\") and device == \"xpu\":\n pytest.skip(\"Paged optimizers are not supported on XPU currently.\")\n\n if gtype == torch.bfloat16 and optim_name in [\"momentum\", \"rmsprop\"]:\n pytest.skip()\n if dim1 == 1 and dim2 == 1:\n return\n p1 = torch.randn(dim1, dim2, device=device, dtype=gtype) * 0.1\n p2 = p1.clone()\n p1 = p1.float()\n\n torch_optimizer = 
str2optimizers[optim_name][0]([p1])\n bnb_optimizer = str2optimizers[optim_name][1]([p2])\n\n if gtype == torch.float32:\n atol, rtol = 1e-6, 1e-5\n elif gtype == torch.bfloat16:\n atol, rtol = 1e-3, 1e-2\n else:\n atol, rtol = 1e-4, 1e-3\n\n for i in range(k):\n g = torch.randn(dim1, dim2, device=device, dtype=gtype) * 0.01\n p1.grad = g.clone().float()\n p2.grad = g.clone()\n\n bnb_optimizer.step()\n torch_optimizer.step()\n\n for name1, name2 in str2statenames[optim_name]:\n torch.testing.assert_close(\n torch_optimizer.state[p1][name1],\n bnb_optimizer.state[p2][name2].to(device),\n atol=atol,\n rtol=rtol,\n )\n\n # since Lion can have pretty noisy updates where things lie at the boundary\n # allow up to 15 errors for Lion\n assert_most_approx_close(p1, p2.float(), atol=atol, rtol=rtol, max_error_count=15)\n\n if i % (k // 5) == 0 and i > 0:\n path = get_temp_dir()\n torch.save(bnb_optimizer.state_dict(), join(path, \"opt.pt\"))\n del bnb_optimizer\n bnb_optimizer = None\n bnb_optimizer = str2optimizers[optim_name][1]([p2])\n bnb_optimizer.load_state_dict(torch.load(join(path, \"opt.pt\")))\n rm_path(path)\n # since Lion can have pretty noisy updates where things lie at the boundary\n # allow up to 10 errors for Lion\n assert_most_approx_close(p1, p2.float(), atol=atol, rtol=rtol, max_error_count=10)\n for name1, name2 in str2statenames[optim_name]:\n # since Lion can have pretty noisy updates where things lie at the boundary\n # allow up to 10 errors for Lion\n assert_most_approx_close(\n torch_optimizer.state[p1][name1],\n bnb_optimizer.state[p2][name2],\n atol=atol,\n rtol=rtol,\n max_error_count=10,\n )\n\n if gtype != torch.float32:\n # the adam buffers should also be close because they are 32-bit\n # but the parameters can diverge because they are 16-bit\n # the difference grow larger and larger with each update\n # --> copy the state to keep weights close\n p1.data = p1.data.to(p2.dtype).float()\n p2.copy_(p1.data)\n torch.testing.assert_close(p1.to(p2.dtype), p2)\n if optim_name in [\"lars\", \"lamb\"]:\n assert bnb_optimizer.state[p2][\"unorm_vec\"] > 0.0\n\n\n@pytest.mark.parametrize(\"dim1\", [1024], ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"dim2\", [32, 1024, 4097], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"gtype\", [torch.float32, torch.float16], ids=describe_dtype)\n@pytest.mark.parametrize(\"device\", get_available_devices(no_cpu=True))\n@pytest.mark.skipif(not get_available_devices(no_cpu=True), reason=\"No device\")\ndef test_global_config(dim1, dim2, gtype, device):\n if device not in [\"cuda\", \"xpu\"]:\n pytest.skip(\"Optimizers are only supported on CUDA and XPU\")\n\n if dim1 == 1 and dim2 == 1:\n return\n p1 = torch.randn(dim1, dim2, device=\"cpu\", dtype=gtype) * 0.1\n p2 = torch.randn(dim1, dim2, device=\"cpu\", dtype=gtype) * 0.1\n p3 = torch.randn(dim1, dim2, device=\"cpu\", dtype=gtype) * 0.1\n mask = torch.rand_like(p2) < 0.1\n beta1 = 0.9\n beta2 = 0.999\n lr = 0.001","source_hash":"1685a1d8bbbaa8e5bad37417270e4335ad562a308d71beda23054ef9379cc808","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_optim.test_global_config","uri":"program://bitsandbytes/function/tests.test_optim.test_global_config#L261-L302","kind":"function","name":"test_global_config","path":"tests/test_optim.py","language":"python","start_line":261,"end_line":302,"context_start_line":241,"context_end_line":322,"code":" max_error_count=10,\n )\n\n if gtype != torch.float32:\n # the adam buffers should also be close because they are 
32-bit\n            # but the parameters can diverge because they are 16-bit\n            # the differences grow larger and larger with each update\n            # --> copy the state to keep weights close\n            p1.data = p1.data.to(p2.dtype).float()\n            p2.copy_(p1.data)\n            torch.testing.assert_close(p1.to(p2.dtype), p2)\n    if optim_name in [\"lars\", \"lamb\"]:\n        assert bnb_optimizer.state[p2][\"unorm_vec\"] > 0.0\n\n\n@pytest.mark.parametrize(\"dim1\", [1024], ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"dim2\", [32, 1024, 4097], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"gtype\", [torch.float32, torch.float16], ids=describe_dtype)\n@pytest.mark.parametrize(\"device\", get_available_devices(no_cpu=True))\n@pytest.mark.skipif(not get_available_devices(no_cpu=True), reason=\"No device\")\ndef test_global_config(dim1, dim2, gtype, device):\n    if device not in [\"cuda\", \"xpu\"]:\n        pytest.skip(\"Optimizers are only supported on CUDA and XPU\")\n\n    if dim1 == 1 and dim2 == 1:\n        return\n    p1 = torch.randn(dim1, dim2, device=\"cpu\", dtype=gtype) * 0.1\n    p2 = torch.randn(dim1, dim2, device=\"cpu\", dtype=gtype) * 0.1\n    p3 = torch.randn(dim1, dim2, device=\"cpu\", dtype=gtype) * 0.1\n    mask = torch.rand_like(p2) < 0.1\n    beta1 = 0.9\n    beta2 = 0.999\n    lr = 0.001\n    eps = 1e-8\n\n    bnb.optim.GlobalOptimManager.get_instance().initialize()\n    bnb.optim.GlobalOptimManager.get_instance().override_config(p3, \"optim_bits\", 8)\n\n    bnb.optim.GlobalOptimManager.get_instance().register_parameters([p1, p2, p3])\n    p1 = p1.to(device)\n    p2 = p2.to(device)\n    p3 = p3.to(device)\n\n    adam2 = bnb.optim.Adam([p1, p2, p3], lr, (beta1, beta2), eps)\n\n    if gtype == torch.float32:\n        atol, rtol = 1e-6, 1e-5\n    else:\n        atol, rtol = 1e-4, 1e-3\n\n    for i in range(50):\n        g1 = torch.randn(dim1, dim2, device=device, dtype=gtype) * 0.1 + 0.001\n        g2 = torch.randn(dim1, dim2, device=device, dtype=gtype) * 0.1 + 0.001\n        g3 = torch.randn(dim1, dim2, device=device, dtype=gtype) * 0.1 + 0.001\n        p1.grad = g1\n        p2.grad = g2\n        p3.grad = g3\n\n        adam2.step()\n\n        assert adam2.state[p3][\"state1\"].dtype == torch.uint8\n        assert adam2.state[p3][\"state2\"].dtype == torch.uint8\n\n\noptimizer_names_8bit = [\n    \"adam8bit_blockwise\",\n    \"lion8bit_blockwise\",\n    \"momentum8bit_blockwise\",\n    \"rmsprop8bit_blockwise\",\n    \"ademamix8bit_blockwise\",\n    \"ademamix8bit_blockwise_scheduled\",\n]\n\n\n@pytest.mark.parametrize(\"optim_name\", optimizer_names_8bit, ids=id_formatter(\"opt\"))\n@pytest.mark.parametrize(\"gtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n@pytest.mark.parametrize(\"dim2\", [32, 1024, 4097], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"dim1\", [1024], ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"device\", get_available_devices(no_cpu=True))\n@pytest.mark.skipif(not get_available_devices(no_cpu=True), reason=\"No device\")\ndef test_optimizer8bit(dim1, dim2, gtype, optim_name, device):\n    if device not in [\"cuda\", \"xpu\"]:","source_hash":"1685a1d8bbbaa8e5bad37417270e4335ad562a308d71beda23054ef9379cc808","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_optim.test_optimizer8bit","uri":"program://bitsandbytes/function/tests.test_optim.test_optimizer8bit#L321-L462","kind":"function","name":"test_optimizer8bit","path":"tests/test_optim.py","language":"python","start_line":321,"end_line":462,"context_start_line":301,"context_end_line":482,"code":"        assert adam2.state[p3][\"state1\"].dtype == torch.uint8\n        assert adam2.state[p3][\"state2\"].dtype == 
torch.uint8\n\n\noptimizer_names_8bit = [\n \"adam8bit_blockwise\",\n \"lion8bit_blockwise\",\n \"momentum8bit_blockwise\",\n \"rmsprop8bit_blockwise\",\n \"ademamix8bit_blockwise\",\n \"ademamix8bit_blockwise_scheduled\",\n]\n\n\n@pytest.mark.parametrize(\"optim_name\", optimizer_names_8bit, ids=id_formatter(\"opt\"))\n@pytest.mark.parametrize(\"gtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n@pytest.mark.parametrize(\"dim2\", [32, 1024, 4097], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"dim1\", [1024], ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"device\", get_available_devices(no_cpu=True))\n@pytest.mark.skipif(not get_available_devices(no_cpu=True), reason=\"No device\")\ndef test_optimizer8bit(dim1, dim2, gtype, optim_name, device):\n if device not in [\"cuda\", \"xpu\"]:\n pytest.skip(\"8-bit optimizers are only supported on CUDA and XPU\")\n\n torch.set_printoptions(precision=6)\n\n if dim1 == 1 and dim2 == 1:\n return\n\n p1 = torch.randn(dim1, dim2, device=device, dtype=gtype) * 0.1\n p2 = p1.clone()\n p1 = p1.float()\n blocksize = 256\n\n torch_optimizer = str2optimizers[optim_name][0]([p1])\n bnb_optimizer = str2optimizers[optim_name][1]([p2])\n\n if gtype == torch.float32:\n atol, rtol = 3e-3, 1e-3\n patol, prtol = 1e-5, 1e-3\n elif gtype == torch.bfloat16:\n atol, rtol = 3e-3, 1e-3\n patol, prtol = 1e-4, 1e-2\n else:\n atol, rtol = 3e-3, 1e-3\n patol, prtol = 1e-5, 1e-3\n\n errors = []\n relerrors = []\n\n for i in range(50):\n g = torch.randn(dim1, dim2, device=device, dtype=gtype) * 0.01\n p1.grad = g.clone().float()\n p2.grad = g.clone()\n\n torch_optimizer.step()\n bnb_optimizer.step()\n\n # since Lion can have pretty noisy updates where things lie at the boundary\n # assert_most_approx_close(p1, p2.float(), patol, prtol, max_error_count=0)\n\n dequant_states = []\n for name1, name2, qmap, max_val in str2statenames[optim_name]:\n ## For AdEMAMix, we need to dequantize [p2][name2][0] and [p2][name2][1]\n ## separately and then stack them. 
The qmap is shared, but absmax is also stacked.\n if optim_name == \"ademamix8bit_blockwise\" and name1 == \"m1_m2\":\n m1 = F.dequantize_blockwise(\n code=bnb_optimizer.state[p2][qmap],\n absmax=bnb_optimizer.state[p2][max_val][0],\n A=bnb_optimizer.state[p2][name2][0],\n blocksize=blocksize,\n )\n m2 = F.dequantize_blockwise(\n code=bnb_optimizer.state[p2][qmap],\n absmax=bnb_optimizer.state[p2][max_val][1],\n A=bnb_optimizer.state[p2][name2][1],\n blocksize=blocksize,\n )\n\n s1 = torch.stack((m1, m2))\n else:\n s1 = F.dequantize_blockwise(\n code=bnb_optimizer.state[p2][qmap],\n absmax=bnb_optimizer.state[p2][max_val],\n A=bnb_optimizer.state[p2][name2],\n blocksize=blocksize,\n )\n\n num_not_close = torch.isclose(torch_optimizer.state[p1][name1], s1, atol=atol, rtol=rtol) == 0\n assert num_not_close.sum().item() < 20\n dequant_states.append(s1.clone())\n\n err = torch.abs(p1 - p2)\n relerr = err / (torch.abs(p1) + 1e-9)\n if g.dtype == torch.bfloat16:\n assert err.mean() <= 0.00017\n assert relerr.mean() <= 0.0016\n else:\n assert err.mean() < 0.00006\n assert relerr.mean() < 0.0006\n\n errors.append(err.mean().item())\n relerrors.append(relerr.mean().item())\n\n if i % 10 == 0 and i > 0:\n for (name1, name2, qmap, max_val), s in zip(str2statenames[optim_name], dequant_states):\n s1cpy = s.clone()\n raws1cpy = bnb_optimizer.state[p2][name2].clone()\n qmap1 = bnb_optimizer.state[p2][qmap].clone()\n\n path = get_temp_dir()\n torch.save(bnb_optimizer.state_dict(), join(path, \"opt.pt\"))\n del bnb_optimizer\n bnb_optimizer = None\n bnb_optimizer = str2optimizers[optim_name][1]([p2])\n bnb_optimizer.load_state_dict(torch.load(join(path, \"opt.pt\")))\n rm_path(path)\n torch.testing.assert_close(raws1cpy, bnb_optimizer.state[p2][name2])\n torch.testing.assert_close(qmap1, bnb_optimizer.state[p2][qmap])\n\n ## For AdEMAMix, we need to dequantize [p2][name2][0] and [p2][name2][1]\n ## separately and then stack them. The qmap is shared, but absmax is also stacked.\n if optim_name == \"ademamix8bit_blockwise\" and name1 == \"m1_m2\":\n s1 = torch.stack(\n (\n F.dequantize_blockwise(\n code=bnb_optimizer.state[p2][qmap],\n absmax=bnb_optimizer.state[p2][max_val][0],\n A=bnb_optimizer.state[p2][name2][0],\n blocksize=blocksize,\n ),\n F.dequantize_blockwise(\n code=bnb_optimizer.state[p2][qmap],\n absmax=bnb_optimizer.state[p2][max_val][1],\n A=bnb_optimizer.state[p2][name2][1],\n blocksize=blocksize,\n ),\n )\n )\n else:\n s1 = F.dequantize_blockwise(\n code=bnb_optimizer.state[p2][qmap],\n absmax=bnb_optimizer.state[p2][max_val],\n A=bnb_optimizer.state[p2][name2],\n blocksize=blocksize,\n )\n\n torch.testing.assert_close(s1cpy, s1)\n\n num_not_close = torch.isclose(torch_optimizer.state[p1][name1], s1, atol=atol, rtol=rtol) == 0\n assert num_not_close.sum().item() < 20\n\n # Lion can have pretty noisy updates where things lie at the boundary\n assert_most_approx_close(p1, p2.float(), patol, prtol, max_error_count=0)\n\n # the parameters diverge quickly. 
Here we keep them close\n # together so we can test against the Adam error\n p1.data = p1.data.to(gtype).float()\n p2.copy_(p1.data)\n torch.testing.assert_close(p1.to(gtype), p2)\n for (name1, name2, qmap, max_val), s in zip(str2statenames[optim_name], dequant_states):\n torch_optimizer.state[p1][name1].copy_(s.data)\n\n\n@pytest.mark.parametrize(\"optim_bits\", [32, 8], ids=id_formatter(\"optim_bits\"))\n@pytest.mark.parametrize(\"gtype\", [torch.float32], ids=describe_dtype)\n@pytest.mark.parametrize(\"dim2\", [32, 1024, 4097], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"dim1\", [1024], ids=id_formatter(\"dim1\"))\n@pytest.mark.deprecated\ndef test_adam_percentile_clipping(requires_cuda, dim1, dim2, gtype, optim_bits):\n if dim1 == 1 and dim2 == 1:\n return\n p1 = torch.randn(dim1, dim2, device=\"cpu\", dtype=gtype) * 0.1\n beta1 = 0.9\n beta2 = 0.999\n lr = 0.001\n eps = 1e-8\n p1 = p1.cuda()\n p2 = p1.clone()\n adam1 = bnb.optim.Adam([p1], lr, (beta1, beta2), eps, optim_bits=optim_bits)\n adam2 = bnb.optim.Adam(\n [p2],","source_hash":"1685a1d8bbbaa8e5bad37417270e4335ad562a308d71beda23054ef9379cc808","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_optim.test_adam_percentile_clipping","uri":"program://bitsandbytes/function/tests.test_optim.test_adam_percentile_clipping#L470-L550","kind":"function","name":"test_adam_percentile_clipping","path":"tests/test_optim.py","language":"python","start_line":470,"end_line":550,"context_start_line":450,"context_end_line":570,"code":" num_not_close = torch.isclose(torch_optimizer.state[p1][name1], s1, atol=atol, rtol=rtol) == 0\n assert num_not_close.sum().item() < 20\n\n # Lion can have pretty noisy updates where things lie at the boundary\n assert_most_approx_close(p1, p2.float(), patol, prtol, max_error_count=0)\n\n # the parameters diverge quickly. 
Here we keep them close\n        # together so we can test against the Adam error\n        p1.data = p1.data.to(gtype).float()\n        p2.copy_(p1.data)\n        torch.testing.assert_close(p1.to(gtype), p2)\n        for (name1, name2, qmap, max_val), s in zip(str2statenames[optim_name], dequant_states):\n            torch_optimizer.state[p1][name1].copy_(s.data)\n\n\n@pytest.mark.parametrize(\"optim_bits\", [32, 8], ids=id_formatter(\"optim_bits\"))\n@pytest.mark.parametrize(\"gtype\", [torch.float32], ids=describe_dtype)\n@pytest.mark.parametrize(\"dim2\", [32, 1024, 4097], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"dim1\", [1024], ids=id_formatter(\"dim1\"))\n@pytest.mark.deprecated\ndef test_adam_percentile_clipping(requires_cuda, dim1, dim2, gtype, optim_bits):\n    if dim1 == 1 and dim2 == 1:\n        return\n    p1 = torch.randn(dim1, dim2, device=\"cpu\", dtype=gtype) * 0.1\n    beta1 = 0.9\n    beta2 = 0.999\n    lr = 0.001\n    eps = 1e-8\n    p1 = p1.cuda()\n    p2 = p1.clone()\n    adam1 = bnb.optim.Adam([p1], lr, (beta1, beta2), eps, optim_bits=optim_bits)\n    adam2 = bnb.optim.Adam(\n        [p2],\n        lr,\n        (beta1, beta2),\n        eps,\n        optim_bits=optim_bits,\n        percentile_clipping=5,\n    )\n\n    gnorm_vec = torch.zeros(100).cuda()\n    step = 0\n\n    for i in range(50):\n        step += 1\n        g1 = torch.randn(dim1, dim2, device=\"cuda\", dtype=gtype) * 0.1 + (0.01 * i)\n        g2 = g1.clone()\n        p2.grad = g2\n\n        current_gnorm, clip_val, gnorm_scale = F.percentile_clipping(g1, gnorm_vec, step, 5)\n        g1 = (g1.float() * gnorm_scale).to(gtype)\n        p1.grad = g1\n\n        adam1.step()\n        adam2.step()\n\n        # gnorm_scale is not deterministic (warp reductions), as such there can be slight differences in state\n        if optim_bits == 32:\n            torch.testing.assert_close(p1, p2)\n            torch.testing.assert_close(\n                adam1.state[p1][\"state1\"],\n                adam2.state[p2][\"state1\"],\n                atol=5e-5,\n                rtol=1e-4,\n            )\n            torch.testing.assert_close(\n                adam1.state[p1][\"state2\"],\n                adam2.state[p2][\"state2\"],\n                atol=5e-5,\n                rtol=1e-4,\n            )\n        elif optim_bits == 8:\n            torch.testing.assert_close(p1, p2, atol=1e-4, rtol=1e-3)\n            torch.testing.assert_close(\n                adam1.state[p1][\"state1\"],\n                adam2.state[p2][\"state1\"],\n                atol=2,\n                rtol=1e-3,\n            )\n            torch.testing.assert_close(\n                adam1.state[p1][\"state2\"],\n                adam2.state[p2][\"state2\"],\n                atol=2,\n                rtol=1e-3,\n            )\n            adam1.state[p1][\"state1\"].copy_(adam2.state[p2][\"state1\"])\n            adam1.state[p1][\"state2\"].copy_(adam2.state[p2][\"state2\"])\n        if i % 10 == 0 and i > 0:\n            path = get_temp_dir()\n            torch.save(adam2.state_dict(), join(path, \"opt.pt\"))\n            del adam2\n            adam2 = None\n            adam2 = bnb.optim.Adam(\n                [p2],\n                lr,\n                (beta1, beta2),\n                eps,\n                optim_bits=optim_bits,\n                percentile_clipping=5,\n            )\n            adam2.load_state_dict(torch.load(join(path, \"opt.pt\")))\n\n\noptimizer_names_benchmark = [\n    \"adam8bit_blockwise\",\n    \"paged_adam8bit_blockwise\",\n    \"ademamix8bit_blockwise\",\n    \"paged_ademamix8bit_blockwise\",\n    \"ademamix8bit_blockwise_scheduled\",\n    \"paged_ademamix8bit_blockwise_scheduled\",\n    \"lion8bit_blockwise\",\n    \"paged_lion8bit_blockwise\",\n]\n\n\n@pytest.mark.parametrize(\"dim1\", [4096], ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"dim2\", [4096], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"gtype\", [torch.float32, torch.bfloat16, torch.float16], ids=describe_dtype)\n@pytest.mark.parametrize(\"optim_name\", optimizer_names_benchmark, ids=id_formatter(\"opt\"))\n@pytest.mark.benchmark","source_hash":"1685a1d8bbbaa8e5bad37417270e4335ad562a308d71beda23054ef9379cc808","truncated":false} 
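The percentile-clipping path that test_adam_percentile_clipping drives through bnb.optim.Adam(..., percentile_clipping=5) can also be exercised directly via bitsandbytes.functional, as the deprecated test_percentile_clipping below does. Here is a minimal sketch under the same assumptions as the tests (CUDA device, a rolling window of 100 gradient norms, 5th percentile); g_clipped is an illustrative name, not part of the test suite:

import torch

import bitsandbytes.functional as F

# gnorm_vec is a rolling buffer of the squared norms of the last 100 gradients;
# percentile_clipping updates it in place and returns the current gradient norm,
# the clip value at the requested percentile, and the scale to apply.
gnorm_vec = torch.zeros(100, device="cuda")
for step in range(1, 21):
    g = torch.randn(1024, 1024, device="cuda") * 0.1
    current_gnorm, clip_val, gnorm_scale = F.percentile_clipping(g, gnorm_vec, step, 5)
    # gnorm_scale is 1.0 while the current norm stays below the percentile clip
    # value, and clip_val / current_gnorm once it exceeds it.
    g_clipped = g.float() * gnorm_scale

Because the kernel's warp reductions make gnorm_scale slightly nondeterministic, the test above compares the two optimizers' states with loose tolerances rather than exactly.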
{"repo_id":"bitsandbytes","entity_id":"py:tests.test_optim.test_benchmark_blockwise","uri":"program://bitsandbytes/function/tests.test_optim.test_benchmark_blockwise#L571-L593","kind":"function","name":"test_benchmark_blockwise","path":"tests/test_optim.py","language":"python","start_line":571,"end_line":593,"context_start_line":551,"context_end_line":594,"code":"\n\noptimizer_names_benchmark = [\n \"adam8bit_blockwise\",\n \"paged_adam8bit_blockwise\",\n \"ademamix8bit_blockwise\",\n \"paged_ademamix8bit_blockwise\",\n \"ademamix8bit_blockwise_scheduled\",\n \"paged_ademamix8bit_blockwise_scheduled\",\n \"lion8bit_blockwise\",\n \"paged_lion8bit_blockwise\",\n \"paged_ademamix8bit_blockwise\",\n]\n\n\n@pytest.mark.parametrize(\"dim1\", [4096], ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"dim2\", [4096], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"gtype\", [torch.float32, torch.bfloat16, torch.float16], ids=describe_dtype)\n@pytest.mark.parametrize(\"optim_name\", optimizer_names_benchmark, ids=id_formatter(\"opt\"))\n@pytest.mark.benchmark\ndef test_benchmark_blockwise(dim1, dim2, gtype, optim_name, device):\n if dim1 == 1 and dim2 == 1:\n return\n p1 = torch.randn(dim1, dim2, device=device, dtype=gtype) * 0.1\n\n bnb_optimizer = str2optimizers[optim_name][1]([p1])\n\n g = torch.randn(dim1, dim2, device=device, dtype=gtype) * 0.01\n p1.grad = g\n total_steps = 500\n for i in range(total_steps):\n if i == total_steps // 5:\n # 100 iterations for burn-in\n sync_gpu(p1)\n t0 = time.time()\n\n bnb_optimizer.step()\n\n sync_gpu(p1)\n s = time.time() - t0\n print(\"\")\n params = (total_steps - total_steps // 5) * dim1 * dim2\n print(optim_name, gtype, s, params, s / params)\n # assert s < 3.9","source_hash":"1685a1d8bbbaa8e5bad37417270e4335ad562a308d71beda23054ef9379cc808","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_deprecated","uri":"program://bitsandbytes/module/tests.test_deprecated#L1-L175","kind":"module","name":"tests.test_deprecated","path":"tests/test_deprecated.py","language":"python","start_line":1,"end_line":175,"context_start_line":1,"context_end_line":175,"code":"import pytest\nimport torch\n\nimport bitsandbytes as bnb\nfrom bitsandbytes import functional as F\nfrom tests.helpers import BOOLEAN_TRIPLES, describe_dtype, get_test_dims, id_formatter\nfrom tests.test_autograd import TRANSPOSE_VALS\n\n\n@pytest.mark.deprecated\ndef test_dynamic_quantization():\n diffs = []\n reldiffs = []\n for i in range(100):\n A1 = torch.randn(1024, 1024, device=\"cuda\")\n C, S = F.quantize(A1)\n A2 = F.dequantize(C, S)\n diff = torch.abs(A1 - A2)\n reldiff = diff / torch.abs(A1 + 1e-8)\n diffs.append(diff.mean().item())\n reldiffs.append(reldiff.mean().item())\n assert diff.mean().item() < 0.0135\n print(sum(diffs) / len(diffs))\n print(sum(reldiffs) / len(reldiffs))\n\n for i in range(100):\n A1 = torch.rand(1024, 1024, device=\"cuda\")\n C, S = F.quantize(A1)\n A2 = F.dequantize(C, S)\n diff = torch.abs(A1 - A2).mean().item()\n torch.testing.assert_close(A1, A2, atol=1e-2, rtol=0)\n assert diff < 0.004\n\n\n@pytest.mark.parametrize(\"gtype\", [torch.float32, torch.float16], ids=[\"float\", \"half\"])\n@pytest.mark.deprecated\ndef test_percentile_clipping(gtype):\n gnorm_vec1 = torch.zeros(100, device=\"cuda\")\n gnorm_vec2 = torch.zeros(100, device=\"cuda\")\n n = 4\n step = 0\n percentile = 5\n for i in range(20):\n step += 1\n g = torch.randn(n, n, dtype=gtype, device=\"cuda\")\n gnorm1, clip2, gnorm_scale = F.percentile_clipping(g, 
gnorm_vec2, step, percentile=percentile)\n        assert gnorm_scale == (1.0 if gnorm1 < clip2 else clip2 / gnorm1)\n\n        gnorm2 = torch.norm(g.float())\n        if step == 1:\n            gnorm_vec1[:] = gnorm2\n        else:\n            gnorm_vec1[step % 100] = gnorm2\n\n        vals, idx = torch.sort(gnorm_vec1)\n        clip1 = vals[percentile]\n\n        torch.testing.assert_close(gnorm_vec1, torch.sqrt(gnorm_vec2))\n        torch.testing.assert_close(clip1, clip2)\n        torch.testing.assert_close(gnorm1, gnorm2)\n\n\n@pytest.mark.parametrize(\"dim1\", get_test_dims(16, 64, n=1), ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"dim2\", [*get_test_dims(32, 96, n=1), 0], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"dim3\", get_test_dims(32, 96, n=1), ids=id_formatter(\"dim3\"))\n@pytest.mark.parametrize(\"dim4\", get_test_dims(32, 96, n=1), ids=id_formatter(\"dim4\"))\n@pytest.mark.parametrize(\"req_grad\", BOOLEAN_TRIPLES, ids=id_formatter(\"req_grad\"))\n@pytest.mark.parametrize(\"transpose\", TRANSPOSE_VALS, ids=id_formatter(\"transpose\"))\n@pytest.mark.parametrize(\"dtype\", [torch.float16, torch.float32], ids=describe_dtype)\n@pytest.mark.parametrize(\n    \"funcs\",\n    [(torch.matmul, bnb.research.matmul_fp8_mixed), (torch.matmul, bnb.research.matmul_fp8_global)],\n    ids=[\"matmul_fp8_mixed\", \"matmul_fp8_global\"],\n)\n@pytest.mark.deprecated\n@pytest.mark.skip(\"Deprecated functionality, to be removed.\")\ndef test_matmul_fp8(dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose):\n    dimA = (dim2, dim3) if not transpose[0] else (dim3, dim2)\n    dimB = (dim3, dim4) if not transpose[1] else (dim4, dim3)\n    req_grad = list(req_grad)\n    req_grad[2] = False\n\n    for i in range(3):\n        # normal multiply\n        if funcs[0] in [torch.mm, torch.matmul]:\n            A = torch.randn(size=dimA, device=\"cuda\", requires_grad=req_grad[0], dtype=dtype)\n            B = torch.randn(size=dimB, device=\"cuda\", requires_grad=req_grad[1], dtype=dtype)\n            target = torch.randn(size=(dim2, dim4), device=\"cuda\", requires_grad=req_grad[1], dtype=dtype)\n\n            torch.nn.init.xavier_uniform_(B)\n\n            fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(A.device)\n            bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(A.device)\n\n            if not transpose[0] and transpose[1]:\n                out_torch = funcs[0](A, B.t())\n                out_bnb = funcs[1](A, B.t(), fw_code, bw_code)\n            elif not transpose[0] and not transpose[1]:\n                out_torch = funcs[0](A, B)\n                out_bnb = funcs[1](A, B, fw_code, bw_code)\n\n            assert out_bnb.dtype == A.dtype, f\"bnb matmullt received {A.dtype} but returned {out_bnb.dtype}\"\n\n            n = out_bnb.numel()\n            err = torch.abs(out_bnb - out_torch).float().mean().item()\n            if n > 0:\n                assert err < 0.115\n                # assert err < 0.20\n            if any(req_grad):\n                out_bnb.data.copy_(out_torch)\n                torch.cuda.synchronize()\n                loss_bnb = torch.nn.functional.mse_loss(out_bnb, target).mean()\n                loss_bnb.backward()\n                gradA1 = A.grad\n                gradB1 = B.grad\n                A.grad = None\n                B.grad = None\n\n                loss_torch = torch.nn.functional.mse_loss(out_torch, target).mean()\n                loss_torch.backward()\n                gradA2 = A.grad\n                gradB2 = B.grad\n                A.grad = None\n                B.grad = None\n\n                if req_grad[0]:\n                    torch.testing.assert_close(gradA1, gradA2, atol=0.015, rtol=0.1)\n\n                if req_grad[1]:\n                    n = gradB1.numel()\n                    if dim2 > 0:\n                        assert torch.abs(gradB1).sum() > 0.0\n                        assert torch.abs(gradB2).sum() > 0.0\n                    else:\n                        assert torch.abs(gradB1).sum() == 0.0\n                        assert torch.abs(gradB2).sum() == 0.0\n                    idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3)\n\n                    assert (idx == 
0).sum().item() <= n * 0.02\n                    grad_err = (gradB1 - gradB2).abs().mean()\n                    assert grad_err.item() < 0.003\n                    torch.testing.assert_close(gradB1, gradB2, atol=0.18, rtol=0.3)\n\n\n@pytest.mark.deprecated\ndef test_fp8linear():\n    b = 10\n    h = 1024\n    inp = torch.randn(b, h).cuda()\n    fp32 = torch.nn.Linear(h, h * 2).cuda()\n    fp8 = bnb.research.nn.LinearFP8Mixed(h, h * 2).cuda()\n    fp32b = torch.nn.Linear(h * 2, h).cuda()\n    fp8b = bnb.research.nn.LinearFP8Mixed(h * 2, h).cuda()\n\n    fp8.weight.data.copy_(fp32.weight.data)\n    fp8.bias.data.copy_(fp32.bias.data)\n    fp8b.weight.data.copy_(fp32b.weight.data)\n    fp8b.bias.data.copy_(fp32b.bias.data)\n\n    a = fp32b(torch.nn.functional.gelu(fp32(inp)))\n    b = fp8b(torch.nn.functional.gelu(fp8(inp)))\n\n    err = (a - b).abs().mean()\n\n    a.mean().backward()\n    b.mean().backward()\n\n    graderr = (fp8.weight.grad - fp32.weight.grad).abs().mean()\n    bgraderr = (fp8.bias.grad - fp32.bias.grad).abs().mean()\n\n    assert err < 0.05\n    assert graderr < 0.00002\n    assert bgraderr < 0.00002","source_hash":"0af044ebedb84e54edb6e503c4debe8156e01eff3c2fee7958fae251a44a193b","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_deprecated.test_dynamic_quantization","uri":"program://bitsandbytes/function/tests.test_deprecated.test_dynamic_quantization#L11-L32","kind":"function","name":"test_dynamic_quantization","path":"tests/test_deprecated.py","language":"python","start_line":11,"end_line":32,"context_start_line":1,"context_end_line":52,"code":"import pytest\nimport torch\n\nimport bitsandbytes as bnb\nfrom bitsandbytes import functional as F\nfrom tests.helpers import BOOLEAN_TRIPLES, describe_dtype, get_test_dims, id_formatter\nfrom tests.test_autograd import TRANSPOSE_VALS\n\n\n@pytest.mark.deprecated\ndef test_dynamic_quantization():\n    diffs = []\n    reldiffs = []\n    for i in range(100):\n        A1 = torch.randn(1024, 1024, device=\"cuda\")\n        C, S = F.quantize(A1)\n        A2 = F.dequantize(C, S)\n        diff = torch.abs(A1 - A2)\n        reldiff = diff / torch.abs(A1 + 1e-8)\n        diffs.append(diff.mean().item())\n        reldiffs.append(reldiff.mean().item())\n        assert diff.mean().item() < 0.0135\n    print(sum(diffs) / len(diffs))\n    print(sum(reldiffs) / len(reldiffs))\n\n    for i in range(100):\n        A1 = torch.rand(1024, 1024, device=\"cuda\")\n        C, S = F.quantize(A1)\n        A2 = F.dequantize(C, S)\n        diff = torch.abs(A1 - A2).mean().item()\n        torch.testing.assert_close(A1, A2, atol=1e-2, rtol=0)\n        assert diff < 0.004\n\n\n@pytest.mark.parametrize(\"gtype\", [torch.float32, torch.float16], ids=[\"float\", \"half\"])\n@pytest.mark.deprecated\ndef test_percentile_clipping(gtype):\n    gnorm_vec1 = torch.zeros(100, device=\"cuda\")\n    gnorm_vec2 = torch.zeros(100, device=\"cuda\")\n    n = 4\n    step = 0\n    percentile = 5\n    for i in range(20):\n        step += 1\n        g = torch.randn(n, n, dtype=gtype, device=\"cuda\")\n        gnorm1, clip2, gnorm_scale = F.percentile_clipping(g, gnorm_vec2, step, percentile=percentile)\n        assert gnorm_scale == (1.0 if gnorm1 < clip2 else clip2 / gnorm1)\n\n        gnorm2 = torch.norm(g.float())\n        if step == 1:\n            gnorm_vec1[:] = gnorm2\n        else:","source_hash":"0af044ebedb84e54edb6e503c4debe8156e01eff3c2fee7958fae251a44a193b","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_deprecated.test_percentile_clipping","uri":"program://bitsandbytes/function/tests.test_deprecated.test_percentile_clipping#L37-L60","kind":"function","name":"test_percentile_clipping","path":"tests/test_deprecated.py","language":"python","start_line":37,"end_line":60,"context_start_line":17,"context_end_line":80,"code":"        A2 = 
F.dequantize(C, S)\n        diff = torch.abs(A1 - A2)\n        reldiff = diff / torch.abs(A1 + 1e-8)\n        diffs.append(diff.mean().item())\n        reldiffs.append(reldiff.mean().item())\n        assert diff.mean().item() < 0.0135\n    print(sum(diffs) / len(diffs))\n    print(sum(reldiffs) / len(reldiffs))\n\n    for i in range(100):\n        A1 = torch.rand(1024, 1024, device=\"cuda\")\n        C, S = F.quantize(A1)\n        A2 = F.dequantize(C, S)\n        diff = torch.abs(A1 - A2).mean().item()\n        torch.testing.assert_close(A1, A2, atol=1e-2, rtol=0)\n        assert diff < 0.004\n\n\n@pytest.mark.parametrize(\"gtype\", [torch.float32, torch.float16], ids=[\"float\", \"half\"])\n@pytest.mark.deprecated\ndef test_percentile_clipping(gtype):\n    gnorm_vec1 = torch.zeros(100, device=\"cuda\")\n    gnorm_vec2 = torch.zeros(100, device=\"cuda\")\n    n = 4\n    step = 0\n    percentile = 5\n    for i in range(20):\n        step += 1\n        g = torch.randn(n, n, dtype=gtype, device=\"cuda\")\n        gnorm1, clip2, gnorm_scale = F.percentile_clipping(g, gnorm_vec2, step, percentile=percentile)\n        assert gnorm_scale == (1.0 if gnorm1 < clip2 else clip2 / gnorm1)\n\n        gnorm2 = torch.norm(g.float())\n        if step == 1:\n            gnorm_vec1[:] = gnorm2\n        else:\n            gnorm_vec1[step % 100] = gnorm2\n\n        vals, idx = torch.sort(gnorm_vec1)\n        clip1 = vals[percentile]\n\n        torch.testing.assert_close(gnorm_vec1, torch.sqrt(gnorm_vec2))\n        torch.testing.assert_close(clip1, clip2)\n        torch.testing.assert_close(gnorm1, gnorm2)\n\n\n@pytest.mark.parametrize(\"dim1\", get_test_dims(16, 64, n=1), ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"dim2\", [*get_test_dims(32, 96, n=1), 0], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"dim3\", get_test_dims(32, 96, n=1), ids=id_formatter(\"dim3\"))\n@pytest.mark.parametrize(\"dim4\", get_test_dims(32, 96, n=1), ids=id_formatter(\"dim4\"))\n@pytest.mark.parametrize(\"req_grad\", BOOLEAN_TRIPLES, ids=id_formatter(\"req_grad\"))\n@pytest.mark.parametrize(\"transpose\", TRANSPOSE_VALS, ids=id_formatter(\"transpose\"))\n@pytest.mark.parametrize(\"dtype\", [torch.float16, torch.float32], ids=describe_dtype)\n@pytest.mark.parametrize(\n    \"funcs\",\n    [(torch.matmul, bnb.research.matmul_fp8_mixed), (torch.matmul, bnb.research.matmul_fp8_global)],\n    ids=[\"matmul_fp8_mixed\", \"matmul_fp8_global\"],\n)\n@pytest.mark.deprecated\n@pytest.mark.skip(\"Deprecated functionality, to be removed.\")\ndef test_matmul_fp8(dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose):\n    dimA = (dim2, dim3) if not transpose[0] else (dim3, dim2)\n    dimB = (dim3, dim4) if not transpose[1] else (dim4, dim3)\n    req_grad = list(req_grad)","source_hash":"0af044ebedb84e54edb6e503c4debe8156e01eff3c2fee7958fae251a44a193b","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_deprecated.test_matmul_fp8","uri":"program://bitsandbytes/function/tests.test_deprecated.test_matmul_fp8#L77-L144","kind":"function","name":"test_matmul_fp8","path":"tests/test_deprecated.py","language":"python","start_line":77,"end_line":144,"context_start_line":57,"context_end_line":164,"code":"\n        torch.testing.assert_close(gnorm_vec1, torch.sqrt(gnorm_vec2))\n        torch.testing.assert_close(clip1, clip2)\n        torch.testing.assert_close(gnorm1, gnorm2)\n\n\n@pytest.mark.parametrize(\"dim1\", get_test_dims(16, 64, n=1), ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"dim2\", [*get_test_dims(32, 96, n=1), 0], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"dim3\", get_test_dims(32, 96, n=1), ids=id_formatter(\"dim3\"))\n@pytest.mark.parametrize(\"dim4\", get_test_dims(32, 96, n=1), 
ids=id_formatter(\"dim4\"))\n@pytest.mark.parametrize(\"req_grad\", BOOLEAN_TRIPLES, ids=id_formatter(\"req_grad\"))\n@pytest.mark.parametrize(\"transpose\", TRANSPOSE_VALS, ids=id_formatter(\"transpose\"))\n@pytest.mark.parametrize(\"dtype\", [torch.float16, torch.float32], ids=describe_dtype)\n@pytest.mark.parametrize(\n \"funcs\",\n [(torch.matmul, bnb.research.matmul_fp8_mixed), (torch.matmul, bnb.research.matmul_fp8_global)],\n ids=[\"matmul_fp8_mixed\", \"matmul_fp8_global\"],\n)\n@pytest.mark.deprecated\n@pytest.mark.skip(\"Deprecated functionality, to be removed.\")\ndef test_matmul_fp8(dim1, dim2, dim3, dim4, funcs, dtype, req_grad, transpose):\n dimA = (dim2, dim3) if not transpose[0] else (dim3, dim2)\n dimB = (dim3, dim4) if not transpose[1] else (dim4, dim3)\n req_grad = list(req_grad)\n req_grad[2] = False\n\n for i in range(3):\n # normal multiply\n if funcs[0] in [torch.mm, torch.matmul]:\n A = torch.randn(size=dimA, device=\"cuda\", requires_grad=req_grad[0], dtype=dtype)\n B = torch.randn(size=dimB, device=\"cuda\", requires_grad=req_grad[1], dtype=dtype)\n target = torch.randn(size=(dim2, dim4), device=\"cuda\", requires_grad=req_grad[1], dtype=dtype)\n\n torch.nn.init.xavier_uniform_(B)\n\n fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(A.device)\n bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(A.device)\n\n if not transpose[0] and transpose[1]:\n out_torch = funcs[0](A, B.t())\n out_bnb = funcs[1](A, B.t(), fw_code, bw_code)\n elif not transpose[0] and not transpose[1]:\n out_torch = funcs[0](A, B)\n out_bnb = funcs[1](A, B, fw_code, bw_code)\n\n assert out_bnb.dtype == A.dtype, f\"bnb matmullt received {A.dtype} but returned {out_bnb.dtype}\"\n\n n = out_bnb.numel()\n err = torch.abs(out_bnb - out_torch).float().mean().item()\n if n > 0:\n assert err < 0.115\n # assert err < 0.20\n if any(req_grad):\n out_bnb.data.copy_(out_torch)\n torch.cuda.synchronize()\n loss_bnb = torch.nn.functional.mse_loss(out_bnb, target).mean()\n loss_bnb.backward()\n gradA1 = A.grad\n gradB1 = B.grad\n A.grad = None\n B.grad = None\n\n loss_torch = torch.nn.functional.mse_loss(out_torch, target).mean()\n loss_torch.backward()\n gradA2 = A.grad\n gradB2 = B.grad\n A.grad = None\n B.grad = None\n\n if req_grad[0]:\n torch.testing.assert_close(gradA1, gradA2, atol=0.015, rtol=0.1)\n\n if req_grad[1]:\n n = gradB1.numel()\n if dim2 > 0:\n assert torch.abs(gradB1).sum() > 0.0\n assert torch.abs(gradB2).sum() > 0.0\n else:\n assert torch.abs(gradB1).sum() == 0.0\n assert torch.abs(gradB2).sum() == 0.0\n idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3)\n\n assert (idx == 0).sum().item() <= n * 0.1\n idx = torch.isclose(gradB1, gradB2, atol=0.10, rtol=0.3)\n assert (idx == 0).sum().item() <= n * 0.02\n grad_err = (gradB1 - gradB2).abs().mean()\n assert grad_err.item() < 0.003\n torch.testing.assert_close(gradB1, gradB2, atol=0.18, rtol=0.3)\n\n\n@pytest.mark.deprecated\ndef test_fp8linear():\n b = 10\n h = 1024\n inp = torch.randn(b, h).cuda()\n fp32 = torch.nn.Linear(h, h * 2).cuda()\n fp8 = bnb.research.nn.LinearFP8Mixed(h, h * 2).cuda()\n fp32b = torch.nn.Linear(h * 2, h).cuda()\n fp8b = bnb.research.nn.LinearFP8Mixed(h * 2, h).cuda()\n\n fp8.weight.data.copy_(fp32.weight.data)\n fp8.bias.data.copy_(fp32.bias.data)\n fp8b.weight.data.copy_(fp32b.weight.data)\n fp8b.bias.data.copy_(fp32b.bias.data)\n\n a = fp32b(torch.nn.functional.gelu(fp32(inp)))\n b = 
fp8b(torch.nn.functional.gelu(fp8(inp)))\n","source_hash":"0af044ebedb84e54edb6e503c4debe8156e01eff3c2fee7958fae251a44a193b","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_deprecated.test_fp8linear","uri":"program://bitsandbytes/function/tests.test_deprecated.test_fp8linear#L148-L175","kind":"function","name":"test_fp8linear","path":"tests/test_deprecated.py","language":"python","start_line":148,"end_line":175,"context_start_line":128,"context_end_line":175,"code":"\n if req_grad[1]:\n n = gradB1.numel()\n if dim2 > 0:\n assert torch.abs(gradB1).sum() > 0.0\n assert torch.abs(gradB2).sum() > 0.0\n else:\n assert torch.abs(gradB1).sum() == 0.0\n assert torch.abs(gradB2).sum() == 0.0\n idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3)\n\n assert (idx == 0).sum().item() <= n * 0.1\n idx = torch.isclose(gradB1, gradB2, atol=0.10, rtol=0.3)\n assert (idx == 0).sum().item() <= n * 0.02\n grad_err = (gradB1 - gradB2).abs().mean()\n assert grad_err.item() < 0.003\n torch.testing.assert_close(gradB1, gradB2, atol=0.18, rtol=0.3)\n\n\n@pytest.mark.deprecated\ndef test_fp8linear():\n b = 10\n h = 1024\n inp = torch.randn(b, h).cuda()\n fp32 = torch.nn.Linear(h, h * 2).cuda()\n fp8 = bnb.research.nn.LinearFP8Mixed(h, h * 2).cuda()\n fp32b = torch.nn.Linear(h * 2, h).cuda()\n fp8b = bnb.research.nn.LinearFP8Mixed(h * 2, h).cuda()\n\n fp8.weight.data.copy_(fp32.weight.data)\n fp8.bias.data.copy_(fp32.bias.data)\n fp8b.weight.data.copy_(fp32b.weight.data)\n fp8b.bias.data.copy_(fp32b.bias.data)\n\n a = fp32b(torch.nn.functional.gelu(fp32(inp)))\n b = fp8b(torch.nn.functional.gelu(fp8(inp)))\n\n err = (a - b).abs().mean()\n\n a.mean().backward()\n b.mean().backward()\n\n graderr = (fp8.weight.grad - fp32.weight.grad).abs().mean()\n bgraderr = (fp8.bias.grad - fp32.bias.grad).abs().mean()\n\n assert err < 0.05\n assert graderr < 0.00002\n assert bgraderr < 0.00002","source_hash":"0af044ebedb84e54edb6e503c4debe8156e01eff3c2fee7958fae251a44a193b","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_generation","uri":"program://bitsandbytes/module/tests.test_generation#L1-L122","kind":"module","name":"tests.test_generation","path":"tests/test_generation.py","language":"python","start_line":1,"end_line":122,"context_start_line":1,"context_end_line":122,"code":"from itertools import product\nimport math\n\nimport pytest\nimport torch\n\nfrom tests.helpers import TRUE_FALSE, describe_dtype, id_formatter\n\ntransformers = pytest.importorskip(\"transformers\")\n\n\ndef get_4bit_config():\n return transformers.BitsAndBytesConfig(\n load_in_4bit=True,\n load_in_8bit=False,\n llm_int8_threshold=6.0,\n llm_int8_has_fp16_weight=False,\n bnb_4bit_compute_dtype=torch.float16,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n )\n\n\ndef get_model_and_tokenizer(config):\n model_name_or_path, quant_type = config\n bnb_config = get_4bit_config()\n if quant_type == \"16bit\":\n bnb_config.load_in_4bit = False\n else:\n bnb_config.bnb_4bit_quant_type = quant_type\n model = transformers.AutoModelForCausalLM.from_pretrained(\n model_name_or_path,\n quantization_config=bnb_config,\n max_memory={0: \"48GB\"},\n device_map=\"auto\",\n torch_dtype=torch.bfloat16,\n ).eval()\n\n tokenizer = transformers.AutoTokenizer.from_pretrained(model_name_or_path)\n\n return model, tokenizer\n\n\ndef get_prompt_for_generation_eval(text, add_roles=True):\n description = (\n \"A chat between a curious human and an artificial intelligence assistant. 
\"\n \"The assistant gives helpful, detailed, and polite answers to the user's questions.\"\n )\n if add_roles:\n prompt = f\"{description} ### Human: {text} ### Assistant:\"\n else:\n prompt = f\"{description} {text}\"\n return prompt\n\n\ndef generate(model, tokenizer, text, generation_config, prompt_func=get_prompt_for_generation_eval):\n text = prompt_func(text)\n inputs = tokenizer(text, return_tensors=\"pt\").to(\"cuda:0\")\n outputs = model.generate(inputs=inputs[\"input_ids\"], generation_config=generation_config)\n return tokenizer.decode(outputs[0], skip_special_tokens=True)\n\n\nmodels = [\"bigscience/bloom-1b7\"]\ndtypes = [\"nf4\", \"fp4\"]\n\n\n@pytest.fixture(scope=\"session\", params=product(models, dtypes))\ndef model_and_tokenizer(request):\n model, tokenizer = get_model_and_tokenizer(request.param)\n yield request.param, model, tokenizer\n del model\n\n\n@pytest.mark.parametrize(\"DQ\", TRUE_FALSE, ids=id_formatter(\"dq\"))\n@pytest.mark.parametrize(\"inference_kernel\", TRUE_FALSE, ids=id_formatter(\"inference_kernel\"))\n@pytest.mark.parametrize(\"dtype\", [torch.float16], ids=describe_dtype)\n@pytest.mark.slow\ndef test_pi(requires_cuda, model_and_tokenizer, inference_kernel, DQ, dtype):\n fixture_config, model, tokenizer = model_and_tokenizer\n\n generation_config = transformers.GenerationConfig(\n max_new_tokens=20,\n do_sample=True,\n top_p=0.9,\n temperature=0.7,\n )\n generation_config.max_new_tokens = 20\n\n # text = 'Please write down the first 50 digits of pi.'\n # text = get_prompt_for_generation_eval(text)\n # text += ' Sure, here the first 50 digits of pi: 3.14159'\n n_cases = 6\n text = \"3.14159\"\n if hasattr(model.config, \"quantization_config\"):\n model.config.quantization_config.bnb_4bit_compute_dtype = dtype\n model.config.quantization_config.bnb_4bit_use_double_quant = DQ\n\n if not inference_kernel:\n text = [text] * n_cases\n inputs = tokenizer(text, return_tensors=\"pt\").to(\"cuda:0\")\n x = inputs[\"input_ids\"]\n outputs = []\n if inference_kernel:\n for i in range(n_cases):\n output = model.generate(x, generation_config=generation_config)\n textout = tokenizer.decode(output[0], skip_special_tokens=True)\n outputs.append(textout)\n else:\n outputs = model.generate(x, generation_config=generation_config)\n outputs = [tokenizer.decode(output, skip_special_tokens=True) for output in outputs]\n\n assert len(outputs) == n_cases\n failure_count = 0\n for i in range(n_cases):\n if outputs[i][: len(str(math.pi))] != str(math.pi):\n failure_count += 1\n failure_max = 2 if fixture_config[0] == \"huggyllama/llama-7b\" else 4\n if failure_count > failure_max:\n print(math.pi)\n for out in outputs:\n print(out)\n raise ValueError(f\"Failure count: {failure_count}/{n_cases}\")","source_hash":"75833380ed4dfd2bf6992aeb8337cd978ac12ea1e73ce2436527958ec3cf152e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_generation.get_4bit_config","uri":"program://bitsandbytes/function/tests.test_generation.get_4bit_config#L12-L21","kind":"function","name":"get_4bit_config","path":"tests/test_generation.py","language":"python","start_line":12,"end_line":21,"context_start_line":1,"context_end_line":41,"code":"from itertools import product\nimport math\n\nimport pytest\nimport torch\n\nfrom tests.helpers import TRUE_FALSE, describe_dtype, id_formatter\n\ntransformers = pytest.importorskip(\"transformers\")\n\n\ndef get_4bit_config():\n return transformers.BitsAndBytesConfig(\n load_in_4bit=True,\n load_in_8bit=False,\n llm_int8_threshold=6.0,\n 
llm_int8_has_fp16_weight=False,\n bnb_4bit_compute_dtype=torch.float16,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n )\n\n\ndef get_model_and_tokenizer(config):\n model_name_or_path, quant_type = config\n bnb_config = get_4bit_config()\n if quant_type == \"16bit\":\n bnb_config.load_in_4bit = False\n else:\n bnb_config.bnb_4bit_quant_type = quant_type\n model = transformers.AutoModelForCausalLM.from_pretrained(\n model_name_or_path,\n quantization_config=bnb_config,\n max_memory={0: \"48GB\"},\n device_map=\"auto\",\n torch_dtype=torch.bfloat16,\n ).eval()\n\n tokenizer = transformers.AutoTokenizer.from_pretrained(model_name_or_path)\n\n return model, tokenizer","source_hash":"75833380ed4dfd2bf6992aeb8337cd978ac12ea1e73ce2436527958ec3cf152e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_generation.get_model_and_tokenizer","uri":"program://bitsandbytes/function/tests.test_generation.get_model_and_tokenizer#L24-L41","kind":"function","name":"get_model_and_tokenizer","path":"tests/test_generation.py","language":"python","start_line":24,"end_line":41,"context_start_line":4,"context_end_line":61,"code":"import pytest\nimport torch\n\nfrom tests.helpers import TRUE_FALSE, describe_dtype, id_formatter\n\ntransformers = pytest.importorskip(\"transformers\")\n\n\ndef get_4bit_config():\n return transformers.BitsAndBytesConfig(\n load_in_4bit=True,\n load_in_8bit=False,\n llm_int8_threshold=6.0,\n llm_int8_has_fp16_weight=False,\n bnb_4bit_compute_dtype=torch.float16,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n )\n\n\ndef get_model_and_tokenizer(config):\n model_name_or_path, quant_type = config\n bnb_config = get_4bit_config()\n if quant_type == \"16bit\":\n bnb_config.load_in_4bit = False\n else:\n bnb_config.bnb_4bit_quant_type = quant_type\n model = transformers.AutoModelForCausalLM.from_pretrained(\n model_name_or_path,\n quantization_config=bnb_config,\n max_memory={0: \"48GB\"},\n device_map=\"auto\",\n torch_dtype=torch.bfloat16,\n ).eval()\n\n tokenizer = transformers.AutoTokenizer.from_pretrained(model_name_or_path)\n\n return model, tokenizer\n\n\ndef get_prompt_for_generation_eval(text, add_roles=True):\n description = (\n \"A chat between a curious human and an artificial intelligence assistant. 
\"\n \"The assistant gives helpful, detailed, and polite answers to the user's questions.\"\n )\n if add_roles:\n prompt = f\"{description} ### Human: {text} ### Assistant:\"\n else:\n prompt = f\"{description} {text}\"\n return prompt\n\n\ndef generate(model, tokenizer, text, generation_config, prompt_func=get_prompt_for_generation_eval):\n text = prompt_func(text)\n inputs = tokenizer(text, return_tensors=\"pt\").to(\"cuda:0\")\n outputs = model.generate(inputs=inputs[\"input_ids\"], generation_config=generation_config)\n return tokenizer.decode(outputs[0], skip_special_tokens=True)\n","source_hash":"75833380ed4dfd2bf6992aeb8337cd978ac12ea1e73ce2436527958ec3cf152e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_generation.get_prompt_for_generation_eval","uri":"program://bitsandbytes/function/tests.test_generation.get_prompt_for_generation_eval#L44-L53","kind":"function","name":"get_prompt_for_generation_eval","path":"tests/test_generation.py","language":"python","start_line":44,"end_line":53,"context_start_line":24,"context_end_line":73,"code":"def get_model_and_tokenizer(config):\n model_name_or_path, quant_type = config\n bnb_config = get_4bit_config()\n if quant_type == \"16bit\":\n bnb_config.load_in_4bit = False\n else:\n bnb_config.bnb_4bit_quant_type = quant_type\n model = transformers.AutoModelForCausalLM.from_pretrained(\n model_name_or_path,\n quantization_config=bnb_config,\n max_memory={0: \"48GB\"},\n device_map=\"auto\",\n torch_dtype=torch.bfloat16,\n ).eval()\n\n tokenizer = transformers.AutoTokenizer.from_pretrained(model_name_or_path)\n\n return model, tokenizer\n\n\ndef get_prompt_for_generation_eval(text, add_roles=True):\n description = (\n \"A chat between a curious human and an artificial intelligence assistant. \"\n \"The assistant gives helpful, detailed, and polite answers to the user's questions.\"\n )\n if add_roles:\n prompt = f\"{description} ### Human: {text} ### Assistant:\"\n else:\n prompt = f\"{description} {text}\"\n return prompt\n\n\ndef generate(model, tokenizer, text, generation_config, prompt_func=get_prompt_for_generation_eval):\n text = prompt_func(text)\n inputs = tokenizer(text, return_tensors=\"pt\").to(\"cuda:0\")\n outputs = model.generate(inputs=inputs[\"input_ids\"], generation_config=generation_config)\n return tokenizer.decode(outputs[0], skip_special_tokens=True)\n\n\nmodels = [\"bigscience/bloom-1b7\"]\ndtypes = [\"nf4\", \"fp4\"]\n\n\n@pytest.fixture(scope=\"session\", params=product(models, dtypes))\ndef model_and_tokenizer(request):\n model, tokenizer = get_model_and_tokenizer(request.param)\n yield request.param, model, tokenizer\n del model\n\n","source_hash":"75833380ed4dfd2bf6992aeb8337cd978ac12ea1e73ce2436527958ec3cf152e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_generation.generate","uri":"program://bitsandbytes/function/tests.test_generation.generate#L56-L60","kind":"function","name":"generate","path":"tests/test_generation.py","language":"python","start_line":56,"end_line":60,"context_start_line":36,"context_end_line":80,"code":" torch_dtype=torch.bfloat16,\n ).eval()\n\n tokenizer = transformers.AutoTokenizer.from_pretrained(model_name_or_path)\n\n return model, tokenizer\n\n\ndef get_prompt_for_generation_eval(text, add_roles=True):\n description = (\n \"A chat between a curious human and an artificial intelligence assistant. 
\"\n \"The assistant gives helpful, detailed, and polite answers to the user's questions.\"\n )\n if add_roles:\n prompt = f\"{description} ### Human: {text} ### Assistant:\"\n else:\n prompt = f\"{description} {text}\"\n return prompt\n\n\ndef generate(model, tokenizer, text, generation_config, prompt_func=get_prompt_for_generation_eval):\n text = prompt_func(text)\n inputs = tokenizer(text, return_tensors=\"pt\").to(\"cuda:0\")\n outputs = model.generate(inputs=inputs[\"input_ids\"], generation_config=generation_config)\n return tokenizer.decode(outputs[0], skip_special_tokens=True)\n\n\nmodels = [\"bigscience/bloom-1b7\"]\ndtypes = [\"nf4\", \"fp4\"]\n\n\n@pytest.fixture(scope=\"session\", params=product(models, dtypes))\ndef model_and_tokenizer(request):\n model, tokenizer = get_model_and_tokenizer(request.param)\n yield request.param, model, tokenizer\n del model\n\n\n@pytest.mark.parametrize(\"DQ\", TRUE_FALSE, ids=id_formatter(\"dq\"))\n@pytest.mark.parametrize(\"inference_kernel\", TRUE_FALSE, ids=id_formatter(\"inference_kernel\"))\n@pytest.mark.parametrize(\"dtype\", [torch.float16], ids=describe_dtype)\n@pytest.mark.slow\ndef test_pi(requires_cuda, model_and_tokenizer, inference_kernel, DQ, dtype):\n fixture_config, model, tokenizer = model_and_tokenizer\n","source_hash":"75833380ed4dfd2bf6992aeb8337cd978ac12ea1e73ce2436527958ec3cf152e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_generation.model_and_tokenizer","uri":"program://bitsandbytes/function/tests.test_generation.model_and_tokenizer#L68-L71","kind":"function","name":"model_and_tokenizer","path":"tests/test_generation.py","language":"python","start_line":68,"end_line":71,"context_start_line":48,"context_end_line":91,"code":" )\n if add_roles:\n prompt = f\"{description} ### Human: {text} ### Assistant:\"\n else:\n prompt = f\"{description} {text}\"\n return prompt\n\n\ndef generate(model, tokenizer, text, generation_config, prompt_func=get_prompt_for_generation_eval):\n text = prompt_func(text)\n inputs = tokenizer(text, return_tensors=\"pt\").to(\"cuda:0\")\n outputs = model.generate(inputs=inputs[\"input_ids\"], generation_config=generation_config)\n return tokenizer.decode(outputs[0], skip_special_tokens=True)\n\n\nmodels = [\"bigscience/bloom-1b7\"]\ndtypes = [\"nf4\", \"fp4\"]\n\n\n@pytest.fixture(scope=\"session\", params=product(models, dtypes))\ndef model_and_tokenizer(request):\n model, tokenizer = get_model_and_tokenizer(request.param)\n yield request.param, model, tokenizer\n del model\n\n\n@pytest.mark.parametrize(\"DQ\", TRUE_FALSE, ids=id_formatter(\"dq\"))\n@pytest.mark.parametrize(\"inference_kernel\", TRUE_FALSE, ids=id_formatter(\"inference_kernel\"))\n@pytest.mark.parametrize(\"dtype\", [torch.float16], ids=describe_dtype)\n@pytest.mark.slow\ndef test_pi(requires_cuda, model_and_tokenizer, inference_kernel, DQ, dtype):\n fixture_config, model, tokenizer = model_and_tokenizer\n\n generation_config = transformers.GenerationConfig(\n max_new_tokens=20,\n do_sample=True,\n top_p=0.9,\n temperature=0.7,\n )\n generation_config.max_new_tokens = 20\n\n # text = 'Please write down the first 50 digits of pi.'\n # text = get_prompt_for_generation_eval(text)\n # text += ' Sure, here the first 50 digits of pi: 3.14159'","source_hash":"75833380ed4dfd2bf6992aeb8337cd978ac12ea1e73ce2436527958ec3cf152e","truncated":false} 
{"repo_id":"bitsandbytes","entity_id":"py:tests.test_generation.test_pi","uri":"program://bitsandbytes/function/tests.test_generation.test_pi#L78-L122","kind":"function","name":"test_pi","path":"tests/test_generation.py","language":"python","start_line":78,"end_line":122,"context_start_line":58,"context_end_line":122,"code":" inputs = tokenizer(text, return_tensors=\"pt\").to(\"cuda:0\")\n outputs = model.generate(inputs=inputs[\"input_ids\"], generation_config=generation_config)\n return tokenizer.decode(outputs[0], skip_special_tokens=True)\n\n\nmodels = [\"bigscience/bloom-1b7\"]\ndtypes = [\"nf4\", \"fp4\"]\n\n\n@pytest.fixture(scope=\"session\", params=product(models, dtypes))\ndef model_and_tokenizer(request):\n model, tokenizer = get_model_and_tokenizer(request.param)\n yield request.param, model, tokenizer\n del model\n\n\n@pytest.mark.parametrize(\"DQ\", TRUE_FALSE, ids=id_formatter(\"dq\"))\n@pytest.mark.parametrize(\"inference_kernel\", TRUE_FALSE, ids=id_formatter(\"inference_kernel\"))\n@pytest.mark.parametrize(\"dtype\", [torch.float16], ids=describe_dtype)\n@pytest.mark.slow\ndef test_pi(requires_cuda, model_and_tokenizer, inference_kernel, DQ, dtype):\n fixture_config, model, tokenizer = model_and_tokenizer\n\n generation_config = transformers.GenerationConfig(\n max_new_tokens=20,\n do_sample=True,\n top_p=0.9,\n temperature=0.7,\n )\n generation_config.max_new_tokens = 20\n\n # text = 'Please write down the first 50 digits of pi.'\n # text = get_prompt_for_generation_eval(text)\n # text += ' Sure, here the first 50 digits of pi: 3.14159'\n n_cases = 6\n text = \"3.14159\"\n if hasattr(model.config, \"quantization_config\"):\n model.config.quantization_config.bnb_4bit_compute_dtype = dtype\n model.config.quantization_config.bnb_4bit_use_double_quant = DQ\n\n if not inference_kernel:\n text = [text] * n_cases\n inputs = tokenizer(text, return_tensors=\"pt\").to(\"cuda:0\")\n x = inputs[\"input_ids\"]\n outputs = []\n if inference_kernel:\n for i in range(n_cases):\n output = model.generate(x, generation_config=generation_config)\n textout = tokenizer.decode(output[0], skip_special_tokens=True)\n outputs.append(textout)\n else:\n outputs = model.generate(x, generation_config=generation_config)\n outputs = [tokenizer.decode(output, skip_special_tokens=True) for output in outputs]\n\n assert len(outputs) == n_cases\n failure_count = 0\n for i in range(n_cases):\n if outputs[i][: len(str(math.pi))] != str(math.pi):\n failure_count += 1\n failure_max = 2 if fixture_config[0] == \"huggyllama/llama-7b\" else 4\n if failure_count > failure_max:\n print(math.pi)\n for out in outputs:\n print(out)\n raise ValueError(f\"Failure count: {failure_count}/{n_cases}\")","source_hash":"75833380ed4dfd2bf6992aeb8337cd978ac12ea1e73ce2436527958ec3cf152e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_triton","uri":"program://bitsandbytes/module/tests.test_triton#L1-L64","kind":"module","name":"tests.test_triton","path":"tests/test_triton.py","language":"python","start_line":1,"end_line":64,"context_start_line":1,"context_end_line":64,"code":"import pytest\nimport torch\n\nfrom bitsandbytes.nn import Linear8bitLt\nfrom bitsandbytes.nn.triton_based_modules import SwitchBackLinear\nfrom bitsandbytes.triton.triton_utils import is_triton_available\nfrom tests.helpers import TRUE_FALSE\n\n\n@pytest.mark.skipif(\n not is_triton_available() or not torch.cuda.is_available() or not torch.cuda.get_device_capability()[0] >= 8,\n reason=\"This test requires triton and a GPU with 
compute capability 8.0 or higher.\",\n)\n@pytest.mark.deprecated\n@pytest.mark.parametrize(\"vector_wise_quantization\", TRUE_FALSE)\ndef test_switchback(vector_wise_quantization):\n for dim in [83]:\n for batch in [13]:\n standard = torch.nn.Linear(dim, 4 * dim).cuda().half()\n switchback = (\n SwitchBackLinear(dim, 4 * dim, vector_wise_quantization=vector_wise_quantization).cuda().half()\n )\n baseline = Linear8bitLt(dim, 4 * dim).cuda().half()\n switchback.weight.data.copy_(standard.weight)\n switchback.bias.data.copy_(standard.bias)\n baseline.weight.data.copy_(standard.weight)\n baseline.bias.data.copy_(standard.bias)\n\n x1 = torch.randn(batch, dim).cuda().half().requires_grad_(True)\n x2 = x1.clone().detach().requires_grad_(True)\n x3 = x1.clone().detach().requires_grad_(True)\n\n out_standard = standard(x1)\n (2**10 * out_standard.abs().mean()).backward()\n\n print(x2.dtype)\n out_sb = switchback(x2)\n (2**10 * out_sb.abs().mean()).backward()\n\n out_baseline = baseline(x3)\n (2**10 * out_baseline.abs().mean()).backward()\n\n err_sb = (out_standard - out_sb).abs().mean()\n err_baseline = (out_standard - out_baseline).abs().mean()\n print(\"OUT\", err_sb, err_baseline)\n assert err_sb < 2 * err_baseline\n\n err_sb = (standard.bias.grad - switchback.bias.grad).abs().mean()\n err_baseline = (standard.bias.grad - baseline.bias.grad).abs().mean()\n\n print(\"GW2\", err_sb, err_baseline)\n assert err_sb < 2 * err_baseline\n\n err_sb = (standard.weight.grad - switchback.weight.grad).abs().mean()\n err_baseline = (standard.weight.grad - baseline.weight.grad).abs().mean()\n\n print(\"GW1\", err_sb, err_baseline)\n assert err_sb < 2 * err_baseline\n\n err_sb = (x1.grad - x2.grad).abs().mean()\n err_baseline = (x1.grad - x3.grad).abs().mean()\n\n print(\"GX1\", err_sb, err_baseline)\n assert err_sb < 2 * err_baseline","source_hash":"e062201eb80ecb8b33be857dd321227f5ed900e07f751fda0b3f0baf21428e94","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_triton.test_switchback","uri":"program://bitsandbytes/function/tests.test_triton.test_switchback#L16-L64","kind":"function","name":"test_switchback","path":"tests/test_triton.py","language":"python","start_line":16,"end_line":64,"context_start_line":1,"context_end_line":64,"code":"import pytest\nimport torch\n\nfrom bitsandbytes.nn import Linear8bitLt\nfrom bitsandbytes.nn.triton_based_modules import SwitchBackLinear\nfrom bitsandbytes.triton.triton_utils import is_triton_available\nfrom tests.helpers import TRUE_FALSE\n\n\n@pytest.mark.skipif(\n not is_triton_available() or not torch.cuda.is_available() or not torch.cuda.get_device_capability()[0] >= 8,\n reason=\"This test requires triton and a GPU with compute capability 8.0 or higher.\",\n)\n@pytest.mark.deprecated\n@pytest.mark.parametrize(\"vector_wise_quantization\", TRUE_FALSE)\ndef test_switchback(vector_wise_quantization):\n for dim in [83]:\n for batch in [13]:\n standard = torch.nn.Linear(dim, 4 * dim).cuda().half()\n switchback = (\n SwitchBackLinear(dim, 4 * dim, vector_wise_quantization=vector_wise_quantization).cuda().half()\n )\n baseline = Linear8bitLt(dim, 4 * dim).cuda().half()\n switchback.weight.data.copy_(standard.weight)\n switchback.bias.data.copy_(standard.bias)\n baseline.weight.data.copy_(standard.weight)\n baseline.bias.data.copy_(standard.bias)\n\n x1 = torch.randn(batch, dim).cuda().half().requires_grad_(True)\n x2 = x1.clone().detach().requires_grad_(True)\n x3 = x1.clone().detach().requires_grad_(True)\n\n out_standard = standard(x1)\n (2**10 * 
out_standard.abs().mean()).backward()\n\n print(x2.dtype)\n out_sb = switchback(x2)\n (2**10 * out_sb.abs().mean()).backward()\n\n out_baseline = baseline(x3)\n (2**10 * out_baseline.abs().mean()).backward()\n\n err_sb = (out_standard - out_sb).abs().mean()\n err_baseline = (out_standard - out_baseline).abs().mean()\n print(\"OUT\", err_sb, err_baseline)\n assert err_sb < 2 * err_baseline\n\n err_sb = (standard.bias.grad - switchback.bias.grad).abs().mean()\n err_baseline = (standard.bias.grad - baseline.bias.grad).abs().mean()\n\n print(\"GW2\", err_sb, err_baseline)\n assert err_sb < 2 * err_baseline\n\n err_sb = (standard.weight.grad - switchback.weight.grad).abs().mean()\n err_baseline = (standard.weight.grad - baseline.weight.grad).abs().mean()\n\n print(\"GW1\", err_sb, err_baseline)\n assert err_sb < 2 * err_baseline\n\n err_sb = (x1.grad - x2.grad).abs().mean()\n err_baseline = (x1.grad - x3.grad).abs().mean()\n\n print(\"GX1\", err_sb, err_baseline)\n assert err_sb < 2 * err_baseline","source_hash":"e062201eb80ecb8b33be857dd321227f5ed900e07f751fda0b3f0baf21428e94","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional","uri":"program://bitsandbytes/module/tests.test_functional#L1-L1459","kind":"module","name":"tests.test_functional","path":"tests/test_functional.py","language":"python","start_line":1,"end_line":1459,"context_start_line":1,"context_end_line":1459,"code":"import math\nimport platform\nimport random\nimport time\n\nimport einops\nfrom packaging import version\nimport pytest\nimport torch\n\nimport bitsandbytes as bnb\nfrom bitsandbytes import functional as F\nfrom bitsandbytes.cextension import HIP_ENVIRONMENT, ROCM_GPU_ARCH\nfrom tests.helpers import (\n BOOLEAN_TUPLES,\n TRUE_FALSE,\n describe_dtype,\n get_available_devices,\n get_test_dims,\n id_formatter,\n is_supported_on_hpu,\n)\n\ntorch.set_printoptions(precision=5, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000)\nk = 20\n\n\ndef assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True):\n idx = torch.isclose(a, b, rtol=rtol, atol=atol)\n sumval = (idx == 0).sum().item()\n if sumval > count:\n if throw:\n print(f\"Too many values not close: assert {sumval} < {count}\")\n torch.testing.assert_close(a, b, rtol=rtol, atol=atol)\n\n return sumval\n\n\nclass FFN(torch.nn.Module):\n def __init__(self, input_features, hidden_size, bias=True):\n super().__init__()\n self.fc1 = torch.nn.Linear(input_features, hidden_size, bias=bias)\n self.fc2 = torch.nn.Linear(hidden_size, input_features, bias=bias)\n\n with torch.no_grad():\n torch.nn.init.xavier_uniform_(self.fc1.weight)\n torch.nn.init.xavier_uniform_(self.fc2.weight)\n\n def forward(self, x):\n x = torch.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n\nclass Timer:\n def __init__(self):\n self.starts = {}\n self.ends = {}\n self.agg = {}\n\n def tick(self, name=\"default\"):\n if name not in self.starts:\n self.starts[name] = torch.cuda.Event(enable_timing=True)\n self.ends[name] = torch.cuda.Event(enable_timing=True)\n self.starts[name].record()\n else:\n ms = self.tock(name, evict=True, print_ms=False)\n\n def tock(self, name=\"default\", evict=True, print_ms=True):\n if name in self.ends:\n self.ends[name].record()\n torch.cuda.synchronize()\n ms = self.starts[name].elapsed_time(self.ends[name])\n if name not in self.agg:\n self.agg[name] = 0.0\n self.agg[name] += ms\n if evict:\n self.starts.pop(name)\n self.ends.pop(name)\n\n if print_ms and name in self.agg:\n print(f\"{name} took: {self.agg[name] / 
1000.0:.5f}s\")\n\n return self.agg[name]\n\n def reset(self):\n self.starts = {}\n self.ends = {}\n self.agg = {}\n print(\"Resetting benchmark data\")\n\n\nclass Test8BitBlockwiseQuantizeFunctional:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n @pytest.mark.parametrize(\"nested\", TRUE_FALSE, ids=id_formatter(\"nested\"))\n @pytest.mark.parametrize(\n \"blocksize\",\n [4096, 2048, 1024, 512, 256, 128, 64] if not HIP_ENVIRONMENT else [4096, 2048, 1024, 512, 256, 128],\n )\n @pytest.mark.parametrize(\"signed\", TRUE_FALSE, ids=id_formatter(\"signed\"))\n def test_dynamic_blockwise_quantization(self, device, dtype, nested, blocksize, signed):\n iters = 100\n\n if device != \"cuda\":\n iters = 10\n\n # This test is slow in our non-CUDA implementations, so avoid atypical use cases.\n if nested:\n pytest.skip(\"Not a typical use case.\")\n if blocksize != 256:\n pytest.skip(\"Only blocksize 256 is used in CPU/MPS/XPU\")\n if dtype != torch.float32:\n pytest.skip(\"Only float32 is used in CPU/MPS/XPU\")\n\n diffs = []\n reldiffs = []\n for i in range(iters):\n A1 = torch.randn(1024, 1024, device=device, dtype=dtype)\n C, S = F.quantize_blockwise(A1, blocksize=blocksize, nested=nested)\n A2 = F.dequantize_blockwise(C, S)\n diff = torch.abs(A1 - A2).float()\n reldiff = diff / torch.abs(A1.float() + 1e-8)\n diffs.append(diff.mean().item())\n reldiffs.append(reldiff.mean().item())\n abserr = sum(diffs) / len(diffs)\n relerr = sum(reldiffs) / len(reldiffs)\n assert abserr < 0.011\n assert relerr < 0.018\n assert A2.dtype == dtype\n\n diffs = []\n code = F.create_dynamic_map(signed=signed)\n for i in range(iters):\n A1 = torch.rand(1024, 1024, device=device, dtype=dtype)\n C, S = F.quantize_blockwise(A1, blocksize=blocksize, nested=nested, code=code)\n A2 = F.dequantize_blockwise(C, S)\n diff = torch.abs(A1 - A2).float()\n reldiff = diff / torch.abs(A1.float() + 1e-8)\n diffs.append(diff.mean().item())\n reldiffs.append(reldiff.mean().item())\n # torch.testing.assert_close(A1, A2, atol=1e-2, rtol=0)\n abserr = sum(diffs) / len(diffs)\n relerr = sum(reldiffs) / len(reldiffs)\n if signed:\n threshold_abserr = 0.0035\n assert abserr < 0.0036\n assert relerr < 0.015\n else:\n assert abserr < 0.0023\n assert relerr < 0.012\n assert A2.dtype == dtype\n\n @pytest.mark.skipif(\"cpu\" not in get_available_devices(), reason=\"CPU is required\")\n @pytest.mark.parametrize(\"hidden\", [128])\n @pytest.mark.parametrize(\"blocksize\", [4096, 16384])\n def test_blockwise_cpu_large(self, hidden, blocksize):\n diffs = []\n reldiffs = []\n batch = 128\n seq = 128\n\n for i in range(2):\n A1 = torch.randn(batch, seq, hidden, device=\"cpu\")\n t0 = time.time()\n C, S = F.quantize_blockwise(A1, blocksize=blocksize)\n A2 = F.dequantize_blockwise(C, S, blocksize=blocksize)\n print(time.time() - t0)\n diff = torch.abs(A1 - A2)\n reldiff = diff / torch.abs(A1 + 1e-8)\n diffs.append(diff.mean().item())\n reldiffs.append(reldiff.mean().item())\n assert diffs[-1] < 0.011\n # print(sum(diffs)/len(diffs))\n # print(sum(reldiffs)/len(reldiffs))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"bits\", range(2, 9), ids=id_formatter(\"bits\"))\n @pytest.mark.parametrize(\"method\", [\"linear\", \"fp8\", \"dynamic\"])\n def test_few_bit_quant(self, device, bits, method):\n if bits != 8 and device == \"cpu\":\n pytest.skip(\"CPU implementation only supports 8 
bits\")\n\n abserrs = []\n relerrs = []\n code = None\n if method == \"linear\":\n code = F.create_linear_map(True, total_bits=bits).to(device)\n elif method == \"fp8\":\n ebits = math.ceil(bits / 2)\n pbits = bits - ebits - 1\n code = F.create_fp8_map(True, ebits, pbits, bits).to(device)\n elif method == \"dynamic\":\n code = F.create_dynamic_map(True, bits - 0, bits).to(device)\n\n # for some data types we have no zero\n # for some data types we have one zero\n # for some data types we have two zeros\n assert torch.unique(code).numel() in [2**bits, 2**bits - 1], f\"bits: {bits}, method: {method}\"\n # print(method, (code==0).sum())\n assert code.numel() == 256\n for i in range(10):\n values = torch.randn(1, 32, device=device)\n values /= values.abs().max()\n # values[values.abs() < 1e-6] += 1e-5\n\n q1 = []\n v1 = []\n for v in values[0]:\n idx = torch.abs(v - code).argmin()\n q1.append(idx.item())\n v1.append(code[idx].item())\n\n q1 = torch.tensor(q1, device=device)\n v1 = torch.tensor(v1, device=device)\n\n q2, S2 = F.quantize_blockwise(values, code=code)\n v2 = F.dequantize_blockwise(q2, S2)\n\n idx = torch.isclose(q1.int(), q2.int())\n err2 = torch.abs(v2 - values)\n abserrs.append(err2.mean().item())\n relerrs.append((err2 / (1e-10 + values).abs()).mean().item())\n if idx.sum():\n # some weird cases\n err1 = torch.abs(v1 - values).mean()\n # assert err2.mean() <= err1\n else:\n torch.testing.assert_close(q1, q2)\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_fp8_quant(self, device):\n # TODO\n if device == \"cpu\":\n pytest.skip(\"CPU implementation segfaults\")\n\n for e_bits in range(1, 7):\n p_bits = 7 - e_bits\n code = F.create_fp8_map(True, e_bits, p_bits).to(device)\n\n abserr = []\n relerr = []\n for i in range(10):\n A1 = torch.randn(1024, 1024, device=device)\n C, SC = F.quantize_blockwise(A1, code=code)\n A2 = F.dequantize_blockwise(C, SC)\n diff = torch.abs(A1 - A2)\n reldiff = diff / torch.abs(A1 + 1e-8)\n abserr.append(diff.mean().item())\n relerr.append(reldiff.mean().item())\n # assert diff < 0.0075\n # print(sum(abserr)/len(abserr))\n # print(sum(relerr)/len(relerr))\n\n abserr = []\n relerr = []\n for i in range(10):\n A1 = torch.rand(1024, 1024, device=device)\n C, SC = F.quantize_blockwise(A1, code=code)\n A2 = F.dequantize_blockwise(C, SC)\n diff = torch.abs(A1 - A2)\n reldiff = diff / torch.abs(A1 + 1e-8)\n abserr.append(diff.mean().item())\n relerr.append(reldiff.mean().item())\n # assert diff < 0.0075\n # print(sum(abserr)/len(abserr))\n # print(sum(relerr)/len(relerr))\n\n abserr = []\n relerr = []\n for i in range(10):\n A1 = torch.randn(1024, 1024, device=device)\n C, SC = F.quantize_blockwise(A1)\n A2 = F.dequantize_blockwise(C, SC)\n diff = torch.abs(A1 - A2)\n reldiff = diff / torch.abs(A1 + 1e-8)\n abserr.append(diff.mean().item())\n relerr.append(reldiff.mean().item())\n # assert diff < 0.0075\n # print(3, sum(abserr)/len(abserr))\n # print(3, sum(relerr)/len(relerr))\n\n @pytest.mark.benchmark\n def test_bench_dequantization(self):\n a = torch.rand(1024, 1024, device=\"cuda\").half()\n code = F.create_fp8_map(True, 3, 0, 4).cuda()\n qa, SA = F.quantize_blockwise(a, code=code)\n print(qa.max())\n\n max_theoretical_mu = 1024 * 1024 * 2 / 1024**3 / 672 * 1000 * 1000\n # print(max_theoretical_mu)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n qa, SA = F.quantize_blockwise(a)\n torch.cuda.synchronize()\n # print((time.time()-t0)/1e6)\n\n\ndef test_stable_embedding():\n layer = 
bnb.nn.StableEmbedding(1024, 1024)\n layer.reset_parameters()\n\n\ndef quant(x):\n max1 = torch.abs(x).max()\n x = torch.round(x / max1 * 127)\n return max1, x.to(torch.int8)\n\n\ndef dequant(c, maxC):\n return c.float() * (maxC / 127)\n\n\ndef mm_dequant(maxA, maxB, C):\n return C.float() * (maxA / 127) * (maxB / 127)\n\n\ndef quant_multi(x, dim):\n max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)\n max1[max1 == 0] = 1.0\n x = torch.round(x / max1 * 127)\n return max1, x.to(torch.int8)\n\n\ndef quant_multi_chunk(x, dim, chunk_size=32):\n if dim == 1:\n x_chunked = einops.rearrange(x, \"(c a) b -> c a b\", c=chunk_size)\n max1 = torch.amax(torch.abs(x_chunked), dim=dim + 1, keepdim=True)\n max1 = torch.tile(max1, (1, 1, x.shape[1]))\n max1 = max1.view(x.shape)\n elif dim == 0:\n x_chunked = einops.rearrange(x, \"a (b c) -> a b c\", c=chunk_size)\n max1 = torch.amax(torch.abs(x_chunked), dim=dim, keepdim=True)\n max1 = torch.tile(max1, (x.shape[0], 1, 1))\n max1 = max1.view(x.shape)\n max1[max1 == 0] = 1.0\n x = torch.round(x / max1 * 127)\n return max1, x.to(torch.int8)\n\n\ndef mean(xx):\n return sum(xx) / float(len(xx))\n\n\nmethods = {\n \"linear\": (\n lambda x, dim: quant(x),\n lambda x, dim: quant(x),\n dequant,\n dequant,\n mm_dequant,\n ),\n \"vectorwise\": (quant_multi, quant_multi, dequant, dequant, mm_dequant),\n}\n\n\n@pytest.mark.skipif(not torch.cuda.is_available(), reason=\"CUDA is required\")\nclass TestIGEMMFunctional:\n @pytest.mark.parametrize(\"dim1\", [1024 * 2], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [1024 * 16], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"quant_methods\", methods.values(), ids=methods.keys())\n @pytest.mark.parametrize(\"batched\", TRUE_FALSE, ids=id_formatter(\"batched\"))\n def test_approx_igemm(self, dim1, dim2, quant_methods, batched):\n dim1 = dim1 - (dim1 % 32)\n dim2 = dim2 - (dim2 % 32)\n errors = []\n relerrors = []\n # print(\"\")\n for i in range(5):\n if batched:\n A = torch.normal(0, 0.5, size=(32, dim1, dim2 // 32), device=\"cuda\")\n B = torch.normal(0, 0.5, size=(32, dim2 // 32, dim1), device=\"cuda\")\n maxA, Ac = quant_methods[0](A, 2)\n maxB, Bc = quant_methods[1](B, 1)\n else:\n A = torch.normal(0, 0.5, size=(dim1, dim2), device=\"cuda\")\n B = torch.normal(0, 0.5, size=(dim2, dim1), device=\"cuda\")\n maxA, Ac = quant_methods[0](A, 1)\n maxB, Bc = quant_methods[1](B, 0)\n torch.testing.assert_close(quant_methods[2](maxA, Ac), A, atol=0.025, rtol=0.05)\n if batched:\n out2 = torch.bmm(A, B)\n C = torch.bmm(Ac.float(), Bc.float())\n else:\n out2 = torch.mm(A, B)\n C = F.igemm(Ac, Bc)\n out = quant_methods[4](maxA, maxB, C)\n std = out2.std()\n out /= std\n out2 /= std\n err = torch.abs(out - out2)\n relerr = err / torch.abs(out2)\n errors.append(err.mean().item())\n relerrors.append(relerr.mean().item())\n # print(mean(errors))\n # print(mean(relerrors))\n\n @pytest.mark.parametrize(\"hidden_dim\", [32, 256], ids=id_formatter(\"hidden_dim\"))\n @pytest.mark.parametrize(\"batch_dim\", [16, 256], ids=id_formatter(\"batch_dim\"))\n @pytest.mark.parametrize(\"seq_dim\", [16, 256], ids=id_formatter(\"seq_dim\"))\n @pytest.mark.parametrize(\"transpose\", BOOLEAN_TUPLES, ids=id_formatter(\"transpose\"))\n def test_igemm(self, hidden_dim, batch_dim, transpose, seq_dim):\n hidden_dim = hidden_dim - (hidden_dim % 32)\n batch_dim = batch_dim - (batch_dim % 16)\n seq_dim = seq_dim - (seq_dim % 16)\n for i in range(k):\n shapeA = (batch_dim, hidden_dim) if not transpose[0] else (hidden_dim, 
batch_dim)\n shapeB = (\n (32 * random.randint(1, 4), hidden_dim) if transpose[1] else (hidden_dim, 32 * random.randint(1, 4))\n )\n A = torch.randint(-128, 127, size=shapeA, device=\"cuda\").to(torch.int8)\n B = torch.randint(-128, 127, size=shapeB, device=\"cuda\").to(torch.int8)\n if not transpose[0] and not transpose[1]:\n out2 = torch.matmul(A.float(), B.float())\n out = F.igemm(A, B)\n elif not transpose[0] and transpose[1]:\n out2 = torch.matmul(A.float(), B.t().float())\n out = F.igemm(A, B.t())\n elif transpose[0] and not transpose[1]:\n out2 = torch.matmul(A.t().float(), B.float())\n out = F.igemm(A.t(), B)\n elif transpose[0] and transpose[1]:\n out2 = torch.matmul(A.t().float(), B.t().float())\n out = F.igemm(A.t(), B.t())\n\n torch.testing.assert_close(out.float(), out2)\n\n for i in range(k):\n shapeA = (batch_dim, seq_dim, hidden_dim)\n shapeB = (\n (32 * random.randint(1, 4), hidden_dim) if transpose[1] else (hidden_dim, 32 * random.randint(1, 4))\n )\n A = torch.randint(-128, 127, size=shapeA, device=\"cuda\").to(torch.int8)\n B = torch.randint(-128, 127, size=shapeB, device=\"cuda\").to(torch.int8)\n if not transpose[0] and not transpose[1]:\n out2 = torch.matmul(A.float(), B.float())\n out = F.igemm(A, B)\n elif not transpose[0] and transpose[1]:\n out2 = torch.matmul(A.float(), B.t().float())\n out = F.igemm(A, B.t())\n\n torch.testing.assert_close(out.float(), out2)\n\n @pytest.mark.parametrize(\"seq_dim\", [32, 256, 512], ids=id_formatter(\"seq_dim\"))\n @pytest.mark.parametrize(\"hidden_dim\", [64, 1024, 4096], ids=id_formatter(\"hidden_dim\"))\n @pytest.mark.parametrize(\"batch_dim\", [2, 8, 16], ids=id_formatter(\"batch_dim\"))\n def test_dim3_igemm(self, seq_dim, hidden_dim, batch_dim):\n seq_dim = seq_dim - (seq_dim % 32)\n hidden_dim = hidden_dim - (hidden_dim % 32)\n batch_dim = batch_dim - (batch_dim % 2)\n for i in range(25):\n A = torch.randint(-128, 127, size=(batch_dim, seq_dim, hidden_dim), device=\"cuda\").to(torch.int8)\n B = torch.randint(-128, 127, size=(batch_dim, seq_dim, 1024), device=\"cuda\").to(torch.int8)\n out2 = torch.einsum(\"bsi, bso->io\", A.float(), B.float())\n iout = torch.empty(A.shape[2], B.shape[2], dtype=torch.int32, device=A.device)\n out = F.igemm(A, B, out=iout)\n\n torch.testing.assert_close(out.float(), out2)\n\n @pytest.mark.parametrize(\"seq_dim\", [32, 512], ids=id_formatter(\"seq_dim\"))\n @pytest.mark.parametrize(\"hidden_dim\", [32, 1024 * 4], ids=id_formatter(\"hidden_dim\"))\n @pytest.mark.parametrize(\"batch_dim\", [2, 16], ids=id_formatter(\"batch_dim\"))\n @pytest.mark.parametrize(\"transpose\", TRUE_FALSE, ids=id_formatter(\"transpose\"))\n def test_minmax_igemm(self, seq_dim, hidden_dim, batch_dim, transpose):\n def min_max(x):\n maxA = torch.amax(x, dim=2, keepdim=True)\n minA = torch.amin(x, dim=2, keepdim=True)\n scale = (maxA - minA) / 2.0\n return (127 * (x - minA - scale) / scale).to(torch.int8), minA, scale\n\n seq_dim = seq_dim - (seq_dim % 16)\n hidden_dim = hidden_dim - (hidden_dim % 16)\n batch_dim = batch_dim - (batch_dim % 2)\n errs = []\n relerrs = []\n errs2 = []\n relerrs2 = []\n for i in range(k):\n A = torch.normal(0.0, 0.5, size=(batch_dim, seq_dim, hidden_dim), device=\"cuda\")\n if transpose:\n B = torch.normal(0, 0.5, size=(256, hidden_dim), device=\"cuda\")\n else:\n B = torch.normal(0, 0.5, size=(hidden_dim, 256), device=\"cuda\")\n Ac, minA, scale = min_max(A)\n if transpose:\n maxB, Bc = quant_multi(B, dim=(1 if transpose else 0))\n out = F.igemm(Ac, Bc.t())\n out2 = torch.matmul(A, 
B.t())\n offset = B.t().sum(0) * (minA + scale)\n out = out.float()\n out = (out * maxB.t() * scale / (127 * 127)) + offset\n\n maxA, Ac = quant_multi(A, dim=2)\n out3 = F.igemm(Ac, Bc.t())\n out3 = mm_dequant(maxA, maxB.t(), out3)\n else:\n maxB, Bc = quant_multi(B, dim=0)\n offset = B.sum(0) * (minA + scale)\n out = F.igemm(Ac, Bc)\n out2 = torch.matmul(A, B)\n out = out.float()\n out = (out * maxB * scale / (127 * 127)) + offset\n\n maxA, Ac = quant_multi(A, dim=2)\n out3 = F.igemm(Ac, Bc)\n out3 = mm_dequant(maxA, maxB, out3)\n\n std = out2.std()\n out2 /= std\n# ... truncated ...","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":true} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.assert_all_approx_close","uri":"program://bitsandbytes/function/tests.test_functional.assert_all_approx_close#L28-L36","kind":"function","name":"assert_all_approx_close","path":"tests/test_functional.py","language":"python","start_line":28,"end_line":36,"context_start_line":8,"context_end_line":56,"code":"import pytest\nimport torch\n\nimport bitsandbytes as bnb\nfrom bitsandbytes import functional as F\nfrom bitsandbytes.cextension import HIP_ENVIRONMENT, ROCM_GPU_ARCH\nfrom tests.helpers import (\n BOOLEAN_TUPLES,\n TRUE_FALSE,\n describe_dtype,\n get_available_devices,\n get_test_dims,\n id_formatter,\n is_supported_on_hpu,\n)\n\ntorch.set_printoptions(precision=5, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000)\nk = 20\n\n\ndef assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True):\n idx = torch.isclose(a, b, rtol=rtol, atol=atol)\n sumval = (idx == 0).sum().item()\n if sumval > count:\n if throw:\n print(f\"Too many values not close: assert {sumval} < {count}\")\n torch.testing.assert_close(a, b, rtol=rtol, atol=atol)\n\n return sumval\n\n\nclass FFN(torch.nn.Module):\n def __init__(self, input_features, hidden_size, bias=True):\n super().__init__()\n self.fc1 = torch.nn.Linear(input_features, hidden_size, bias=bias)\n self.fc2 = torch.nn.Linear(hidden_size, input_features, bias=bias)\n\n with torch.no_grad():\n torch.nn.init.xavier_uniform_(self.fc1.weight)\n torch.nn.init.xavier_uniform_(self.fc2.weight)\n\n def forward(self, x):\n x = torch.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n\nclass Timer:\n def __init__(self):","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.FFN","uri":"program://bitsandbytes/class/tests.test_functional.FFN#L39-L52","kind":"class","name":"FFN","path":"tests/test_functional.py","language":"python","start_line":39,"end_line":52,"context_start_line":19,"context_end_line":72,"code":" get_test_dims,\n id_formatter,\n is_supported_on_hpu,\n)\n\ntorch.set_printoptions(precision=5, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000)\nk = 20\n\n\ndef assert_all_approx_close(a, b, rtol=1e-3, atol=1e-3, count=0, throw=True):\n idx = torch.isclose(a, b, rtol=rtol, atol=atol)\n sumval = (idx == 0).sum().item()\n if sumval > count:\n if throw:\n print(f\"Too many values not close: assert {sumval} < {count}\")\n torch.testing.assert_close(a, b, rtol=rtol, atol=atol)\n\n return sumval\n\n\nclass FFN(torch.nn.Module):\n def __init__(self, input_features, hidden_size, bias=True):\n super().__init__()\n self.fc1 = torch.nn.Linear(input_features, hidden_size, bias=bias)\n self.fc2 = torch.nn.Linear(hidden_size, input_features, bias=bias)\n\n with torch.no_grad():\n 
torch.nn.init.xavier_uniform_(self.fc1.weight)\n torch.nn.init.xavier_uniform_(self.fc2.weight)\n\n def forward(self, x):\n x = torch.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n\nclass Timer:\n def __init__(self):\n self.starts = {}\n self.ends = {}\n self.agg = {}\n\n def tick(self, name=\"default\"):\n if name not in self.starts:\n self.starts[name] = torch.cuda.Event(enable_timing=True)\n self.ends[name] = torch.cuda.Event(enable_timing=True)\n self.starts[name].record()\n else:\n ms = self.tock(name, evict=True, print_ms=False)\n\n def tock(self, name=\"default\", evict=True, print_ms=True):\n if name in self.ends:\n self.ends[name].record()\n torch.cuda.synchronize()","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.Timer","uri":"program://bitsandbytes/class/tests.test_functional.Timer#L55-L90","kind":"class","name":"Timer","path":"tests/test_functional.py","language":"python","start_line":55,"end_line":90,"context_start_line":35,"context_end_line":110,"code":"\n return sumval\n\n\nclass FFN(torch.nn.Module):\n def __init__(self, input_features, hidden_size, bias=True):\n super().__init__()\n self.fc1 = torch.nn.Linear(input_features, hidden_size, bias=bias)\n self.fc2 = torch.nn.Linear(hidden_size, input_features, bias=bias)\n\n with torch.no_grad():\n torch.nn.init.xavier_uniform_(self.fc1.weight)\n torch.nn.init.xavier_uniform_(self.fc2.weight)\n\n def forward(self, x):\n x = torch.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n\nclass Timer:\n def __init__(self):\n self.starts = {}\n self.ends = {}\n self.agg = {}\n\n def tick(self, name=\"default\"):\n if name not in self.starts:\n self.starts[name] = torch.cuda.Event(enable_timing=True)\n self.ends[name] = torch.cuda.Event(enable_timing=True)\n self.starts[name].record()\n else:\n ms = self.tock(name, evict=True, print_ms=False)\n\n def tock(self, name=\"default\", evict=True, print_ms=True):\n if name in self.ends:\n self.ends[name].record()\n torch.cuda.synchronize()\n ms = self.starts[name].elapsed_time(self.ends[name])\n if name not in self.agg:\n self.agg[name] = 0.0\n self.agg[name] += ms\n if evict:\n self.starts.pop(name)\n self.ends.pop(name)\n\n if print_ms and name in self.agg:\n print(f\"{name} took: {self.agg[name] / 1000.0:.5f}s\")\n\n return self.agg[name]\n\n def reset(self):\n self.starts = {}\n self.ends = {}\n self.agg = {}\n print(\"Resetting benchmark data\")\n\n\nclass Test8BitBlockwiseQuantizeFunctional:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n @pytest.mark.parametrize(\"nested\", TRUE_FALSE, ids=id_formatter(\"nested\"))\n @pytest.mark.parametrize(\n \"blocksize\",\n [4096, 2048, 1024, 512, 256, 128, 64] if not HIP_ENVIRONMENT else [4096, 2048, 1024, 512, 256, 128],\n )\n @pytest.mark.parametrize(\"signed\", TRUE_FALSE, ids=id_formatter(\"signed\"))\n def test_dynamic_blockwise_quantization(self, device, dtype, nested, blocksize, signed):\n iters = 100\n\n if device != \"cuda\":\n iters = 10\n\n # This test is slow in our non-CUDA implementations, so avoid atypical use cases.\n if nested:\n pytest.skip(\"Not a typical use case.\")","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} 
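Editor's note: before the Test8BitBlockwiseQuantizeFunctional records below, a minimal sketch of the quantize/dequantize round trip they measure (assumptions: bitsandbytes is importable and the CPU float32 path is used, matching the blocksize-256 case the test keeps for non-CUDA devices):

import torch
import bitsandbytes.functional as F

A1 = torch.randn(1024, 1024, dtype=torch.float32)  # reference tensor, as in the test
C, S = F.quantize_blockwise(A1, blocksize=256)     # quantized codes plus per-block scaling state
A2 = F.dequantize_blockwise(C, S)                  # reconstruction in the original dtype
print((A1 - A2).abs().mean().item())               # the test bounds this mean abs error below 0.011

The returned state S carries the per-block scaling information (and the code table, when a custom one is passed via code=), which is why dequantize_blockwise needs no further arguments here.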
{"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.Test8BitBlockwiseQuantizeFunctional","uri":"program://bitsandbytes/class/tests.test_functional.Test8BitBlockwiseQuantizeFunctional#L93-L297","kind":"class","name":"Test8BitBlockwiseQuantizeFunctional","path":"tests/test_functional.py","language":"python","start_line":93,"end_line":297,"context_start_line":73,"context_end_line":317,"code":" ms = self.starts[name].elapsed_time(self.ends[name])\n if name not in self.agg:\n self.agg[name] = 0.0\n self.agg[name] += ms\n if evict:\n self.starts.pop(name)\n self.ends.pop(name)\n\n if print_ms and name in self.agg:\n print(f\"{name} took: {self.agg[name] / 1000.0:.5f}s\")\n\n return self.agg[name]\n\n def reset(self):\n self.starts = {}\n self.ends = {}\n self.agg = {}\n print(\"Resetting benchmark data\")\n\n\nclass Test8BitBlockwiseQuantizeFunctional:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n @pytest.mark.parametrize(\"nested\", TRUE_FALSE, ids=id_formatter(\"nested\"))\n @pytest.mark.parametrize(\n \"blocksize\",\n [4096, 2048, 1024, 512, 256, 128, 64] if not HIP_ENVIRONMENT else [4096, 2048, 1024, 512, 256, 128],\n )\n @pytest.mark.parametrize(\"signed\", TRUE_FALSE, ids=id_formatter(\"signed\"))\n def test_dynamic_blockwise_quantization(self, device, dtype, nested, blocksize, signed):\n iters = 100\n\n if device != \"cuda\":\n iters = 10\n\n # This test is slow in our non-CUDA implementations, so avoid atypical use cases.\n if nested:\n pytest.skip(\"Not a typical use case.\")\n if blocksize != 256:\n pytest.skip(\"Only blocksize 256 is used in CPU/MPS/XPU\")\n if dtype != torch.float32:\n pytest.skip(\"Only float32 is used in CPU/MPS/XPU\")\n\n diffs = []\n reldiffs = []\n for i in range(iters):\n A1 = torch.randn(1024, 1024, device=device, dtype=dtype)\n C, S = F.quantize_blockwise(A1, blocksize=blocksize, nested=nested)\n A2 = F.dequantize_blockwise(C, S)\n diff = torch.abs(A1 - A2).float()\n reldiff = diff / torch.abs(A1.float() + 1e-8)\n diffs.append(diff.mean().item())\n reldiffs.append(reldiff.mean().item())\n abserr = sum(diffs) / len(diffs)\n relerr = sum(reldiffs) / len(reldiffs)\n assert abserr < 0.011\n assert relerr < 0.018\n assert A2.dtype == dtype\n\n diffs = []\n code = F.create_dynamic_map(signed=signed)\n for i in range(iters):\n A1 = torch.rand(1024, 1024, device=device, dtype=dtype)\n C, S = F.quantize_blockwise(A1, blocksize=blocksize, nested=nested, code=code)\n A2 = F.dequantize_blockwise(C, S)\n diff = torch.abs(A1 - A2).float()\n reldiff = diff / torch.abs(A1.float() + 1e-8)\n diffs.append(diff.mean().item())\n reldiffs.append(reldiff.mean().item())\n # torch.testing.assert_close(A1, A2, atol=1e-2, rtol=0)\n abserr = sum(diffs) / len(diffs)\n relerr = sum(reldiffs) / len(reldiffs)\n if signed:\n threshold_abserr = 0.0035\n assert abserr < 0.0036\n assert relerr < 0.015\n else:\n assert abserr < 0.0023\n assert relerr < 0.012\n assert A2.dtype == dtype\n\n @pytest.mark.skipif(\"cpu\" not in get_available_devices(), reason=\"CPU is required\")\n @pytest.mark.parametrize(\"hidden\", [128])\n @pytest.mark.parametrize(\"blocksize\", [4096, 16384])\n def test_blockwise_cpu_large(self, hidden, blocksize):\n diffs = []\n reldiffs = []\n batch = 128\n seq = 128\n\n for i in range(2):\n A1 = torch.randn(batch, seq, hidden, device=\"cpu\")\n t0 = time.time()\n C, S = F.quantize_blockwise(A1, blocksize=blocksize)\n A2 = 
F.dequantize_blockwise(C, S, blocksize=blocksize)\n print(time.time() - t0)\n diff = torch.abs(A1 - A2)\n reldiff = diff / torch.abs(A1 + 1e-8)\n diffs.append(diff.mean().item())\n reldiffs.append(reldiff.mean().item())\n assert diffs[-1] < 0.011\n # print(sum(diffs)/len(diffs))\n # print(sum(reldiffs)/len(reldiffs))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"bits\", range(2, 9), ids=id_formatter(\"bits\"))\n @pytest.mark.parametrize(\"method\", [\"linear\", \"fp8\", \"dynamic\"])\n def test_few_bit_quant(self, device, bits, method):\n if bits != 8 and device == \"cpu\":\n pytest.skip(\"CPU implementation only supports 8 bits\")\n\n abserrs = []\n relerrs = []\n code = None\n if method == \"linear\":\n code = F.create_linear_map(True, total_bits=bits).to(device)\n elif method == \"fp8\":\n ebits = math.ceil(bits / 2)\n pbits = bits - ebits - 1\n code = F.create_fp8_map(True, ebits, pbits, bits).to(device)\n elif method == \"dynamic\":\n code = F.create_dynamic_map(True, bits - 0, bits).to(device)\n\n # for some data types we have no zero\n # for some data types we have one zero\n # for some data types we have two zeros\n assert torch.unique(code).numel() in [2**bits, 2**bits - 1], f\"bits: {bits}, method: {method}\"\n # print(method, (code==0).sum())\n assert code.numel() == 256\n for i in range(10):\n values = torch.randn(1, 32, device=device)\n values /= values.abs().max()\n # values[values.abs() < 1e-6] += 1e-5\n\n q1 = []\n v1 = []\n for v in values[0]:\n idx = torch.abs(v - code).argmin()\n q1.append(idx.item())\n v1.append(code[idx].item())\n\n q1 = torch.tensor(q1, device=device)\n v1 = torch.tensor(v1, device=device)\n\n q2, S2 = F.quantize_blockwise(values, code=code)\n v2 = F.dequantize_blockwise(q2, S2)\n\n idx = torch.isclose(q1.int(), q2.int())\n err2 = torch.abs(v2 - values)\n abserrs.append(err2.mean().item())\n relerrs.append((err2 / (1e-10 + values).abs()).mean().item())\n if idx.sum():\n # some weird cases\n err1 = torch.abs(v1 - values).mean()\n # assert err2.mean() <= err1\n else:\n torch.testing.assert_close(q1, q2)\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_fp8_quant(self, device):\n # TODO\n if device == \"cpu\":\n pytest.skip(\"CPU implementation segfaults\")\n\n for e_bits in range(1, 7):\n p_bits = 7 - e_bits\n code = F.create_fp8_map(True, e_bits, p_bits).to(device)\n\n abserr = []\n relerr = []\n for i in range(10):\n A1 = torch.randn(1024, 1024, device=device)\n C, SC = F.quantize_blockwise(A1, code=code)\n A2 = F.dequantize_blockwise(C, SC)\n diff = torch.abs(A1 - A2)\n reldiff = diff / torch.abs(A1 + 1e-8)\n abserr.append(diff.mean().item())\n relerr.append(reldiff.mean().item())\n # assert diff < 0.0075\n # print(sum(abserr)/len(abserr))\n # print(sum(relerr)/len(relerr))\n\n abserr = []\n relerr = []\n for i in range(10):\n A1 = torch.rand(1024, 1024, device=device)\n C, SC = F.quantize_blockwise(A1, code=code)\n A2 = F.dequantize_blockwise(C, SC)\n diff = torch.abs(A1 - A2)\n reldiff = diff / torch.abs(A1 + 1e-8)\n abserr.append(diff.mean().item())\n relerr.append(reldiff.mean().item())\n # assert diff < 0.0075\n # print(sum(abserr)/len(abserr))\n # print(sum(relerr)/len(relerr))\n\n abserr = []\n relerr = []\n for i in range(10):\n A1 = torch.randn(1024, 1024, device=device)\n C, SC = F.quantize_blockwise(A1)\n A2 = F.dequantize_blockwise(C, SC)\n diff = torch.abs(A1 - A2)\n reldiff = diff / torch.abs(A1 + 1e-8)\n abserr.append(diff.mean().item())\n 
relerr.append(reldiff.mean().item())\n # assert diff < 0.0075\n # print(3, sum(abserr)/len(abserr))\n # print(3, sum(relerr)/len(relerr))\n\n @pytest.mark.benchmark\n def test_bench_dequantization(self):\n a = torch.rand(1024, 1024, device=\"cuda\").half()\n code = F.create_fp8_map(True, 3, 0, 4).cuda()\n qa, SA = F.quantize_blockwise(a, code=code)\n print(qa.max())\n\n max_theoretical_mu = 1024 * 1024 * 2 / 1024**3 / 672 * 1000 * 1000\n # print(max_theoretical_mu)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n qa, SA = F.quantize_blockwise(a)\n torch.cuda.synchronize()\n # print((time.time()-t0)/1e6)\n\n\ndef test_stable_embedding():\n layer = bnb.nn.StableEmbedding(1024, 1024)\n layer.reset_parameters()\n\n\ndef quant(x):\n max1 = torch.abs(x).max()\n x = torch.round(x / max1 * 127)\n return max1, x.to(torch.int8)\n\n\ndef dequant(c, maxC):\n return c.float() * (maxC / 127)\n\n\ndef mm_dequant(maxA, maxB, C):\n return C.float() * (maxA / 127) * (maxB / 127)","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_stable_embedding","uri":"program://bitsandbytes/function/tests.test_functional.test_stable_embedding#L301-L303","kind":"function","name":"test_stable_embedding","path":"tests/test_functional.py","language":"python","start_line":301,"end_line":303,"context_start_line":281,"context_end_line":323,"code":" # print(3, sum(relerr)/len(relerr))\n\n @pytest.mark.benchmark\n def test_bench_dequantization(self):\n a = torch.rand(1024, 1024, device=\"cuda\").half()\n code = F.create_fp8_map(True, 3, 0, 4).cuda()\n qa, SA = F.quantize_blockwise(a, code=code)\n print(qa.max())\n\n max_theoretical_mu = 1024 * 1024 * 2 / 1024**3 / 672 * 1000 * 1000\n # print(max_theoretical_mu)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n qa, SA = F.quantize_blockwise(a)\n torch.cuda.synchronize()\n # print((time.time()-t0)/1e6)\n\n\ndef test_stable_embedding():\n layer = bnb.nn.StableEmbedding(1024, 1024)\n layer.reset_parameters()\n\n\ndef quant(x):\n max1 = torch.abs(x).max()\n x = torch.round(x / max1 * 127)\n return max1, x.to(torch.int8)\n\n\ndef dequant(c, maxC):\n return c.float() * (maxC / 127)\n\n\ndef mm_dequant(maxA, maxB, C):\n return C.float() * (maxA / 127) * (maxB / 127)\n\n\ndef quant_multi(x, dim):\n max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)\n max1[max1 == 0] = 1.0\n x = torch.round(x / max1 * 127)","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.quant","uri":"program://bitsandbytes/function/tests.test_functional.quant#L306-L309","kind":"function","name":"quant","path":"tests/test_functional.py","language":"python","start_line":306,"end_line":309,"context_start_line":286,"context_end_line":329,"code":" code = F.create_fp8_map(True, 3, 0, 4).cuda()\n qa, SA = F.quantize_blockwise(a, code=code)\n print(qa.max())\n\n max_theoretical_mu = 1024 * 1024 * 2 / 1024**3 / 672 * 1000 * 1000\n # print(max_theoretical_mu)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n qa, SA = F.quantize_blockwise(a)\n torch.cuda.synchronize()\n # print((time.time()-t0)/1e6)\n\n\ndef test_stable_embedding():\n layer = bnb.nn.StableEmbedding(1024, 1024)\n layer.reset_parameters()\n\n\ndef quant(x):\n max1 = torch.abs(x).max()\n x = torch.round(x / max1 * 127)\n return max1, x.to(torch.int8)\n\n\ndef 
dequant(c, maxC):\n return c.float() * (maxC / 127)\n\n\ndef mm_dequant(maxA, maxB, C):\n return C.float() * (maxA / 127) * (maxB / 127)\n\n\ndef quant_multi(x, dim):\n max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)\n max1[max1 == 0] = 1.0\n x = torch.round(x / max1 * 127)\n return max1, x.to(torch.int8)\n\n\ndef quant_multi_chunk(x, dim, chunk_size=32):\n if dim == 1:\n x_chunked = einops.rearrange(x, \"(c a) b -> c a b\", c=chunk_size)","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.dequant","uri":"program://bitsandbytes/function/tests.test_functional.dequant#L312-L313","kind":"function","name":"dequant","path":"tests/test_functional.py","language":"python","start_line":312,"end_line":313,"context_start_line":292,"context_end_line":333,"code":"\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n qa, SA = F.quantize_blockwise(a)\n torch.cuda.synchronize()\n # print((time.time()-t0)/1e6)\n\n\ndef test_stable_embedding():\n layer = bnb.nn.StableEmbedding(1024, 1024)\n layer.reset_parameters()\n\n\ndef quant(x):\n max1 = torch.abs(x).max()\n x = torch.round(x / max1 * 127)\n return max1, x.to(torch.int8)\n\n\ndef dequant(c, maxC):\n return c.float() * (maxC / 127)\n\n\ndef mm_dequant(maxA, maxB, C):\n return C.float() * (maxA / 127) * (maxB / 127)\n\n\ndef quant_multi(x, dim):\n max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)\n max1[max1 == 0] = 1.0\n x = torch.round(x / max1 * 127)\n return max1, x.to(torch.int8)\n\n\ndef quant_multi_chunk(x, dim, chunk_size=32):\n if dim == 1:\n x_chunked = einops.rearrange(x, \"(c a) b -> c a b\", c=chunk_size)\n max1 = torch.amax(torch.abs(x_chunked), dim=dim + 1, keepdim=True)\n max1 = torch.tile(max1, (1, 1, x.shape[1]))\n max1 = max1.view(x.shape)\n elif dim == 0:","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.mm_dequant","uri":"program://bitsandbytes/function/tests.test_functional.mm_dequant#L316-L317","kind":"function","name":"mm_dequant","path":"tests/test_functional.py","language":"python","start_line":316,"end_line":317,"context_start_line":296,"context_end_line":337,"code":" qa, SA = F.quantize_blockwise(a)\n torch.cuda.synchronize()\n # print((time.time()-t0)/1e6)\n\n\ndef test_stable_embedding():\n layer = bnb.nn.StableEmbedding(1024, 1024)\n layer.reset_parameters()\n\n\ndef quant(x):\n max1 = torch.abs(x).max()\n x = torch.round(x / max1 * 127)\n return max1, x.to(torch.int8)\n\n\ndef dequant(c, maxC):\n return c.float() * (maxC / 127)\n\n\ndef mm_dequant(maxA, maxB, C):\n return C.float() * (maxA / 127) * (maxB / 127)\n\n\ndef quant_multi(x, dim):\n max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)\n max1[max1 == 0] = 1.0\n x = torch.round(x / max1 * 127)\n return max1, x.to(torch.int8)\n\n\ndef quant_multi_chunk(x, dim, chunk_size=32):\n if dim == 1:\n x_chunked = einops.rearrange(x, \"(c a) b -> c a b\", c=chunk_size)\n max1 = torch.amax(torch.abs(x_chunked), dim=dim + 1, keepdim=True)\n max1 = torch.tile(max1, (1, 1, x.shape[1]))\n max1 = max1.view(x.shape)\n elif dim == 0:\n x_chunked = einops.rearrange(x, \"a (b c) -> a b c\", c=chunk_size)\n max1 = torch.amax(torch.abs(x_chunked), dim=dim, keepdim=True)\n max1 = torch.tile(max1, (x.shape[0], 1, 1))\n max1 = 
max1.view(x.shape)","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.quant_multi","uri":"program://bitsandbytes/function/tests.test_functional.quant_multi#L320-L324","kind":"function","name":"quant_multi","path":"tests/test_functional.py","language":"python","start_line":320,"end_line":324,"context_start_line":300,"context_end_line":344,"code":"\ndef test_stable_embedding():\n layer = bnb.nn.StableEmbedding(1024, 1024)\n layer.reset_parameters()\n\n\ndef quant(x):\n max1 = torch.abs(x).max()\n x = torch.round(x / max1 * 127)\n return max1, x.to(torch.int8)\n\n\ndef dequant(c, maxC):\n return c.float() * (maxC / 127)\n\n\ndef mm_dequant(maxA, maxB, C):\n return C.float() * (maxA / 127) * (maxB / 127)\n\n\ndef quant_multi(x, dim):\n max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)\n max1[max1 == 0] = 1.0\n x = torch.round(x / max1 * 127)\n return max1, x.to(torch.int8)\n\n\ndef quant_multi_chunk(x, dim, chunk_size=32):\n if dim == 1:\n x_chunked = einops.rearrange(x, \"(c a) b -> c a b\", c=chunk_size)\n max1 = torch.amax(torch.abs(x_chunked), dim=dim + 1, keepdim=True)\n max1 = torch.tile(max1, (1, 1, x.shape[1]))\n max1 = max1.view(x.shape)\n elif dim == 0:\n x_chunked = einops.rearrange(x, \"a (b c) -> a b c\", c=chunk_size)\n max1 = torch.amax(torch.abs(x_chunked), dim=dim, keepdim=True)\n max1 = torch.tile(max1, (x.shape[0], 1, 1))\n max1 = max1.view(x.shape)\n max1[max1 == 0] = 1.0\n x = torch.round(x / max1 * 127)\n return max1, x.to(torch.int8)\n\n\ndef mean(xx):\n return sum(xx) / float(len(xx))","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.quant_multi_chunk","uri":"program://bitsandbytes/function/tests.test_functional.quant_multi_chunk#L327-L340","kind":"function","name":"quant_multi_chunk","path":"tests/test_functional.py","language":"python","start_line":327,"end_line":340,"context_start_line":307,"context_end_line":360,"code":" max1 = torch.abs(x).max()\n x = torch.round(x / max1 * 127)\n return max1, x.to(torch.int8)\n\n\ndef dequant(c, maxC):\n return c.float() * (maxC / 127)\n\n\ndef mm_dequant(maxA, maxB, C):\n return C.float() * (maxA / 127) * (maxB / 127)\n\n\ndef quant_multi(x, dim):\n max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)\n max1[max1 == 0] = 1.0\n x = torch.round(x / max1 * 127)\n return max1, x.to(torch.int8)\n\n\ndef quant_multi_chunk(x, dim, chunk_size=32):\n if dim == 1:\n x_chunked = einops.rearrange(x, \"(c a) b -> c a b\", c=chunk_size)\n max1 = torch.amax(torch.abs(x_chunked), dim=dim + 1, keepdim=True)\n max1 = torch.tile(max1, (1, 1, x.shape[1]))\n max1 = max1.view(x.shape)\n elif dim == 0:\n x_chunked = einops.rearrange(x, \"a (b c) -> a b c\", c=chunk_size)\n max1 = torch.amax(torch.abs(x_chunked), dim=dim, keepdim=True)\n max1 = torch.tile(max1, (x.shape[0], 1, 1))\n max1 = max1.view(x.shape)\n max1[max1 == 0] = 1.0\n x = torch.round(x / max1 * 127)\n return max1, x.to(torch.int8)\n\n\ndef mean(xx):\n return sum(xx) / float(len(xx))\n\n\nmethods = {\n \"linear\": (\n lambda x, dim: quant(x),\n lambda x, dim: quant(x),\n dequant,\n dequant,\n mm_dequant,\n ),\n \"vectorwise\": (quant_multi, quant_multi, dequant, dequant, mm_dequant),\n}\n\n\n@pytest.mark.skipif(not torch.cuda.is_available(), reason=\"CUDA is required\")\nclass 
TestIGEMMFunctional:","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.mean","uri":"program://bitsandbytes/function/tests.test_functional.mean#L343-L344","kind":"function","name":"mean","path":"tests/test_functional.py","language":"python","start_line":343,"end_line":344,"context_start_line":323,"context_end_line":364,"code":" x = torch.round(x / max1 * 127)\n return max1, x.to(torch.int8)\n\n\ndef quant_multi_chunk(x, dim, chunk_size=32):\n if dim == 1:\n x_chunked = einops.rearrange(x, \"(c a) b -> c a b\", c=chunk_size)\n max1 = torch.amax(torch.abs(x_chunked), dim=dim + 1, keepdim=True)\n max1 = torch.tile(max1, (1, 1, x.shape[1]))\n max1 = max1.view(x.shape)\n elif dim == 0:\n x_chunked = einops.rearrange(x, \"a (b c) -> a b c\", c=chunk_size)\n max1 = torch.amax(torch.abs(x_chunked), dim=dim, keepdim=True)\n max1 = torch.tile(max1, (x.shape[0], 1, 1))\n max1 = max1.view(x.shape)\n max1[max1 == 0] = 1.0\n x = torch.round(x / max1 * 127)\n return max1, x.to(torch.int8)\n\n\ndef mean(xx):\n return sum(xx) / float(len(xx))\n\n\nmethods = {\n \"linear\": (\n lambda x, dim: quant(x),\n lambda x, dim: quant(x),\n dequant,\n dequant,\n mm_dequant,\n ),\n \"vectorwise\": (quant_multi, quant_multi, dequant, dequant, mm_dequant),\n}\n\n\n@pytest.mark.skipif(not torch.cuda.is_available(), reason=\"CUDA is required\")\nclass TestIGEMMFunctional:\n @pytest.mark.parametrize(\"dim1\", [1024 * 2], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [1024 * 16], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"quant_methods\", methods.values(), ids=methods.keys())\n @pytest.mark.parametrize(\"batched\", TRUE_FALSE, ids=id_formatter(\"batched\"))","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.TestIGEMMFunctional","uri":"program://bitsandbytes/class/tests.test_functional.TestIGEMMFunctional#L360-L565","kind":"class","name":"TestIGEMMFunctional","path":"tests/test_functional.py","language":"python","start_line":360,"end_line":565,"context_start_line":340,"context_end_line":585,"code":" return max1, x.to(torch.int8)\n\n\ndef mean(xx):\n return sum(xx) / float(len(xx))\n\n\nmethods = {\n \"linear\": (\n lambda x, dim: quant(x),\n lambda x, dim: quant(x),\n dequant,\n dequant,\n mm_dequant,\n ),\n \"vectorwise\": (quant_multi, quant_multi, dequant, dequant, mm_dequant),\n}\n\n\n@pytest.mark.skipif(not torch.cuda.is_available(), reason=\"CUDA is required\")\nclass TestIGEMMFunctional:\n @pytest.mark.parametrize(\"dim1\", [1024 * 2], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [1024 * 16], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"quant_methods\", methods.values(), ids=methods.keys())\n @pytest.mark.parametrize(\"batched\", TRUE_FALSE, ids=id_formatter(\"batched\"))\n def test_approx_igemm(self, dim1, dim2, quant_methods, batched):\n dim1 = dim1 - (dim1 % 32)\n dim2 = dim2 - (dim2 % 32)\n errors = []\n relerrors = []\n # print(\"\")\n for i in range(5):\n if batched:\n A = torch.normal(0, 0.5, size=(32, dim1, dim2 // 32), device=\"cuda\")\n B = torch.normal(0, 0.5, size=(32, dim2 // 32, dim1), device=\"cuda\")\n maxA, Ac = quant_methods[0](A, 2)\n maxB, Bc = quant_methods[1](B, 1)\n else:\n A = torch.normal(0, 0.5, size=(dim1, dim2), device=\"cuda\")\n B = torch.normal(0, 0.5, size=(dim2, dim1), device=\"cuda\")\n maxA, Ac = 
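The `methods` table packs each quantization scheme as a 5-tuple `(quant_A, quant_B, dequant_A, dequant_B, mm_dequant)`, which `test_approx_igemm` then consumes positionally. A CPU sketch of that protocol with local stand-ins (float matmul substitutes for `F.igemm`, so this runs without CUDA):

```python
import torch

def quant(x, dim):
    # Vectorwise absmax along `dim`, with the zero-scale guard.
    max1 = torch.amax(x.abs(), dim=dim, keepdim=True)
    max1[max1 == 0] = 1.0
    return max1, torch.round(x / max1 * 127).to(torch.int8)

def dequant(xq, max1):
    return xq.float() * (max1 / 127)

def mm_dequant(maxA, maxB, C):
    # Undo both operands' scales after the integer matmul.
    return C.float() * (maxA / 127) * (maxB / 127)

method = (quant, quant, dequant, dequant, mm_dequant)

def approx_int8_mm(A, B, m):
    maxA, Ac = m[0](A, 1)        # row-wise scales for A
    maxB, Bc = m[1](B, 0)        # column-wise scales for B
    C = Ac.float() @ Bc.float()  # float stand-in for F.igemm(Ac, Bc)
    return m[4](maxA, maxB, C)

A, B = torch.randn(64, 32), torch.randn(32, 48)
print((approx_int8_mm(A, B, method) - A @ B).abs().mean())  # small quantization error
```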
quant_methods[0](A, 1)\n maxB, Bc = quant_methods[1](B, 0)\n torch.testing.assert_close(quant_methods[2](maxA, Ac), A, atol=0.025, rtol=0.05)\n if batched:\n out2 = torch.bmm(A, B)\n C = torch.bmm(Ac.float(), Bc.float())\n else:\n out2 = torch.mm(A, B)\n C = F.igemm(Ac, Bc)\n out = quant_methods[4](maxA, maxB, C)\n std = out2.std()\n out /= std\n out2 /= std\n err = torch.abs(out - out2)\n relerr = err / torch.abs(out2)\n errors.append(err.mean().item())\n relerrors.append(relerr.mean().item())\n # print(mean(errors))\n # print(mean(relerrors))\n\n @pytest.mark.parametrize(\"hidden_dim\", [32, 256], ids=id_formatter(\"hidden_dim\"))\n @pytest.mark.parametrize(\"batch_dim\", [16, 256], ids=id_formatter(\"batch_dim\"))\n @pytest.mark.parametrize(\"seq_dim\", [16, 256], ids=id_formatter(\"seq_dim\"))\n @pytest.mark.parametrize(\"transpose\", BOOLEAN_TUPLES, ids=id_formatter(\"transpose\"))\n def test_igemm(self, hidden_dim, batch_dim, transpose, seq_dim):\n hidden_dim = hidden_dim - (hidden_dim % 32)\n batch_dim = batch_dim - (batch_dim % 16)\n seq_dim = seq_dim - (seq_dim % 16)\n for i in range(k):\n shapeA = (batch_dim, hidden_dim) if not transpose[0] else (hidden_dim, batch_dim)\n shapeB = (\n (32 * random.randint(1, 4), hidden_dim) if transpose[1] else (hidden_dim, 32 * random.randint(1, 4))\n )\n A = torch.randint(-128, 127, size=shapeA, device=\"cuda\").to(torch.int8)\n B = torch.randint(-128, 127, size=shapeB, device=\"cuda\").to(torch.int8)\n if not transpose[0] and not transpose[1]:\n out2 = torch.matmul(A.float(), B.float())\n out = F.igemm(A, B)\n elif not transpose[0] and transpose[1]:\n out2 = torch.matmul(A.float(), B.t().float())\n out = F.igemm(A, B.t())\n elif transpose[0] and not transpose[1]:\n out2 = torch.matmul(A.t().float(), B.float())\n out = F.igemm(A.t(), B)\n elif transpose[0] and transpose[1]:\n out2 = torch.matmul(A.t().float(), B.t().float())\n out = F.igemm(A.t(), B.t())\n\n torch.testing.assert_close(out.float(), out2)\n\n for i in range(k):\n shapeA = (batch_dim, seq_dim, hidden_dim)\n shapeB = (\n (32 * random.randint(1, 4), hidden_dim) if transpose[1] else (hidden_dim, 32 * random.randint(1, 4))\n )\n A = torch.randint(-128, 127, size=shapeA, device=\"cuda\").to(torch.int8)\n B = torch.randint(-128, 127, size=shapeB, device=\"cuda\").to(torch.int8)\n if not transpose[0] and not transpose[1]:\n out2 = torch.matmul(A.float(), B.float())\n out = F.igemm(A, B)\n elif not transpose[0] and transpose[1]:\n out2 = torch.matmul(A.float(), B.t().float())\n out = F.igemm(A, B.t())\n\n torch.testing.assert_close(out.float(), out2)\n\n @pytest.mark.parametrize(\"seq_dim\", [32, 256, 512], ids=id_formatter(\"seq_dim\"))\n @pytest.mark.parametrize(\"hidden_dim\", [64, 1024, 4096], ids=id_formatter(\"hidden_dim\"))\n @pytest.mark.parametrize(\"batch_dim\", [2, 8, 16], ids=id_formatter(\"batch_dim\"))\n def test_dim3_igemm(self, seq_dim, hidden_dim, batch_dim):\n seq_dim = seq_dim - (seq_dim % 32)\n hidden_dim = hidden_dim - (hidden_dim % 32)\n batch_dim = batch_dim - (batch_dim % 2)\n for i in range(25):\n A = torch.randint(-128, 127, size=(batch_dim, seq_dim, hidden_dim), device=\"cuda\").to(torch.int8)\n B = torch.randint(-128, 127, size=(batch_dim, seq_dim, 1024), device=\"cuda\").to(torch.int8)\n out2 = torch.einsum(\"bsi, bso->io\", A.float(), B.float())\n iout = torch.empty(A.shape[2], B.shape[2], dtype=torch.int32, device=A.device)\n out = F.igemm(A, B, out=iout)\n\n torch.testing.assert_close(out.float(), out2)\n\n @pytest.mark.parametrize(\"seq_dim\", [32, 512], 
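`test_igemm` compares the integer kernel against a float matmul of the same int8 values and asserts exact equality. That works because int8 × int8 with 32-bit accumulation is exact, and the products stay far below 2^24, so fp32 represents them exactly too. A CPU demonstration of the property (integer matmul is supported on CPU):

```python
import torch

A = torch.randint(-128, 127, (64, 32), dtype=torch.int8)
B = torch.randint(-128, 127, (32, 48), dtype=torch.int8)

out_int = A.int() @ B.int()      # what an igemm kernel accumulates (int32)
out_ref = A.float() @ B.float()  # the float reference used by test_igemm

# Max |sum| here is 128 * 128 * 32 = 524288 < 2^24, so fp32 is exact.
torch.testing.assert_close(out_int.float(), out_ref)
```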
ids=id_formatter(\"seq_dim\"))\n @pytest.mark.parametrize(\"hidden_dim\", [32, 1024 * 4], ids=id_formatter(\"hidden_dim\"))\n @pytest.mark.parametrize(\"batch_dim\", [2, 16], ids=id_formatter(\"batch_dim\"))\n @pytest.mark.parametrize(\"transpose\", TRUE_FALSE, ids=id_formatter(\"transpose\"))\n def test_minmax_igemm(self, seq_dim, hidden_dim, batch_dim, transpose):\n def min_max(x):\n maxA = torch.amax(x, dim=2, keepdim=True)\n minA = torch.amin(x, dim=2, keepdim=True)\n scale = (maxA - minA) / 2.0\n return (127 * (x - minA - scale) / scale).to(torch.int8), minA, scale\n\n seq_dim = seq_dim - (seq_dim % 16)\n hidden_dim = hidden_dim - (hidden_dim % 16)\n batch_dim = batch_dim - (batch_dim % 2)\n errs = []\n relerrs = []\n errs2 = []\n relerrs2 = []\n for i in range(k):\n A = torch.normal(0.0, 0.5, size=(batch_dim, seq_dim, hidden_dim), device=\"cuda\")\n if transpose:\n B = torch.normal(0, 0.5, size=(256, hidden_dim), device=\"cuda\")\n else:\n B = torch.normal(0, 0.5, size=(hidden_dim, 256), device=\"cuda\")\n Ac, minA, scale = min_max(A)\n if transpose:\n maxB, Bc = quant_multi(B, dim=(1 if transpose else 0))\n out = F.igemm(Ac, Bc.t())\n out2 = torch.matmul(A, B.t())\n offset = B.t().sum(0) * (minA + scale)\n out = out.float()\n out = (out * maxB.t() * scale / (127 * 127)) + offset\n\n maxA, Ac = quant_multi(A, dim=2)\n out3 = F.igemm(Ac, Bc.t())\n out3 = mm_dequant(maxA, maxB.t(), out3)\n else:\n maxB, Bc = quant_multi(B, dim=0)\n offset = B.sum(0) * (minA + scale)\n out = F.igemm(Ac, Bc)\n out2 = torch.matmul(A, B)\n out = out.float()\n out = (out * maxB * scale / (127 * 127)) + offset\n\n maxA, Ac = quant_multi(A, dim=2)\n out3 = F.igemm(Ac, Bc)\n out3 = mm_dequant(maxA, maxB, out3)\n\n std = out2.std()\n out2 /= std\n out /= std\n out3 /= std\n\n err = torch.abs(out - out2)\n relerr = err / (torch.abs(out2) + 1e-7)\n\n err2 = torch.abs(out3 - out2)\n relerr2 = err2 / (torch.abs(out2) + 1e-7)\n\n errs.append(err.mean().item())\n relerrs.append(relerr.mean().item())\n errs2.append(err2.mean().item())\n relerrs2.append(relerr2.mean().item())\n # print(mean(errs))\n # print(mean(relerrs))\n # print(mean(errs2))\n # print(mean(relerrs2))\n assert mean(errs) < 0.015\n\n # There's a higher relerr on L40S with torch 2.4+cu118.\n is_sm89 = torch.cuda.get_device_capability() == (8, 9)\n if torch.version.cuda == \"11.8\" and is_sm89 and torch.__version__ < (2, 5):\n assert mean(relerrs) < 0.41\n else:\n assert mean(relerrs) < 0.3\n\n @pytest.mark.parametrize(\"dim1\", [1, 64], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [32, 128], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"dim3\", [32, 256], ids=id_formatter(\"dim3\"))\n @pytest.mark.parametrize(\"dim4\", [32, 256], ids=id_formatter(\"dim4\"))\n @pytest.mark.parametrize(\"transpose\", BOOLEAN_TUPLES, ids=id_formatter(\"transpose\"))\n def test_ibmm(self, dim1, dim2, dim3, dim4, transpose):\n dim2 = dim2 - (dim2 % 16)\n dim3 = dim3 - (dim3 % 16)\n dim4 = dim4 - (dim4 % 16)\n for i in range(k):\n shapeA = (dim1, dim3, dim2) if transpose[0] else (dim1, dim2, dim3)\n shapeB = (dim1, dim4, dim3) if transpose[1] else (dim1, dim3, dim4)\n A = torch.randint(-128, 127, size=shapeA, device=\"cuda\").to(torch.int8)\n B = torch.randint(-128, 127, size=shapeB, device=\"cuda\").to(torch.int8)\n\n if not transpose[0] and not transpose[1]:\n out2 = torch.bmm(A.float(), B.float())\n out = F.igemm(A, B)\n elif not transpose[0] and transpose[1]:\n out2 = torch.bmm(A.float(), B.permute([0, 2, 1]).float())\n out = F.igemm(A, 
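`test_minmax_igemm` uses asymmetric (min/max) quantization: each row is shifted by `minA + scale` so its range maps onto [-127, 127], and the shift is restored after the matmul through an offset built from B's column sums, since `x ≈ xq * scale / 127 + (minA + scale)` implies `A @ B ≈ (Aq @ B) * scale / 127 + (minA + scale) * B.sum(0)`. A CPU sketch of that identity (rounding added; the test's `min_max` truncates):

```python
import torch

def min_max_quant(x):
    maxA = x.amax(dim=1, keepdim=True)
    minA = x.amin(dim=1, keepdim=True)
    scale = (maxA - minA) / 2.0
    xq = torch.round(127 * (x - minA - scale) / scale).to(torch.int8)
    return xq, minA, scale

A = torch.randn(16, 64)
B = torch.randn(64, 8)
Aq, minA, scale = min_max_quant(A)

# Per-row scale commutes with the matmul; the shift becomes a rank-1 offset.
approx = (Aq.float() @ B) * scale / 127 + (minA + scale) * B.sum(0)
print((approx - A @ B).abs().mean())  # only quantization error remains
```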
B.permute([0, 2, 1]))\n elif transpose[0] and not transpose[1]:\n out2 = torch.bmm(A.permute([0, 2, 1]).float(), B.float())\n out = F.igemm(A.permute([0, 2, 1]), B)\n elif transpose[0] and transpose[1]:\n out2 = torch.bmm(A.permute([0, 2, 1]).float(), B.permute([0, 2, 1]).float())\n out = F.igemm(A.permute([0, 2, 1]), B.permute([0, 2, 1]))\n torch.testing.assert_close(out.float(), out2.float())\n\n\nclass TestLLMInt8Functional:\n @staticmethod\n def vectorwise_mm_dequant(xq, S1, S2, dtype=torch.half):\n \"\"\"Reference implementation for the F.int8_mm_dequant function.\"\"\"\n C = 127.0\n\n x = xq.float()\n if len(S1.shape) == 3 and len(x.shape) == 2:\n S1 = S1.squeeze(0)\n if len(S2.shape) == 3 and len(x.shape) == 2:\n S2 = S2.squeeze(0)\n if len(S1.shape) == 2:\n x *= S1 / C\n else:\n x *= S1 / C\n x *= S2 / C\n return x.to(dtype)\n","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.TestLLMInt8Functional","uri":"program://bitsandbytes/class/tests.test_functional.TestLLMInt8Functional#L568-L828","kind":"class","name":"TestLLMInt8Functional","path":"tests/test_functional.py","language":"python","start_line":568,"end_line":828,"context_start_line":548,"context_end_line":848,"code":" shapeA = (dim1, dim3, dim2) if transpose[0] else (dim1, dim2, dim3)\n shapeB = (dim1, dim4, dim3) if transpose[1] else (dim1, dim3, dim4)\n A = torch.randint(-128, 127, size=shapeA, device=\"cuda\").to(torch.int8)\n B = torch.randint(-128, 127, size=shapeB, device=\"cuda\").to(torch.int8)\n\n if not transpose[0] and not transpose[1]:\n out2 = torch.bmm(A.float(), B.float())\n out = F.igemm(A, B)\n elif not transpose[0] and transpose[1]:\n out2 = torch.bmm(A.float(), B.permute([0, 2, 1]).float())\n out = F.igemm(A, B.permute([0, 2, 1]))\n elif transpose[0] and not transpose[1]:\n out2 = torch.bmm(A.permute([0, 2, 1]).float(), B.float())\n out = F.igemm(A.permute([0, 2, 1]), B)\n elif transpose[0] and transpose[1]:\n out2 = torch.bmm(A.permute([0, 2, 1]).float(), B.permute([0, 2, 1]).float())\n out = F.igemm(A.permute([0, 2, 1]), B.permute([0, 2, 1]))\n torch.testing.assert_close(out.float(), out2.float())\n\n\nclass TestLLMInt8Functional:\n @staticmethod\n def vectorwise_mm_dequant(xq, S1, S2, dtype=torch.half):\n \"\"\"Reference implementation for the F.int8_mm_dequant function.\"\"\"\n C = 127.0\n\n x = xq.float()\n if len(S1.shape) == 3 and len(x.shape) == 2:\n S1 = S1.squeeze(0)\n if len(S2.shape) == 3 and len(x.shape) == 2:\n S2 = S2.squeeze(0)\n if len(S1.shape) == 2:\n x *= S1 / C\n else:\n x *= S1 / C\n x *= S2 / C\n return x.to(dtype)\n\n @staticmethod\n def vectorwise_quant(x, dim=1):\n \"\"\"Reference implementation\"\"\"\n max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)\n xq = torch.round(x * (127.0 / max1)).to(torch.int8)\n return xq, max1\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dim1\", [128], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [256], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"dim3\", [499, 512], ids=id_formatter(\"dim3\"))\n @pytest.mark.parametrize(\"dim4\", [512], ids=id_formatter(\"dim4\"))\n @pytest.mark.parametrize(\"dims\", (2, 3), ids=id_formatter(\"dims\"))\n @pytest.mark.parametrize(\"ldb\", (0,), ids=id_formatter(\"ldb\"))\n def test_int8_linear_matmul(self, device, dim1, dim2, dim3, dim4, dims, ldb):\n for i in range(k):\n if dims == 2:\n A = torch.randint(-128, 127, 
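`test_ibmm` spells batched transposition as `permute([0, 2, 1])`, which swaps the last two dims of a `(batch, m, n)` tensor as a zero-copy view. A small check of that equivalence:

```python
import torch

B = torch.randn(4, 8, 16)
Bt = B.permute(0, 2, 1)  # shape (4, 16, 8); a view sharing B's storage
assert Bt.shape == (4, 16, 8) and Bt.data_ptr() == B.data_ptr()

A = torch.randn(4, 8, 16)
out = torch.bmm(A, Bt)                    # (4, 8, 8)
ref = torch.einsum("bij,bkj->bik", A, B)  # the same contraction, spelled out
torch.testing.assert_close(out, ref, rtol=1e-5, atol=1e-5)
```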
size=(dim1, dim3), dtype=torch.int8, device=device)\n elif dims == 3:\n A = torch.randint(-128, 127, size=(dim1, dim2, dim3), dtype=torch.int8, device=device)\n B = torch.randint(-128, 127, size=(dim4, dim3), dtype=torch.int8, device=device)\n C1 = torch.matmul(A.float(), B.t().float())\n\n C2 = F.int8_linear_matmul(A, B)\n torch.testing.assert_close(C1, C2.float())\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dim1\", [32], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [32], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"dim3\", [32], ids=id_formatter(\"dim3\"))\n @pytest.mark.parametrize(\"dim4\", [32], ids=id_formatter(\"dim4\"))\n @pytest.mark.parametrize(\"dims\", (2,), ids=id_formatter(\"dims\"))\n def test_int8_linear_matmul_half(self, device, dim1, dim2, dim3, dim4, dims):\n for i in range(k):\n if dims == 2:\n A = torch.normal(0, 0.5, size=(dim1, dim3), device=device).half()\n elif dims == 3:\n A = torch.normal(0, 0.5, size=(dim1, dim2, dim3), device=device).half()\n B = torch.randn((dim4, dim3), device=device).half()\n torch.nn.init.xavier_uniform_(B)\n C1 = torch.matmul(A, B.t())\n\n A = A.view(-1, A.shape[-1])\n\n CA, statsA, _ = F.int8_vectorwise_quant(A)\n CB, statsB, _ = F.int8_vectorwise_quant(B)\n output = F.int8_mm_dequant(F.int8_linear_matmul(CA, CB), statsA, statsB)\n\n torch.testing.assert_close(C1.view(-1, C1.shape[-1]), output, atol=0.025, rtol=0.05)\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dim1\", (64, 256), ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim4\", (64, 1024), ids=id_formatter(\"dim4\"))\n @pytest.mark.parametrize(\"dims\", (2,), ids=id_formatter(\"dims\"))\n @pytest.mark.parametrize(\"has_bias\", TRUE_FALSE, ids=id_formatter(\"has_bias\"))\n def test_dequant_mm(self, device, dim1, dim4, dims, has_bias):\n inner = 128\n bias = None\n if has_bias:\n bias = torch.randn(dim4, device=device, dtype=torch.float16)\n\n for i in range(1):\n A = torch.randn(dim1, inner, device=device)\n B = torch.randn(dim4, inner, device=device)\n C1 = torch.matmul(A.half(), B.t().half())\n if has_bias:\n C1 += bias\n\n A1, maxA = self.vectorwise_quant(A, dim=1)\n B1, maxB = self.vectorwise_quant(B, dim=1)\n\n C2 = F.int8_linear_matmul(A1, B1)\n\n C4 = self.vectorwise_mm_dequant(C2.float(), maxA, maxB.t())\n if has_bias:\n C4 += bias\n\n # TODO: is something wrong here? 
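`test_int8_linear_matmul_half` exercises the full pipeline quantize → int8 matmul → dequantize, with B stored row-major as `(out_features, in_features)` so the kernel computes `A @ B.t()`. A CPU sketch with float stand-ins for the int8 kernels (local helper names, not the bitsandbytes API):

```python
import torch

def vectorwise_quant(x: torch.Tensor):
    # Row-wise absmax stats, as int8_vectorwise_quant produces.
    stats = x.abs().amax(dim=1, keepdim=True).clamp(min=1e-12)
    return torch.round(x / stats * 127).to(torch.int8), stats

A = torch.randn(32, 64)
B = torch.randn(48, 64)  # weights as (out_features, in_features)

CA, statsA = vectorwise_quant(A)
CB, statsB = vectorwise_quant(B)

C32 = CA.float() @ CB.float().t()                # stand-in for the int8 kernel
out = C32 * (statsA / 127) * (statsB.t() / 127)  # int8_mm_dequant equivalent

print((out - A @ B.t()).abs().mean())            # small quantization error
```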
If so, the problem goes deeper\n # n = C1.numel()\n # p = 0.06\n std = C1.std(0).view(1, -1)\n C1 /= std\n C4 /= std\n # assert_all_approx_close(C1, C4, atol=0.02, rtol=0.1, count=int(n*0.06))\n # assert (count / n < p), f\"error in more than {p} of elements: {count}/{n}={count/n}\"\n\n C5 = F.int8_mm_dequant(C2, maxA, maxB, bias=bias)\n C5 /= std\n torch.testing.assert_close(C5, C4, atol=0.015, rtol=0.1)\n n = C5.numel()\n assert_all_approx_close(C1, C4, atol=0.015, rtol=0.1, count=int(0.01 * n))\n\n @pytest.mark.parametrize(\"dim1\", [1 * 1024], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [1 * 1024], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"dims\", (2,), ids=id_formatter(\"dims\"))\n @pytest.mark.parametrize(\"threshold\", [0.0, 3.0], ids=id_formatter(\"decomp\"))\n @pytest.mark.deprecated\n def test_colrow_absmax(self, dim1, dim2, dims, threshold):\n for i in range(k):\n A = torch.randn(dim1, dim2, device=\"cuda\").half()\n\n assert dims == 2\n\n row_stats1, _ = torch.abs(A.float()).max(1)\n col_stats1, _ = torch.abs(A.float()).max(0)\n\n if threshold > 0.0:\n A_truncated = A.clone()\n A_truncated[torch.abs(A_truncated) >= threshold] = 0.0\n row_stats1_trunc, _ = torch.abs(A_truncated.float()).max(1)\n col_stats1_trunc, _ = torch.abs(A_truncated.float()).max(0)\n\n row_stats2, col_stats2, nnz_block_ptr2 = F.get_colrow_absmax(A, threshold=threshold)\n\n nnz_rows1_counts = (torch.abs(A) >= threshold).sum(1).flatten()\n nnz_block_ptr1 = torch.zeros(\n nnz_rows1_counts.shape[0] + 1,\n dtype=nnz_rows1_counts.dtype,\n device=nnz_rows1_counts.device,\n )\n nnz_block_ptr1[1:] = nnz_rows1_counts.cumsum(0)\n\n torch.testing.assert_close(col_stats1_trunc, col_stats2)\n torch.testing.assert_close(row_stats1_trunc, row_stats2)\n # torch.testing.assert_close(nnz_block_ptr1, nnz_block_ptr2)\n else:\n row_stats2, col_stats2, nnz_block_ptr2 = F.get_colrow_absmax(A, threshold=0.0)\n assert nnz_block_ptr2 is None\n torch.testing.assert_close(col_stats1, col_stats2)\n torch.testing.assert_close(row_stats1, row_stats2)\n\n @pytest.mark.parametrize(\"dim1\", [2048, 4096], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [512, 1024], ids=id_formatter(\"dim2\"))\n @pytest.mark.deprecated\n def test_int8_double_quant(self, dim1, dim2):\n for i in range(k):\n A = torch.randn(dim1, dim2, device=\"cuda\").half()\n out_col1, Scol = self.vectorwise_quant(A, dim=0)\n out_row1, Srow = self.vectorwise_quant(A, dim=1)\n\n CA, CAt, statsA, statsAt, _ = F.int8_double_quant(A)\n\n # max difference is 1 due to rounding differences\n torch.testing.assert_close(CA, out_row1, atol=1, rtol=0)\n torch.testing.assert_close(CAt, out_col1, atol=1, rtol=0)\n\n n = CAt.numel()\n num_not_close_rows = (torch.isclose(CA, out_row1, atol=1) == 0).sum().item()\n num_not_close_cols = (torch.isclose(CAt, out_col1, atol=1) == 0).sum().item()\n\n # allow for 1:500 error due to rounding differences\n min_error = 1 / 500\n if num_not_close_cols > (min_error * n):\n print(\n f\"Min error exceeded {num_not_close_cols} elements are different. Error: {num_not_close_cols / n:.4f}\"\n )\n assert False\n if num_not_close_rows > (min_error * n):\n print(\n f\"Min error exceeded {num_not_close_rows} elements are different. 
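`test_colrow_absmax` checks outlier-aware statistics: with a positive threshold, entries at or above it are zeroed before taking row/column absolute maxima, because those outlier columns are handled separately in the LLM.int8() decomposition. A compact reference mirroring the truncation the test performs (not the `F.get_colrow_absmax` kernel):

```python
import torch

def colrow_absmax(A: torch.Tensor, threshold: float):
    A = A.float().abs()
    if threshold > 0:
        # Exclude outliers from the scale statistics.
        A = torch.where(A >= threshold, torch.zeros_like(A), A)
    return A.amax(dim=1), A.amax(dim=0)  # row stats, col stats

A = torch.randn(128, 64).half()
row_stats, col_stats = colrow_absmax(A, threshold=3.0)
print(row_stats.shape, col_stats.shape)  # torch.Size([128]) torch.Size([64])
```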
Error: {num_not_close_rows / n:.4f}\"\n )\n assert False\n\n torch.testing.assert_close(Srow.flatten().float(), statsA)\n torch.testing.assert_close(Scol.flatten().float(), statsAt)\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\n (\"dim1\", \"dim4\", \"inner\"),\n (\n pytest.param(dim1, dim4, inner, id=f\"{dim1=},{dim4=},{inner=}\")\n for (dim1, dim4, inner) in zip(\n (1, 8, 2048, 4096),\n (2, 128, 2048, 4096),\n (4, 256, 512, 4096),\n )\n ),\n )\n def test_integrated_int8_linear_matmul(self, device, dim1, dim4, inner):\n if device == \"cpu\" and inner > 2048:\n pytest.skip(\"Slow on CPU\")\n\n for i in range(k):\n A = torch.randn(dim1, inner, device=device).half()\n B = torch.randn(dim4, inner, device=device).half()\n\n out1 = torch.matmul(A.half(), B.t().half())\n\n C1a, stats1a, _ = F.int8_vectorwise_quant(A)\n C2a, stats2a, _ = F.int8_vectorwise_quant(B)\n A1, maxA = self.vectorwise_quant(A, dim=1)\n B1, maxB = self.vectorwise_quant(B, dim=1)\n\n torch.testing.assert_close(maxA.flatten().float(), stats1a)\n torch.testing.assert_close(maxB.flatten().float(), stats2a)\n torch.testing.assert_close(C1a, A1, rtol=0, atol=1)\n torch.testing.assert_close(C2a, B1, rtol=0, atol=1)\n\n out2 = F.int8_linear_matmul(A1, B1)\n\n C2 = F.int8_linear_matmul(A1, B1)\n\n out3 = self.vectorwise_mm_dequant(C2.float(), maxA, maxB.t())\n\n err1 = torch.abs(out1 - out2).mean().item()\n err2 = torch.abs(out1 - out3).mean().item()\n assert err2 <= err1 * 1.025\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dim1\", [512, 2048], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [1024, 4096], ids=id_formatter(\"dim2\"))\n def test_coo_double_quant(self, device, dim1, dim2):\n threshold = 2.00\n for i in range(k):\n A = torch.randn(dim1, dim2, device=device).half()\n\n idx = torch.abs(A) >= threshold\n CA, statsA, outlier_cols = F.int8_vectorwise_quant(A, threshold=threshold)\n\n if outlier_cols is not None:\n A1 = A * idx\n A2 = torch.zeros_like(A) + A1\n torch.testing.assert_close(A1, A2)\n\n A[:, outlier_cols] = 0\n A2 = (CA.float() * statsA.unsqueeze(1) / 127).half()\n torch.testing.assert_close(A, A2, rtol=0.05, atol=1.5e-2)\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dim1\", [512, 2048], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [1024, 4096], ids=id_formatter(\"dim2\"))\n def test_coo_int8_vectorwise_quant(self, device, dim1, dim2):\n threshold = 3.00\n for i in range(k):\n A = torch.randn(dim1, dim2, device=device).half()\n\n idx = torch.abs(A) >= threshold\n CA, statsA, outlier_cols = F.int8_vectorwise_quant(A, threshold=threshold)\n\n if outlier_cols is not None:\n A2 = (CA.float() * statsA.unsqueeze(1) / 127).half()\n A[:, outlier_cols] = 0\n torch.testing.assert_close(A * (idx == 0), A2, rtol=0.05, atol=1.5e-2)\n\n\n@pytest.mark.skipif(HIP_ENVIRONMENT, reason=\"this test is not supported on ROCm yet\")\n@pytest.mark.skipif(not torch.cuda.is_available(), reason=\"CUDA is required\")\nclass TestSpMMFunctional:\n @pytest.mark.parametrize(\"dim1\", [256, 1024], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [128, 512], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"transposed_B\", TRUE_FALSE, ids=id_formatter(\"transposed_B\"))\n def test_spmm_coo(self, dim1, dim2, transposed_B):\n threshold = 1.5\n dim3 = torch.randint(32, 128, size=(1,)).item()\n # dim3 = 17\n for i in range(k):\n A = 
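`test_int8_double_quant` tolerates off-by-one disagreements between two independently rounded quantizations, but caps larger mismatches at a 1-in-500 fraction. A simplified sketch of that acceptance rule (hypothetical helper name):

```python
import torch

def assert_mostly_close(a, b, max_error_rate=1 / 500):
    # Off-by-one differences (atol=1) are expected from rounding; anything
    # beyond that may affect at most `max_error_rate` of all elements.
    not_close = (~torch.isclose(a, b, atol=1, rtol=0)).sum().item()
    n = a.numel()
    assert not_close <= max_error_rate * n, (
        f"{not_close}/{n} elements differ by more than 1 ({not_close / n:.4f})"
    )

a = torch.randn(2048, 512)
b = a.clone()
b.view(-1)[:2] += 5.0  # two large disagreements out of ~1M: still passes
assert_mostly_close(a, b)
```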
torch.randn(dim1, dim2).cuda().half()\n if transposed_B:\n B = torch.randn(dim3, dim2).cuda().half()\n else:\n B = torch.randn(dim2, dim3).cuda().half()\n\n idx = torch.abs(A) >= threshold","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.TestSpMMFunctional","uri":"program://bitsandbytes/class/tests.test_functional.TestSpMMFunctional#L833-L1064","kind":"class","name":"TestSpMMFunctional","path":"tests/test_functional.py","language":"python","start_line":833,"end_line":1064,"context_start_line":813,"context_end_line":1084,"code":"\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dim1\", [512, 2048], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [1024, 4096], ids=id_formatter(\"dim2\"))\n def test_coo_int8_vectorwise_quant(self, device, dim1, dim2):\n threshold = 3.00\n for i in range(k):\n A = torch.randn(dim1, dim2, device=device).half()\n\n idx = torch.abs(A) >= threshold\n CA, statsA, outlier_cols = F.int8_vectorwise_quant(A, threshold=threshold)\n\n if outlier_cols is not None:\n A2 = (CA.float() * statsA.unsqueeze(1) / 127).half()\n A[:, outlier_cols] = 0\n torch.testing.assert_close(A * (idx == 0), A2, rtol=0.05, atol=1.5e-2)\n\n\n@pytest.mark.skipif(HIP_ENVIRONMENT, reason=\"this test is not supported on ROCm yet\")\n@pytest.mark.skipif(not torch.cuda.is_available(), reason=\"CUDA is required\")\nclass TestSpMMFunctional:\n @pytest.mark.parametrize(\"dim1\", [256, 1024], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [128, 512], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"transposed_B\", TRUE_FALSE, ids=id_formatter(\"transposed_B\"))\n def test_spmm_coo(self, dim1, dim2, transposed_B):\n threshold = 1.5\n dim3 = torch.randint(32, 128, size=(1,)).item()\n # dim3 = 17\n for i in range(k):\n A = torch.randn(dim1, dim2).cuda().half()\n if transposed_B:\n B = torch.randn(dim3, dim2).cuda().half()\n else:\n B = torch.randn(dim2, dim3).cuda().half()\n\n idx = torch.abs(A) >= threshold\n nnz = (idx == 1).sum().item()\n rows, cols = torch.where(idx)\n values = A[idx]\n cooA = F.COOSparseTensor(A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values)\n A2 = A * idx\n\n if transposed_B:\n out2 = F.spmm_coo(cooA, B.t())\n out1 = torch.matmul(A2, B.t())\n else:\n out2 = F.spmm_coo(cooA, B)\n out1 = torch.matmul(A2, B)\n\n assert_all_approx_close(out1, out2, rtol=0.01, atol=3.0e-2, count=30)\n\n @pytest.mark.benchmark\n def test_spmm_bench(self):\n batch = 2\n model = 1024 * 1\n hidden = model * 4\n seq = 1024\n dim1 = batch * seq\n dim2 = model\n dim3 = hidden\n threshold = 4\n A = torch.randn(dim1, dim2, device=\"cuda\").half()\n B = torch.randn(dim2, dim3, device=\"cuda\").half()\n for i in range(10):\n C1 = bnb.matmul(A, B.t())\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(k):\n C1 = bnb.matmul(A, B.t())\n torch.cuda.synchronize()\n t8 = time.time() - t0\n\n idx = torch.abs(A) >= threshold\n nnz = (idx == 1).sum().item()\n print(nnz / idx.numel())\n rows, cols = torch.where(idx)\n values = A[idx]\n cooA = F.COOSparseTensor(A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values)\n\n for i in range(10):\n out2 = F.spmm_coo(cooA, B)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(k):\n out2 = F.spmm_coo(cooA, B)\n torch.cuda.synchronize()\n tsp = time.time() - t0\n print(tsp, t8)\n print(tsp / t8)\n\n @pytest.mark.parametrize(\"dim1\", [1 * 
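The sparse tests all build their inputs the same way: entries of A whose magnitude reaches the threshold become `(row, col, value)` triplets, which `F.COOSparseTensor` then wraps. A CPU sketch of that construction and its round trip:

```python
import torch

A = torch.randn(16, 32)
threshold = 1.5

idx = A.abs() >= threshold
rows, cols = torch.where(idx)  # coordinates of every kept entry
values = A[idx]                # row-major order, matching (rows, cols)
nnz = values.numel()

# Scattering the triplets back reproduces the thresholded matrix.
dense = torch.zeros_like(A)
dense[rows, cols] = values
torch.testing.assert_close(dense, A * idx)
print(nnz, "nonzeros of", A.numel())
```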
2048], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [12288], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"dtype\", [torch.float16], ids=describe_dtype)\n @pytest.mark.parametrize(\"out_func\", [\"zeros\", \"ones\"], ids=id_formatter(\"out_func\"))\n def test_spmm_coo_very_sparse(self, dim1, dim2, dtype, out_func):\n out_func = getattr(torch, out_func)\n\n threshold = 3.3\n # threshold = 2.8\n # threshold = 0.0\n A = torch.randn(dim1, dim2, device=\"cuda\").half()\n if dtype == torch.float16:\n B = torch.randn(dim2, dim2 * 4, device=\"cuda\").half()\n torch.nn.init.xavier_uniform_(B)\n else:\n B = torch.randn(dim2, dim2 * 4, device=\"cuda\").half()\n torch.nn.init.xavier_uniform_(B)\n\n SB = torch.abs(B).max().float()\n B = torch.round(B / SB * 127).to(torch.int8)\n\n print(\"\")\n idx = torch.abs(A) >= threshold\n nnz = (idx == 1).sum().item()\n rows, cols = torch.where(idx)\n values = A[idx]\n cooA = F.COOSparseTensor(A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values)\n A2 = A * idx\n out1 = torch.matmul(A2.half(), B.half())\n out = out_func(out1.shape, dtype=torch.float16, device=out1.device)\n out1 += out.clone()\n out2 = F.spmm_coo_very_sparse(cooA, B, out=out)\n # print(B)\n # print(out1)\n # print(out2)\n p = 200 / (2048 * 12288 * 4)\n n = out1.numel()\n count = math.ceil(p * n)\n std = out1.std()\n out1 /= std\n out2 /= std\n assert_all_approx_close(out1, out2.half(), rtol=0.01, atol=3.0e-2, count=count)\n # assert_all_approx_close(out1, out2.half(), rtol=0.05, atol=0.01, count=count)\n\n idx_col = torch.randint(0, A2.shape[-1], size=(15,))\n\n # torch.testing.assert_close(out1, out2.half(), rtol=0.05, atol=0.001)\n\n # Bt = torch.randn(dim2*4, dim2, device='cuda').half()\n # torch.cuda.synchronize()\n # t0 = time.time()\n # print(A2.shape, B.shape)\n # for i in range(100):\n # #out3 = F.spmm_coo(cooA, Bt.t())\n # #out2 = F.spmm_coo(cooA, B)\n # #out2 = F.spmm_coo_very_sparse(cooA, B)\n # #out1 = torch.matmul(A, Bt.t())\n\n # torch.cuda.synchronize()\n # print(time.time() - t0)\n\n @pytest.mark.parametrize(\"dim1\", [1 * 2048])\n @pytest.mark.parametrize(\"dim2\", [2048])\n @pytest.mark.parametrize(\"dtype\", [torch.int8])\n def test_spmm_coo_dequant(self, dim1, dim2, dtype):\n threshold = 6.0\n # threshold = 2.8\n # threshold = 0.0\n A = torch.randn(dim1, dim2, device=\"cuda\").half()\n B = torch.empty(dim2, dim2 * 4, device=\"cuda\", dtype=torch.float16)\n torch.nn.init.xavier_uniform_(B)\n Bt = B.t().contiguous()\n\n CB, CBt, statsB, statsBt, coo_tensor = F.int8_double_quant(B)\n\n rowidx = torch.randint(0, A.shape[-1], size=(15,))\n\n A[:, rowidx] = 8.0\n\n idx = torch.abs(A) >= threshold\n nnz = (idx == 1).sum().item()\n rows, cols = torch.where(idx)\n values = A[idx]\n cooA = F.COOSparseTensor(A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values)\n A2 = A * idx\n out2 = F.spmm_coo_very_sparse(cooA, CBt, dequant_stats=statsBt)\n out1 = torch.matmul(A2, B.half())\n out3 = F.spmm_coo_very_sparse(cooA, CBt.half())\n out3 = out3 * statsBt.half() / 127\n\n values, counts = torch.unique(cooA.rowidx, return_counts=True)\n offset = counts.cumsum(0).int()\n max_count, max_idx = torch.sort(counts, descending=True)\n print(torch.median(max_count.float()))\n\n torch.testing.assert_close(out2, out3, rtol=0.05, atol=0.001)\n\n p = 200 / (2048 * 12288 * 4)\n n = out1.numel()\n count = math.ceil(p * n)\n assert_all_approx_close(out1, out2, rtol=0.01, atol=3.0e-2, count=count)\n\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in 
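These comparisons lean on the file's `assert_all_approx_close` helper: up to `count` elements may violate the tolerance before the strict assertion fires, which absorbs the handful of boundary cases quantized kernels produce. A simplified version of that budgeted check:

```python
import torch

def assert_all_approx_close(a, b, rtol, atol, count):
    # Tolerate up to `count` mismatches; beyond that, fail loudly with the
    # full elementwise report from torch.testing.
    not_close = (~torch.isclose(a, b, rtol=rtol, atol=atol)).sum().item()
    if not_close > count:
        print(f"Too many values not close: assert {not_close} < {count}")
        torch.testing.assert_close(a, b, rtol=rtol, atol=atol)

a = torch.randn(1024)
b = a.clone()
b[:10] += 1.0  # 10 outliers, within the budget of 30
assert_all_approx_close(a, b, rtol=0.01, atol=3.0e-2, count=30)  # passes
```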
range(100):\n # out2 = F.spmm_coo_very_sparse(cooA, B)\n # torch.cuda.synchronize()\n # print('fp16', time.time() - t0)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n out2 = F.spmm_coo(cooA, B)\n torch.cuda.synchronize()\n print(\"cusparse fp16\", time.time() - t0)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n out2 = F.spmm_coo_very_sparse(cooA, CBt)\n torch.cuda.synchronize()\n print(\"int8\", time.time() - t0)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n out2 = F.spmm_coo_very_sparse(cooA, CBt, dequant_stats=statsBt)\n torch.cuda.synchronize()\n print(\"int8+dequant\", time.time() - t0)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n out2 = torch.matmul(A, B)\n torch.cuda.synchronize()\n print(\"matmul\", time.time() - t0)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n out1 = bnb.matmul(A, Bt)\n out2 = F.spmm_coo_very_sparse(cooA, CBt, dequant_stats=statsBt)\n out = out1 + out2\n torch.cuda.synchronize()\n print(\"sparse+ matmul\", time.time() - t0)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n out1 = bnb.matmul(A, Bt)\n torch.matmul(A[:, rowidx], Bt.t()[rowidx], out=out1)\n torch.cuda.synchronize()\n print(\"partial matmul\", time.time() - t0)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n out1 = bnb.matmul(A, Bt)\n torch.cuda.synchronize()\n print(\"partial matmul\", time.time() - t0)\n\n\n@pytest.mark.skipif(not torch.cuda.is_available(), reason=\"CUDA is required\")\nclass TestSparseTensorFunctional:\n def test_coo2csr(self):\n threshold = 1\n A = torch.randn(128, 128).half().cuda()\n idx = torch.abs(A) >= threshold\n nnz = (idx == 1).sum().item()\n rows, cols = torch.where(idx)\n values = A[idx]\n cooA = F.COOSparseTensor(A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values)\n A2 = A * idx\n csrA = F.coo2csr(cooA)\n counts = csrA.rowptr[1:] - csrA.rowptr[:-1]\n assert counts.numel() == A.shape[0]\n\n torch.testing.assert_close(counts.long(), (A2 != 0).sum(1))\n idx = A2 != 0\n torch.testing.assert_close(A2[idx], csrA.values)","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.TestSparseTensorFunctional","uri":"program://bitsandbytes/class/tests.test_functional.TestSparseTensorFunctional#L1068-L1102","kind":"class","name":"TestSparseTensorFunctional","path":"tests/test_functional.py","language":"python","start_line":1068,"end_line":1102,"context_start_line":1048,"context_end_line":1122,"code":" torch.cuda.synchronize()\n print(\"sparse+ matmul\", time.time() - t0)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n out1 = bnb.matmul(A, Bt)\n torch.matmul(A[:, rowidx], Bt.t()[rowidx], out=out1)\n torch.cuda.synchronize()\n print(\"partial matmul\", time.time() - t0)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n out1 = bnb.matmul(A, Bt)\n torch.cuda.synchronize()\n print(\"partial matmul\", time.time() - t0)\n\n\n@pytest.mark.skipif(not torch.cuda.is_available(), reason=\"CUDA is required\")\nclass TestSparseTensorFunctional:\n def test_coo2csr(self):\n threshold = 1\n A = torch.randn(128, 128).half().cuda()\n idx = torch.abs(A) >= threshold\n nnz = (idx == 1).sum().item()\n rows, cols = torch.where(idx)\n values = A[idx]\n cooA = F.COOSparseTensor(A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values)\n A2 = A * idx\n csrA = 
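Every benchmark loop above follows the same synchronize-bracket pattern: CUDA launches are asynchronous, so without a `torch.cuda.synchronize()` on both ends the host clock measures only launch overhead. A generic sketch of the pattern (requires a CUDA device to run):

```python
import time
import torch

def bench(fn, iters=100):
    for _ in range(10):  # warmup: kernel compilation, caches, allocator
        fn()
    torch.cuda.synchronize()  # drain all pending work before starting the clock
    t0 = time.time()
    for _ in range(iters):
        fn()
    torch.cuda.synchronize()  # wait for the timed work to actually finish
    return (time.time() - t0) / iters

# Usage (requires CUDA):
# A = torch.randn(4096, 4096, device="cuda").half()
# print(bench(lambda: A @ A), "s per matmul")
```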
F.coo2csr(cooA)\n counts = csrA.rowptr[1:] - csrA.rowptr[:-1]\n assert counts.numel() == A.shape[0]\n\n torch.testing.assert_close(counts.long(), (A2 != 0).sum(1))\n idx = A2 != 0\n torch.testing.assert_close(A2[idx], csrA.values)\n\n def test_coo2csc(self):\n threshold = 1\n A = torch.randn(128, 128).half().cuda()\n idx = torch.abs(A) >= threshold\n nnz = (idx == 1).sum().item()\n rows, cols = torch.where(idx)\n values = A[idx]\n cooA = F.COOSparseTensor(A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values)\n A2 = A * idx\n cscA = F.coo2csc(cooA)\n counts = cscA.colptr[1:] - cscA.colptr[:-1]\n assert counts.numel() == A.shape[1]\n\n torch.testing.assert_close(counts.long(), (A2 != 0).sum(0))\n # torch uses row-major -> use transpose to transfer to col-major\n idx = A2.t() != 0\n torch.testing.assert_close(A2.t()[idx], cscA.values)\n\n\nclass TestQuantize4BitFunctional:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n @pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"])\n @pytest.mark.parametrize(\n \"blocksize\",\n [64, 128, 256, 512, 1024, 2048, 4096] if not HIP_ENVIRONMENT else [128, 256, 512, 1024, 2048, 4096],\n )\n def test_4bit_quant(self, device, dtype, quant_type, blocksize):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n A1 = torch.randn(1024, 1024, device=device, dtype=dtype)\n qa, SA = F.quantize_4bit(A1, blocksize=blocksize, quant_type=quant_type)\n A2 = F.dequantize_4bit(qa, SA, blocksize=blocksize, quant_type=quant_type)\n\n err = (A1 - A2).abs().float()\n relerr = (err / (A1.abs().float() + 1e-8)).mean()","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.TestQuantize4BitFunctional","uri":"program://bitsandbytes/class/tests.test_functional.TestQuantize4BitFunctional#L1105-L1442","kind":"class","name":"TestQuantize4BitFunctional","path":"tests/test_functional.py","language":"python","start_line":1105,"end_line":1442,"context_start_line":1085,"context_end_line":1459,"code":"\n def test_coo2csc(self):\n threshold = 1\n A = torch.randn(128, 128).half().cuda()\n idx = torch.abs(A) >= threshold\n nnz = (idx == 1).sum().item()\n rows, cols = torch.where(idx)\n values = A[idx]\n cooA = F.COOSparseTensor(A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values)\n A2 = A * idx\n cscA = F.coo2csc(cooA)\n counts = cscA.colptr[1:] - cscA.colptr[:-1]\n assert counts.numel() == A.shape[1]\n\n torch.testing.assert_close(counts.long(), (A2 != 0).sum(0))\n # torch uses row-major -> use transpose to transfer to col-major\n idx = A2.t() != 0\n torch.testing.assert_close(A2.t()[idx], cscA.values)\n\n\nclass TestQuantize4BitFunctional:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n @pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"])\n @pytest.mark.parametrize(\n \"blocksize\",\n [64, 128, 256, 512, 1024, 2048, 4096] if not HIP_ENVIRONMENT else [128, 256, 512, 1024, 2048, 4096],\n )\n def test_4bit_quant(self, device, dtype, quant_type, blocksize):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n A1 = torch.randn(1024, 1024, device=device, 
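What `test_coo2csr` verifies, in plain torch: the CSR row pointer is the exclusive prefix sum of per-row nonzero counts, so `rowptr[i+1] - rowptr[i]` recovers the count for row `i`. A self-contained sketch of that invariant:

```python
import torch

A = torch.randn(128, 128)
idx = A.abs() >= 1
rows, _ = torch.where(idx)

counts = torch.bincount(rows, minlength=A.shape[0])  # nonzeros per row
rowptr = torch.zeros(A.shape[0] + 1, dtype=torch.long)
rowptr[1:] = counts.cumsum(0)                        # exclusive prefix sum

# The same invariant the test asserts against csrA.rowptr:
torch.testing.assert_close(rowptr[1:] - rowptr[:-1], (A.abs() >= 1).sum(1))
```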
dtype=dtype)\n qa, SA = F.quantize_4bit(A1, blocksize=blocksize, quant_type=quant_type)\n A2 = F.dequantize_4bit(qa, SA, blocksize=blocksize, quant_type=quant_type)\n\n err = (A1 - A2).abs().float()\n relerr = (err / (A1.abs().float() + 1e-8)).mean()\n err = err.mean()\n\n assert A2.dtype == dtype\n\n # With larger block sizes, we can expect this to blow up.\n # At blocksize>=1024, don't even bother looking at relerr.\n #\n # Actually, the above is not true anymore after fixing the integer packing bug.\n # The following values were taken from averaging 1k samples per test configuration after fixing the bug.\n error_dict = dict()\n error_dict[\"fp4\"] = dict()\n error_dict[\"nf4\"] = dict()\n error_dict[\"fp4\"][\"err\"] = {\n 64: 0.096545,\n 128: 0.102947,\n 256: 0.108685,\n 512: 0.114087,\n 1024: 0.119312,\n 2048: 0.124460,\n 4096: 0.129573,\n }\n error_dict[\"fp4\"][\"rel_err\"] = {\n 64: 0.260130,\n 128: 0.275734,\n 256: 0.289842,\n 512: 0.302852,\n 1024: 0.314982,\n 2048: 0.326402,\n 4096: 0.337228,\n }\n\n error_dict[\"nf4\"][\"err\"] = {\n 64: 0.072792,\n 128: 0.076835,\n 256: 0.080326,\n 512: 0.083535,\n 1024: 0.086603,\n 2048: 0.089592,\n 4096: 0.092537,\n }\n error_dict[\"nf4\"][\"rel_err\"] = {\n 64: 0.203299,\n 128: 0.215252,\n 256: 0.226044,\n 512: 0.236021,\n 1024: 0.245365,\n 2048: 0.254146,\n 4096: 0.262457,\n }\n\n # Allow higher tolerance for fp32 on CPU with larger block sizes\n reltol = 2.8e-3 if dtype == torch.float32 and blocksize >= 128 and device == \"cpu\" else 1e-3\n errtol = 1.2e-3 if dtype == torch.float32 and blocksize >= 1024 and device == \"cpu\" else 1e-3\n\n assert err < error_dict[quant_type][\"err\"][blocksize] + errtol\n assert relerr < error_dict[quant_type][\"rel_err\"][blocksize] + reltol\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"])\n @pytest.mark.parametrize(\"blocksize\", [64, 128] if not HIP_ENVIRONMENT else [128], ids=id_formatter(\"blocksize\"))\n @pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16], ids=describe_dtype)\n def test_4bit_compressed_stats(self, device, quant_type, blocksize, dtype):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype):\n pytest.skip(\"FP4 quantization is not supported on HPU.\")\n\n errs1 = []\n errs2 = []\n for i in range(10):\n A1 = torch.randn(1024, 1024, device=device, dtype=dtype)\n q2, SA2 = F.quantize_4bit(A1, blocksize=blocksize, quant_type=quant_type)\n q3, SA3 = F.quantize_4bit(A1, blocksize=blocksize, compress_statistics=True, quant_type=quant_type)\n A2 = F.dequantize_4bit(q2, SA2, quant_type=quant_type)\n A3 = F.dequantize_4bit(q3, SA3, quant_type=quant_type)\n\n err = (A1 - A2).abs().float()\n relerr = (err / (A1.abs().float() + 1e-15)).mean()\n err = err.mean()\n\n errs1.append(err.item())\n\n assert err.item() < 0.11\n assert relerr.item() < 0.28\n\n err = (A1 - A3).abs().float()\n relerr = (err / (A1.abs().float() + 1e-15)).mean()\n err = err.mean()\n\n errs2.append(err.item())\n\n assert err.item() < 0.11\n assert relerr.item() < 0.28\n\n # @pytest.mark.parametrize(\"quant_type\", ['fp4', 'nf4'])\n @pytest.mark.parametrize(\"quant_type\", [\"nf4\"])\n @pytest.mark.skipif(not torch.cuda.is_available(), reason=\"CUDA is required\")\n @pytest.mark.benchmark\n def test_bench_4bit_dequant(self, quant_type):\n blocksize = 256\n a = torch.rand(1024 * 12 * 4, 1024 * 12, device=\"cuda\").half()\n qa, SA = F.quantize_4bit(a, blocksize=blocksize, quant_type=quant_type)\n\n input_size = a.numel() / 
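The expected-error table in `test_4bit_quant` is built from exactly these two metrics per `(quant_type, blocksize)` cell: mean absolute error and mean relative error of the dequantized tensor. A sketch of the measurement with a crude uniform 4-bit quantizer standing in for the fp4/nf4 codebooks (so it runs without bitsandbytes kernels):

```python
import torch

A1 = torch.randn(1024, 1024)
scale = A1.abs().max()
q = torch.round(A1 / scale * 7).clamp(-8, 7)  # 15-level uniform stand-in, not nf4/fp4
A2 = q / 7 * scale

err = (A1 - A2).abs().float()
relerr = (err / (A1.abs().float() + 1e-8)).mean()  # same epsilon as the test
print(f"abs err {err.mean():.4f}, rel err {relerr:.4f}")
```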
2\n output_size = a.numel() * 2\n num_bytes = input_size + output_size\n GB = num_bytes / 1e9\n max_theoretical_s = GB / 768\n # print(max_theoretical_s*1e6)\n b = torch.randn(128, 1024 * 12, device=\"cuda\").half()\n\n iters = 100\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(iters):\n F.dequantize_4bit(qa, SA, blocksize=blocksize, quant_type=quant_type)\n # b.copy_(a)\n torch.cuda.synchronize()\n # print((time.time()-t0)/iters*1e6)\n\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in range(iters):\n # torch.matmul(b, a.t())\n # torch.cuda.synchronize()\n # print((time.time()-t0)/iters*1e6)\n\n @pytest.mark.skipif(\n HIP_ENVIRONMENT, reason=\"gemv 4bit tests are partially enabled on MI300, others being fixed for warpsize 64\"\n )\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"double_quant\", TRUE_FALSE, ids=lambda double_quant: f\"DQ_{double_quant}\")\n @pytest.mark.parametrize(\"storage_type\", [\"nf4\", \"fp4\"])\n @pytest.mark.parametrize(\"kind\", [\"fc1\", \"fc2\", \"attn\", \"attn_packed\"])\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=describe_dtype)\n @pytest.mark.parametrize(\n \"quant_storage\",\n [torch.uint8, torch.float16, torch.bfloat16, torch.float32],\n ids=describe_dtype,\n )\n @pytest.mark.parametrize(\"dim\", [128, 256, 512, 1024], ids=id_formatter(\"dim\"))\n def test_gemv_4bit(self, device, dim, dtype, storage_type, quant_storage, double_quant, kind):\n if device == \"hpu\" and not is_supported_on_hpu(storage_type, dtype, quant_storage):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n errs1 = []\n errs2 = []\n errs3 = []\n relerrs1 = []\n relerrs2 = []\n relerrs3 = []\n max_errs1 = []\n max_errs2 = []\n max_errs3 = []\n\n # Large number of iterations is excessive and slow on CPU.\n # Keep for CUDA/XPU for now.\n iters = 10 if device == \"cpu\" else 100\n\n for i in range(iters):\n if kind == \"fc1\":\n A = torch.randn(1, dim, dtype=dtype, device=device)\n B = torch.randn(dim * 4, dim, dtype=dtype, device=device) / math.sqrt(dim)\n elif kind == \"fc2\":\n A = torch.randn(1, 4 * dim, dtype=dtype, device=device)\n B = torch.randn(dim, 4 * dim, dtype=dtype, device=device) / math.sqrt(dim)\n elif kind == \"attn\":\n A = torch.randn(1, dim, dtype=dtype, device=device)\n B = torch.randn(dim, dim, dtype=dtype, device=device) / math.sqrt(dim)\n elif kind == \"attn_packed\":\n A = torch.randn(1, dim, dtype=dtype, device=device)\n B = torch.randn(dim * 3, dim, dtype=dtype, device=device) / math.sqrt(dim)\n\n qB, state = F.quantize_4bit(\n B,\n quant_type=storage_type,\n compress_statistics=double_quant,\n quant_storage=quant_storage,\n )\n C3 = torch.matmul(A, B.t())\n C2 = F.gemv_4bit(A, qB.t(), state=state)\n A.requires_grad = True\n C1 = bnb.matmul_4bit(A, qB.t(), state)\n\n err1 = (C1 - C2).abs().float()\n err2 = (C3 - C2).abs().float()\n err3 = (C3 - C1).abs().float()\n\n mag1 = torch.abs(C1).float() + 1e-5\n mag2 = torch.abs(C3).float() + 1e-5\n mag3 = torch.abs(C3).float() + 1e-5\n\n relerr1 = err1 / mag1\n relerr2 = err2 / mag2\n relerr3 = err3 / mag3\n\n max_err1 = err1.max()\n max_err2 = err2.max()\n max_err3 = err3.max()\n\n errs1.append(err1.mean().item())\n errs2.append(err2.mean().item())\n errs3.append(err3.mean().item())\n\n relerrs1.append(relerr1.mean().item())\n relerrs2.append(relerr2.mean().item())\n relerrs3.append(relerr3.mean().item())\n\n max_errs1.append(max_err1.item())\n max_errs2.append(max_err2.item())\n 
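`test_bench_4bit_dequant` computes a roofline lower bound before timing: a dequant kernel is memory-bound, so its best case is (bytes read + bytes written) / bandwidth. The arithmetic, isolated (768 GB/s is the bandwidth figure hardcoded in the test):

```python
# 4-bit packed input: half a byte per element; fp16 output: two bytes each.
numel = (1024 * 12 * 4) * (1024 * 12)
input_bytes = numel / 2
output_bytes = numel * 2
bandwidth_gb_s = 768  # assumed device bandwidth, as in the test

max_theoretical_s = (input_bytes + output_bytes) / 1e9 / bandwidth_gb_s
print(f"{max_theoretical_s * 1e6:.1f} us lower bound per dequant")
```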
max_errs3.append(max_err3.item())\n\n c = int(C1.numel() * 0.0014 * (dim / 256)) + 1\n\n c = assert_all_approx_close(C1, C2, 1e-5, 0.01, count=0, throw=False)\n err1 = sum(errs1) / len(errs1) / math.sqrt(dim)\n err2 = sum(errs2) / len(errs2) / math.sqrt(dim)\n err3 = sum(errs3) / len(errs3) / math.sqrt(dim)\n relerr1 = sum(relerrs1) / len(relerrs1) / math.sqrt(dim)\n relerr2 = sum(relerrs2) / len(relerrs2) / math.sqrt(dim)\n relerr3 = sum(relerrs3) / len(relerrs3) / math.sqrt(dim)\n maxerr1 = sum(max_errs1) / len(max_errs1) / math.sqrt(dim)\n maxerr2 = sum(max_errs2) / len(max_errs2) / math.sqrt(dim)\n maxerr3 = sum(max_errs3) / len(max_errs3) / math.sqrt(dim)\n absratio = err2 / err3\n relratio = relerr2 / relerr3\n maxratio = relerr2 / relerr3\n\n # for debugging if the tests fails\n #\n # print('='*80)\n # print(f'For matmul: {A.shape}, {B.shape}, {kind}, {dtype}, {storage_type}, double_quant={double_quant}:')\n # print(C1.flatten()[-20:])\n # print(C2.flatten()[-20:])\n # print(f'inference vs training abs: {err1}')\n # print(f'inference vs training rel: {relerr1}')\n # print(f'inference vs training max: {maxerr1}')\n # print(f'inference vs training vs torch err ratio abs: {absratio}')\n # print(f'inference vs training vs torch err ratio rel: {relratio}')\n # print(f'inference vs training vs torch err ratio max: {maxratio}')\n if dtype == torch.float16:\n if dim <= 512:\n assert err1 < 7e-5\n\n # TODO(matthewdouglas): On T4, dim=128-fp16-fc2-fp4-DQ will have relerror ~ 0.00092727\n if (\n device == \"cuda\"\n and double_quant\n and storage_type == \"fp4\"\n and kind == \"fc2\"\n and torch.cuda.get_device_capability() == (7, 5)\n ):\n assert relerr1 < 0.00093\n else:\n assert relerr1 < 0.0008\n else:\n assert err1 < 6e-5\n assert relerr1 < 2e-4\n assert absratio < 1.005 and absratio > 0.995\n assert relratio < 1.005 and relratio > 0.992\n assert maxratio < 1.005 and maxratio > 0.992\n elif dtype == torch.float32:\n if dim <= 512:\n assert err1 < 5e-8\n assert relerr1 < 1e-6\n assert maxerr1 < 1.05e-7\n else:\n assert err1 < 5e-8\n assert relerr1 < 8e-6\n assert maxerr1 < 1e-7\n assert absratio < 1.005 and absratio > 0.995\n assert relratio < 1.005 and relratio > 0.995\n assert maxratio < 1.005 and maxratio > 0.995\n elif dtype == torch.bfloat16:\n if dim <= 512:\n relerr_thres = 0.013 if hasattr(torch, \"xpu\") and torch.xpu.is_available() else 0.007\n assert err1 < 6e-4\n assert relerr1 < relerr_thres\n assert maxerr1 < 0.015\n else:\n assert err1 < 2e-4\n assert relerr1 < 0.002\n assert maxerr1 < 0.0012\n assert absratio < 1.005 and absratio > 0.995\n assert relratio < 1.05 and relratio > 0.96\n assert maxratio < 1.05 and maxratio > 0.97\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"storage_type\", [\"nf4\", \"fp4\"], ids=[\"nf4\", \"fp4\"])\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=describe_dtype)\n @pytest.mark.skipif(\n HIP_ENVIRONMENT and ROCM_GPU_ARCH == \"gfx90a\",\n reason=\"this test is not supported on ROCm with gfx90a architecture yet\",\n )\n def test_gemv_eye_4bit(self, device, storage_type, dtype):\n if device == \"hpu\" and not is_supported_on_hpu(storage_type, dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n if (\n device == \"cpu\"\n and platform.system() == \"Windows\"\n and version.parse(torch.__version__).release == (2, 8, 0)\n ):\n pytest.skip(\"Regression: CPU crash on Windows with torch 2.8.0\")\n\n dims = 4\n dims = get_test_dims(0, 8192, 
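`test_gemv_4bit` divides all aggregated errors by `math.sqrt(dim)`. The rationale: quantization noise is roughly independent per element, so the error of a length-`dim` dot product grows like sqrt(dim) (a random walk over the reduction axis), and normalizing makes one threshold work across dims. A quick numerical check of that scaling:

```python
import torch

for dim in (128, 512, 2048):
    noise = torch.randn(1000, dim) * 1e-3          # stand-in per-element quant noise
    err = (noise @ torch.ones(dim)).abs().mean()   # noise in a length-dim dot product
    # Raw error grows with dim; the sqrt(dim)-normalized column stays ~constant.
    print(dim, f"{float(err):.4f}", f"{float(err) / dim ** 0.5:.6f}")
```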
n=dims)\n dims = [dim + (64 - (dim % 64)) for dim in dims]\n # for dim in [576, 5120, 3520, 5184, 1280, 4992, 5312, 2048]:\n for dim in dims:\n A = torch.normal(0, 0.1, size=(1, 1, dim), dtype=dtype, device=device)\n B = torch.eye(dim, dtype=dtype, device=device)\n\n qB, state = F.quantize_4bit(B, quant_type=storage_type, compress_statistics=False)\n C3 = torch.matmul(A, B.t())\n C2 = bnb.matmul_4bit(A, qB.t(), state)\n A.requires_grad = True\n C1 = bnb.matmul_4bit(A, qB.t(), state)\n\n torch.testing.assert_close(A, C3)\n torch.testing.assert_close(A, C1)\n torch.testing.assert_close(A, C2)\n # torch.testing.assert_close(A, C1, rtol=1e-5, atol=0.00001)\n # torch.testing.assert_close(A, C2, rtol=1e-5, atol=0.080)\n\n\ndef test_normal_map_tree():\n code = F.create_normal_map()\n values = code[:8].tolist() + code[-8:].tolist()\n num_pivots = 1\n # print(values)\n while num_pivots < 16:\n idx = list(range(16 // num_pivots // 2, 16, 16 // num_pivots))\n # print(idx)\n num_pivots *= 2\n pivots = []\n for i in idx:\n pivots.append((values[i - 1] + values[i]) / 2)\n # print(pivots)","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_normal_map_tree","uri":"program://bitsandbytes/function/tests.test_functional.test_normal_map_tree#L1447-L1458","kind":"function","name":"test_normal_map_tree","path":"tests/test_functional.py","language":"python","start_line":1447,"end_line":1458,"context_start_line":1427,"context_end_line":1459,"code":" dims = get_test_dims(0, 8192, n=dims)\n dims = [dim + (64 - (dim % 64)) for dim in dims]\n # for dim in [576, 5120, 3520, 5184, 1280, 4992, 5312, 2048]:\n for dim in dims:\n A = torch.normal(0, 0.1, size=(1, 1, dim), dtype=dtype, device=device)\n B = torch.eye(dim, dtype=dtype, device=device)\n\n qB, state = F.quantize_4bit(B, quant_type=storage_type, compress_statistics=False)\n C3 = torch.matmul(A, B.t())\n C2 = bnb.matmul_4bit(A, qB.t(), state)\n A.requires_grad = True\n C1 = bnb.matmul_4bit(A, qB.t(), state)\n\n torch.testing.assert_close(A, C3)\n torch.testing.assert_close(A, C1)\n torch.testing.assert_close(A, C2)\n # torch.testing.assert_close(A, C1, rtol=1e-5, atol=0.00001)\n # torch.testing.assert_close(A, C2, rtol=1e-5, atol=0.080)\n\n\ndef test_normal_map_tree():\n code = F.create_normal_map()\n values = code[:8].tolist() + code[-8:].tolist()\n num_pivots = 1\n # print(values)\n while num_pivots < 16:\n idx = list(range(16 // num_pivots // 2, 16, 16 // num_pivots))\n # print(idx)\n num_pivots *= 2\n pivots = []\n for i in idx:\n pivots.append((values[i - 1] + values[i]) / 2)\n # print(pivots)","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.__init__","uri":"program://bitsandbytes/function/tests.test_functional.__init__#L56-L59","kind":"function","name":"__init__","path":"tests/test_functional.py","language":"python","start_line":56,"end_line":59,"context_start_line":36,"context_end_line":79,"code":" return sumval\n\n\nclass FFN(torch.nn.Module):\n def __init__(self, input_features, hidden_size, bias=True):\n super().__init__()\n self.fc1 = torch.nn.Linear(input_features, hidden_size, bias=bias)\n self.fc2 = torch.nn.Linear(hidden_size, input_features, bias=bias)\n\n with torch.no_grad():\n torch.nn.init.xavier_uniform_(self.fc1.weight)\n torch.nn.init.xavier_uniform_(self.fc2.weight)\n\n def forward(self, x):\n x = 
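`test_normal_map_tree` walks a binary-search tree over a sorted 16-value codebook: at each level the pivots are midpoints between neighboring code values, visited in strided order. The same construction in isolation (a `linspace` stand-in replaces `F.create_normal_map` so this runs standalone):

```python
import torch

values = torch.linspace(-1, 1, 16).tolist()  # stand-in for the normal map's ends

num_pivots = 1
while num_pivots < 16:
    stride = 16 // num_pivots
    idx = list(range(stride // 2, 16, stride))          # pivot positions this level
    pivots = [(values[i - 1] + values[i]) / 2 for i in idx]  # midpoints
    print(num_pivots, pivots)                           # level 1 prints the root pivot
    num_pivots *= 2
```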
torch.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n\nclass Timer:\n def __init__(self):\n self.starts = {}\n self.ends = {}\n self.agg = {}\n\n def tick(self, name=\"default\"):\n if name not in self.starts:\n self.starts[name] = torch.cuda.Event(enable_timing=True)\n self.ends[name] = torch.cuda.Event(enable_timing=True)\n self.starts[name].record()\n else:\n ms = self.tock(name, evict=True, print_ms=False)\n\n def tock(self, name=\"default\", evict=True, print_ms=True):\n if name in self.ends:\n self.ends[name].record()\n torch.cuda.synchronize()\n ms = self.starts[name].elapsed_time(self.ends[name])\n if name not in self.agg:\n self.agg[name] = 0.0\n self.agg[name] += ms\n if evict:\n self.starts.pop(name)\n self.ends.pop(name)","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.forward","uri":"program://bitsandbytes/function/tests.test_functional.forward#L49-L52","kind":"function","name":"forward","path":"tests/test_functional.py","language":"python","start_line":49,"end_line":52,"context_start_line":29,"context_end_line":72,"code":" idx = torch.isclose(a, b, rtol=rtol, atol=atol)\n sumval = (idx == 0).sum().item()\n if sumval > count:\n if throw:\n print(f\"Too many values not close: assert {sumval} < {count}\")\n torch.testing.assert_close(a, b, rtol=rtol, atol=atol)\n\n return sumval\n\n\nclass FFN(torch.nn.Module):\n def __init__(self, input_features, hidden_size, bias=True):\n super().__init__()\n self.fc1 = torch.nn.Linear(input_features, hidden_size, bias=bias)\n self.fc2 = torch.nn.Linear(hidden_size, input_features, bias=bias)\n\n with torch.no_grad():\n torch.nn.init.xavier_uniform_(self.fc1.weight)\n torch.nn.init.xavier_uniform_(self.fc2.weight)\n\n def forward(self, x):\n x = torch.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n\nclass Timer:\n def __init__(self):\n self.starts = {}\n self.ends = {}\n self.agg = {}\n\n def tick(self, name=\"default\"):\n if name not in self.starts:\n self.starts[name] = torch.cuda.Event(enable_timing=True)\n self.ends[name] = torch.cuda.Event(enable_timing=True)\n self.starts[name].record()\n else:\n ms = self.tock(name, evict=True, print_ms=False)\n\n def tock(self, name=\"default\", evict=True, print_ms=True):\n if name in self.ends:\n self.ends[name].record()\n torch.cuda.synchronize()","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.tick","uri":"program://bitsandbytes/function/tests.test_functional.tick#L61-L67","kind":"function","name":"tick","path":"tests/test_functional.py","language":"python","start_line":61,"end_line":67,"context_start_line":41,"context_end_line":87,"code":" super().__init__()\n self.fc1 = torch.nn.Linear(input_features, hidden_size, bias=bias)\n self.fc2 = torch.nn.Linear(hidden_size, input_features, bias=bias)\n\n with torch.no_grad():\n torch.nn.init.xavier_uniform_(self.fc1.weight)\n torch.nn.init.xavier_uniform_(self.fc2.weight)\n\n def forward(self, x):\n x = torch.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n\nclass Timer:\n def __init__(self):\n self.starts = {}\n self.ends = {}\n self.agg = {}\n\n def tick(self, name=\"default\"):\n if name not in self.starts:\n self.starts[name] = torch.cuda.Event(enable_timing=True)\n self.ends[name] = torch.cuda.Event(enable_timing=True)\n self.starts[name].record()\n else:\n ms = self.tock(name, evict=True, print_ms=False)\n\n def 
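The `Timer` class is built on CUDA event pairs: events are recorded on the stream and `elapsed_time()` measures GPU time between them, independent of host-side overhead. A minimal sketch of that pattern outside the class (requires a CUDA device):

```python
import torch

def time_gpu(fn, iters=100):
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    for _ in range(iters):
        fn()
    end.record()
    torch.cuda.synchronize()  # wait until `end` has actually been reached
    return start.elapsed_time(end) / iters  # milliseconds per iteration

# Usage (requires CUDA):
# A = torch.randn(2048, 2048, device="cuda")
# print(time_gpu(lambda: A @ A), "ms")
```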
tock(self, name=\"default\", evict=True, print_ms=True):\n if name in self.ends:\n self.ends[name].record()\n torch.cuda.synchronize()\n ms = self.starts[name].elapsed_time(self.ends[name])\n if name not in self.agg:\n self.agg[name] = 0.0\n self.agg[name] += ms\n if evict:\n self.starts.pop(name)\n self.ends.pop(name)\n\n if print_ms and name in self.agg:\n print(f\"{name} took: {self.agg[name] / 1000.0:.5f}s\")\n\n return self.agg[name]\n\n def reset(self):\n self.starts = {}","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.tock","uri":"program://bitsandbytes/function/tests.test_functional.tock#L69-L84","kind":"function","name":"tock","path":"tests/test_functional.py","language":"python","start_line":69,"end_line":84,"context_start_line":49,"context_end_line":104,"code":" def forward(self, x):\n x = torch.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n\nclass Timer:\n def __init__(self):\n self.starts = {}\n self.ends = {}\n self.agg = {}\n\n def tick(self, name=\"default\"):\n if name not in self.starts:\n self.starts[name] = torch.cuda.Event(enable_timing=True)\n self.ends[name] = torch.cuda.Event(enable_timing=True)\n self.starts[name].record()\n else:\n ms = self.tock(name, evict=True, print_ms=False)\n\n def tock(self, name=\"default\", evict=True, print_ms=True):\n if name in self.ends:\n self.ends[name].record()\n torch.cuda.synchronize()\n ms = self.starts[name].elapsed_time(self.ends[name])\n if name not in self.agg:\n self.agg[name] = 0.0\n self.agg[name] += ms\n if evict:\n self.starts.pop(name)\n self.ends.pop(name)\n\n if print_ms and name in self.agg:\n print(f\"{name} took: {self.agg[name] / 1000.0:.5f}s\")\n\n return self.agg[name]\n\n def reset(self):\n self.starts = {}\n self.ends = {}\n self.agg = {}\n print(\"Resetting benchmark data\")\n\n\nclass Test8BitBlockwiseQuantizeFunctional:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n @pytest.mark.parametrize(\"nested\", TRUE_FALSE, ids=id_formatter(\"nested\"))\n @pytest.mark.parametrize(\n \"blocksize\",\n [4096, 2048, 1024, 512, 256, 128, 64] if not HIP_ENVIRONMENT else [4096, 2048, 1024, 512, 256, 128],\n )\n @pytest.mark.parametrize(\"signed\", TRUE_FALSE, ids=id_formatter(\"signed\"))\n def test_dynamic_blockwise_quantization(self, device, dtype, nested, blocksize, signed):\n iters = 100\n","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.reset","uri":"program://bitsandbytes/function/tests.test_functional.reset#L86-L90","kind":"function","name":"reset","path":"tests/test_functional.py","language":"python","start_line":86,"end_line":90,"context_start_line":66,"context_end_line":110,"code":" else:\n ms = self.tock(name, evict=True, print_ms=False)\n\n def tock(self, name=\"default\", evict=True, print_ms=True):\n if name in self.ends:\n self.ends[name].record()\n torch.cuda.synchronize()\n ms = self.starts[name].elapsed_time(self.ends[name])\n if name not in self.agg:\n self.agg[name] = 0.0\n self.agg[name] += ms\n if evict:\n self.starts.pop(name)\n self.ends.pop(name)\n\n if print_ms and name in self.agg:\n print(f\"{name} took: {self.agg[name] / 1000.0:.5f}s\")\n\n return self.agg[name]\n\n def reset(self):\n self.starts = {}\n self.ends = {}\n self.agg = {}\n 
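For reference, the intended tick/tock call pattern of the `Timer` above, sketched as commented usage (requires CUDA; `F.igemm`, `A`, and `B` stand for whatever is being benchmarked):

```python
# timer = Timer()
# for _ in range(100):
#     timer.tick("igemm")            # records the start event for this name
#     out = F.igemm(A, B)
#     timer.tock("igemm")            # records end, syncs, prints cumulative seconds
```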
print(\"Resetting benchmark data\")\n\n\nclass Test8BitBlockwiseQuantizeFunctional:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n @pytest.mark.parametrize(\"nested\", TRUE_FALSE, ids=id_formatter(\"nested\"))\n @pytest.mark.parametrize(\n \"blocksize\",\n [4096, 2048, 1024, 512, 256, 128, 64] if not HIP_ENVIRONMENT else [4096, 2048, 1024, 512, 256, 128],\n )\n @pytest.mark.parametrize(\"signed\", TRUE_FALSE, ids=id_formatter(\"signed\"))\n def test_dynamic_blockwise_quantization(self, device, dtype, nested, blocksize, signed):\n iters = 100\n\n if device != \"cuda\":\n iters = 10\n\n # This test is slow in our non-CUDA implementations, so avoid atypical use cases.\n if nested:\n pytest.skip(\"Not a typical use case.\")","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_dynamic_blockwise_quantization","uri":"program://bitsandbytes/function/tests.test_functional.test_dynamic_blockwise_quantization#L102-L152","kind":"function","name":"test_dynamic_blockwise_quantization","path":"tests/test_functional.py","language":"python","start_line":102,"end_line":152,"context_start_line":82,"context_end_line":172,"code":" print(f\"{name} took: {self.agg[name] / 1000.0:.5f}s\")\n\n return self.agg[name]\n\n def reset(self):\n self.starts = {}\n self.ends = {}\n self.agg = {}\n print(\"Resetting benchmark data\")\n\n\nclass Test8BitBlockwiseQuantizeFunctional:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n @pytest.mark.parametrize(\"nested\", TRUE_FALSE, ids=id_formatter(\"nested\"))\n @pytest.mark.parametrize(\n \"blocksize\",\n [4096, 2048, 1024, 512, 256, 128, 64] if not HIP_ENVIRONMENT else [4096, 2048, 1024, 512, 256, 128],\n )\n @pytest.mark.parametrize(\"signed\", TRUE_FALSE, ids=id_formatter(\"signed\"))\n def test_dynamic_blockwise_quantization(self, device, dtype, nested, blocksize, signed):\n iters = 100\n\n if device != \"cuda\":\n iters = 10\n\n # This test is slow in our non-CUDA implementations, so avoid atypical use cases.\n if nested:\n pytest.skip(\"Not a typical use case.\")\n if blocksize != 256:\n pytest.skip(\"Only blocksize 256 is used in CPU/MPS/XPU\")\n if dtype != torch.float32:\n pytest.skip(\"Only float32 is used in CPU/MPS/XPU\")\n\n diffs = []\n reldiffs = []\n for i in range(iters):\n A1 = torch.randn(1024, 1024, device=device, dtype=dtype)\n C, S = F.quantize_blockwise(A1, blocksize=blocksize, nested=nested)\n A2 = F.dequantize_blockwise(C, S)\n diff = torch.abs(A1 - A2).float()\n reldiff = diff / torch.abs(A1.float() + 1e-8)\n diffs.append(diff.mean().item())\n reldiffs.append(reldiff.mean().item())\n abserr = sum(diffs) / len(diffs)\n relerr = sum(reldiffs) / len(reldiffs)\n assert abserr < 0.011\n assert relerr < 0.018\n assert A2.dtype == dtype\n\n diffs = []\n code = F.create_dynamic_map(signed=signed)\n for i in range(iters):\n A1 = torch.rand(1024, 1024, device=device, dtype=dtype)\n C, S = F.quantize_blockwise(A1, blocksize=blocksize, nested=nested, code=code)\n A2 = F.dequantize_blockwise(C, S)\n diff = torch.abs(A1 - A2).float()\n reldiff = diff / torch.abs(A1.float() + 1e-8)\n diffs.append(diff.mean().item())\n reldiffs.append(reldiff.mean().item())\n # torch.testing.assert_close(A1, A2, atol=1e-2, 
rtol=0)\n abserr = sum(diffs) / len(diffs)\n relerr = sum(reldiffs) / len(reldiffs)\n if signed:\n threshold_abserr = 0.0035\n assert abserr < 0.0036\n assert relerr < 0.015\n else:\n assert abserr < 0.0023\n assert relerr < 0.012\n assert A2.dtype == dtype\n\n @pytest.mark.skipif(\"cpu\" not in get_available_devices(), reason=\"CPU is required\")\n @pytest.mark.parametrize(\"hidden\", [128])\n @pytest.mark.parametrize(\"blocksize\", [4096, 16384])\n def test_blockwise_cpu_large(self, hidden, blocksize):\n diffs = []\n reldiffs = []\n batch = 128\n seq = 128\n\n for i in range(2):\n A1 = torch.randn(batch, seq, hidden, device=\"cpu\")\n t0 = time.time()\n C, S = F.quantize_blockwise(A1, blocksize=blocksize)\n A2 = F.dequantize_blockwise(C, S, blocksize=blocksize)\n print(time.time() - t0)\n diff = torch.abs(A1 - A2)\n reldiff = diff / torch.abs(A1 + 1e-8)\n diffs.append(diff.mean().item())\n reldiffs.append(reldiff.mean().item())","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_blockwise_cpu_large","uri":"program://bitsandbytes/function/tests.test_functional.test_blockwise_cpu_large#L157-L173","kind":"function","name":"test_blockwise_cpu_large","path":"tests/test_functional.py","language":"python","start_line":157,"end_line":173,"context_start_line":137,"context_end_line":193,"code":" A2 = F.dequantize_blockwise(C, S)\n diff = torch.abs(A1 - A2).float()\n reldiff = diff / torch.abs(A1.float() + 1e-8)\n diffs.append(diff.mean().item())\n reldiffs.append(reldiff.mean().item())\n # torch.testing.assert_close(A1, A2, atol=1e-2, rtol=0)\n abserr = sum(diffs) / len(diffs)\n relerr = sum(reldiffs) / len(reldiffs)\n if signed:\n threshold_abserr = 0.0035\n assert abserr < 0.0036\n assert relerr < 0.015\n else:\n assert abserr < 0.0023\n assert relerr < 0.012\n assert A2.dtype == dtype\n\n @pytest.mark.skipif(\"cpu\" not in get_available_devices(), reason=\"CPU is required\")\n @pytest.mark.parametrize(\"hidden\", [128])\n @pytest.mark.parametrize(\"blocksize\", [4096, 16384])\n def test_blockwise_cpu_large(self, hidden, blocksize):\n diffs = []\n reldiffs = []\n batch = 128\n seq = 128\n\n for i in range(2):\n A1 = torch.randn(batch, seq, hidden, device=\"cpu\")\n t0 = time.time()\n C, S = F.quantize_blockwise(A1, blocksize=blocksize)\n A2 = F.dequantize_blockwise(C, S, blocksize=blocksize)\n print(time.time() - t0)\n diff = torch.abs(A1 - A2)\n reldiff = diff / torch.abs(A1 + 1e-8)\n diffs.append(diff.mean().item())\n reldiffs.append(reldiff.mean().item())\n assert diffs[-1] < 0.011\n # print(sum(diffs)/len(diffs))\n # print(sum(reldiffs)/len(reldiffs))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"bits\", range(2, 9), ids=id_formatter(\"bits\"))\n @pytest.mark.parametrize(\"method\", [\"linear\", \"fp8\", \"dynamic\"])\n def test_few_bit_quant(self, device, bits, method):\n if bits != 8 and device == \"cpu\":\n pytest.skip(\"CPU implementation only supports 8 bits\")\n\n abserrs = []\n relerrs = []\n code = None\n if method == \"linear\":\n code = F.create_linear_map(True, total_bits=bits).to(device)\n elif method == \"fp8\":\n ebits = math.ceil(bits / 2)\n pbits = bits - ebits - 1\n code = F.create_fp8_map(True, ebits, pbits, bits).to(device)\n elif method == \"dynamic\":","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} 
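# --- Illustrative aside (not part of the indexed source records): the
# round-trip error check that test_dynamic_blockwise_quantization performs,
# in isolation. Assumes bitsandbytes is installed and uses the same F alias
# as the test suite.
import torch
import bitsandbytes.functional as F

A1 = torch.randn(1024, 1024, device="cuda", dtype=torch.float16)
C, S = F.quantize_blockwise(A1, blocksize=256)  # int8 payload + quant state
A2 = F.dequantize_blockwise(C, S)

diff = (A1 - A2).abs().float()
reldiff = diff / (A1.float().abs() + 1e-8)  # same epsilon guard as the test
print(f"mean abs err {diff.mean():.4f}, mean rel err {reldiff.mean():.4f}")
assert A2.dtype == A1.dtype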
{"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_few_bit_quant","uri":"program://bitsandbytes/function/tests.test_functional.test_few_bit_quant#L180-L229","kind":"function","name":"test_few_bit_quant","path":"tests/test_functional.py","language":"python","start_line":180,"end_line":229,"context_start_line":160,"context_end_line":249,"code":" batch = 128\n seq = 128\n\n for i in range(2):\n A1 = torch.randn(batch, seq, hidden, device=\"cpu\")\n t0 = time.time()\n C, S = F.quantize_blockwise(A1, blocksize=blocksize)\n A2 = F.dequantize_blockwise(C, S, blocksize=blocksize)\n print(time.time() - t0)\n diff = torch.abs(A1 - A2)\n reldiff = diff / torch.abs(A1 + 1e-8)\n diffs.append(diff.mean().item())\n reldiffs.append(reldiff.mean().item())\n assert diffs[-1] < 0.011\n # print(sum(diffs)/len(diffs))\n # print(sum(reldiffs)/len(reldiffs))\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"bits\", range(2, 9), ids=id_formatter(\"bits\"))\n @pytest.mark.parametrize(\"method\", [\"linear\", \"fp8\", \"dynamic\"])\n def test_few_bit_quant(self, device, bits, method):\n if bits != 8 and device == \"cpu\":\n pytest.skip(\"CPU implementation only supports 8 bits\")\n\n abserrs = []\n relerrs = []\n code = None\n if method == \"linear\":\n code = F.create_linear_map(True, total_bits=bits).to(device)\n elif method == \"fp8\":\n ebits = math.ceil(bits / 2)\n pbits = bits - ebits - 1\n code = F.create_fp8_map(True, ebits, pbits, bits).to(device)\n elif method == \"dynamic\":\n code = F.create_dynamic_map(True, bits - 0, bits).to(device)\n\n # for some data types we have no zero\n # for some data types we have one zero\n # for some data types we have two zeros\n assert torch.unique(code).numel() in [2**bits, 2**bits - 1], f\"bits: {bits}, method: {method}\"\n # print(method, (code==0).sum())\n assert code.numel() == 256\n for i in range(10):\n values = torch.randn(1, 32, device=device)\n values /= values.abs().max()\n # values[values.abs() < 1e-6] += 1e-5\n\n q1 = []\n v1 = []\n for v in values[0]:\n idx = torch.abs(v - code).argmin()\n q1.append(idx.item())\n v1.append(code[idx].item())\n\n q1 = torch.tensor(q1, device=device)\n v1 = torch.tensor(v1, device=device)\n\n q2, S2 = F.quantize_blockwise(values, code=code)\n v2 = F.dequantize_blockwise(q2, S2)\n\n idx = torch.isclose(q1.int(), q2.int())\n err2 = torch.abs(v2 - values)\n abserrs.append(err2.mean().item())\n relerrs.append((err2 / (1e-10 + values).abs()).mean().item())\n if idx.sum():\n # some weird cases\n err1 = torch.abs(v1 - values).mean()\n # assert err2.mean() <= err1\n else:\n torch.testing.assert_close(q1, q2)\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_fp8_quant(self, device):\n # TODO\n if device == \"cpu\":\n pytest.skip(\"CPU implementation segfaults\")\n\n for e_bits in range(1, 7):\n p_bits = 7 - e_bits\n code = F.create_fp8_map(True, e_bits, p_bits).to(device)\n\n abserr = []\n relerr = []\n for i in range(10):\n A1 = torch.randn(1024, 1024, device=device)\n C, SC = F.quantize_blockwise(A1, code=code)\n A2 = F.dequantize_blockwise(C, SC)\n diff = torch.abs(A1 - A2)\n reldiff = diff / torch.abs(A1 + 1e-8)\n abserr.append(diff.mean().item())","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} 
{"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_fp8_quant","uri":"program://bitsandbytes/function/tests.test_functional.test_fp8_quant#L232-L278","kind":"function","name":"test_fp8_quant","path":"tests/test_functional.py","language":"python","start_line":232,"end_line":278,"context_start_line":212,"context_end_line":298,"code":" v1.append(code[idx].item())\n\n q1 = torch.tensor(q1, device=device)\n v1 = torch.tensor(v1, device=device)\n\n q2, S2 = F.quantize_blockwise(values, code=code)\n v2 = F.dequantize_blockwise(q2, S2)\n\n idx = torch.isclose(q1.int(), q2.int())\n err2 = torch.abs(v2 - values)\n abserrs.append(err2.mean().item())\n relerrs.append((err2 / (1e-10 + values).abs()).mean().item())\n if idx.sum():\n # some weird cases\n err1 = torch.abs(v1 - values).mean()\n # assert err2.mean() <= err1\n else:\n torch.testing.assert_close(q1, q2)\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_fp8_quant(self, device):\n # TODO\n if device == \"cpu\":\n pytest.skip(\"CPU implementation segfaults\")\n\n for e_bits in range(1, 7):\n p_bits = 7 - e_bits\n code = F.create_fp8_map(True, e_bits, p_bits).to(device)\n\n abserr = []\n relerr = []\n for i in range(10):\n A1 = torch.randn(1024, 1024, device=device)\n C, SC = F.quantize_blockwise(A1, code=code)\n A2 = F.dequantize_blockwise(C, SC)\n diff = torch.abs(A1 - A2)\n reldiff = diff / torch.abs(A1 + 1e-8)\n abserr.append(diff.mean().item())\n relerr.append(reldiff.mean().item())\n # assert diff < 0.0075\n # print(sum(abserr)/len(abserr))\n # print(sum(relerr)/len(relerr))\n\n abserr = []\n relerr = []\n for i in range(10):\n A1 = torch.rand(1024, 1024, device=device)\n C, SC = F.quantize_blockwise(A1, code=code)\n A2 = F.dequantize_blockwise(C, SC)\n diff = torch.abs(A1 - A2)\n reldiff = diff / torch.abs(A1 + 1e-8)\n abserr.append(diff.mean().item())\n relerr.append(reldiff.mean().item())\n # assert diff < 0.0075\n # print(sum(abserr)/len(abserr))\n # print(sum(relerr)/len(relerr))\n\n abserr = []\n relerr = []\n for i in range(10):\n A1 = torch.randn(1024, 1024, device=device)\n C, SC = F.quantize_blockwise(A1)\n A2 = F.dequantize_blockwise(C, SC)\n diff = torch.abs(A1 - A2)\n reldiff = diff / torch.abs(A1 + 1e-8)\n abserr.append(diff.mean().item())\n relerr.append(reldiff.mean().item())\n # assert diff < 0.0075\n # print(3, sum(abserr)/len(abserr))\n # print(3, sum(relerr)/len(relerr))\n\n @pytest.mark.benchmark\n def test_bench_dequantization(self):\n a = torch.rand(1024, 1024, device=\"cuda\").half()\n code = F.create_fp8_map(True, 3, 0, 4).cuda()\n qa, SA = F.quantize_blockwise(a, code=code)\n print(qa.max())\n\n max_theoretical_mu = 1024 * 1024 * 2 / 1024**3 / 672 * 1000 * 1000\n # print(max_theoretical_mu)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n qa, SA = F.quantize_blockwise(a)\n torch.cuda.synchronize()\n # print((time.time()-t0)/1e6)","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_bench_dequantization","uri":"program://bitsandbytes/function/tests.test_functional.test_bench_dequantization#L284-L297","kind":"function","name":"test_bench_dequantization","path":"tests/test_functional.py","language":"python","start_line":284,"end_line":297,"context_start_line":264,"context_end_line":317,"code":" relerr.append(reldiff.mean().item())\n # assert diff < 0.0075\n # print(sum(abserr)/len(abserr))\n # print(sum(relerr)/len(relerr))\n\n abserr = 
[]\n relerr = []\n for i in range(10):\n A1 = torch.randn(1024, 1024, device=device)\n C, SC = F.quantize_blockwise(A1)\n A2 = F.dequantize_blockwise(C, SC)\n diff = torch.abs(A1 - A2)\n reldiff = diff / torch.abs(A1 + 1e-8)\n abserr.append(diff.mean().item())\n relerr.append(reldiff.mean().item())\n # assert diff < 0.0075\n # print(3, sum(abserr)/len(abserr))\n # print(3, sum(relerr)/len(relerr))\n\n @pytest.mark.benchmark\n def test_bench_dequantization(self):\n a = torch.rand(1024, 1024, device=\"cuda\").half()\n code = F.create_fp8_map(True, 3, 0, 4).cuda()\n qa, SA = F.quantize_blockwise(a, code=code)\n print(qa.max())\n\n max_theoretical_mu = 1024 * 1024 * 2 / 1024**3 / 672 * 1000 * 1000\n # print(max_theoretical_mu)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n qa, SA = F.quantize_blockwise(a)\n torch.cuda.synchronize()\n # print((time.time()-t0)/1e6)\n\n\ndef test_stable_embedding():\n layer = bnb.nn.StableEmbedding(1024, 1024)\n layer.reset_parameters()\n\n\ndef quant(x):\n max1 = torch.abs(x).max()\n x = torch.round(x / max1 * 127)\n return max1, x.to(torch.int8)\n\n\ndef dequant(c, maxC):\n return c.float() * (maxC / 127)\n\n\ndef mm_dequant(maxA, maxB, C):\n return C.float() * (maxA / 127) * (maxB / 127)","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_approx_igemm","uri":"program://bitsandbytes/function/tests.test_functional.test_approx_igemm#L365-L396","kind":"function","name":"test_approx_igemm","path":"tests/test_functional.py","language":"python","start_line":365,"end_line":396,"context_start_line":345,"context_end_line":416,"code":"\n\nmethods = {\n \"linear\": (\n lambda x, dim: quant(x),\n lambda x, dim: quant(x),\n dequant,\n dequant,\n mm_dequant,\n ),\n \"vectorwise\": (quant_multi, quant_multi, dequant, dequant, mm_dequant),\n}\n\n\n@pytest.mark.skipif(not torch.cuda.is_available(), reason=\"CUDA is required\")\nclass TestIGEMMFunctional:\n @pytest.mark.parametrize(\"dim1\", [1024 * 2], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [1024 * 16], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"quant_methods\", methods.values(), ids=methods.keys())\n @pytest.mark.parametrize(\"batched\", TRUE_FALSE, ids=id_formatter(\"batched\"))\n def test_approx_igemm(self, dim1, dim2, quant_methods, batched):\n dim1 = dim1 - (dim1 % 32)\n dim2 = dim2 - (dim2 % 32)\n errors = []\n relerrors = []\n # print(\"\")\n for i in range(5):\n if batched:\n A = torch.normal(0, 0.5, size=(32, dim1, dim2 // 32), device=\"cuda\")\n B = torch.normal(0, 0.5, size=(32, dim2 // 32, dim1), device=\"cuda\")\n maxA, Ac = quant_methods[0](A, 2)\n maxB, Bc = quant_methods[1](B, 1)\n else:\n A = torch.normal(0, 0.5, size=(dim1, dim2), device=\"cuda\")\n B = torch.normal(0, 0.5, size=(dim2, dim1), device=\"cuda\")\n maxA, Ac = quant_methods[0](A, 1)\n maxB, Bc = quant_methods[1](B, 0)\n torch.testing.assert_close(quant_methods[2](maxA, Ac), A, atol=0.025, rtol=0.05)\n if batched:\n out2 = torch.bmm(A, B)\n C = torch.bmm(Ac.float(), Bc.float())\n else:\n out2 = torch.mm(A, B)\n C = F.igemm(Ac, Bc)\n out = quant_methods[4](maxA, maxB, C)\n std = out2.std()\n out /= std\n out2 /= std\n err = torch.abs(out - out2)\n relerr = err / torch.abs(out2)\n errors.append(err.mean().item())\n relerrors.append(relerr.mean().item())\n # print(mean(errors))\n # print(mean(relerrors))\n\n @pytest.mark.parametrize(\"hidden_dim\", [32, 256], 
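# --- Illustrative aside (not part of the indexed source records): why the
# mm_dequant helper above multiplies by both scales. With symmetric absmax
# quantization, the two per-tensor scales factor straight out of the matmul.
# Pure-torch sketch, runnable on CPU:
import torch

def quant(x):
    max1 = x.abs().max()
    return max1, torch.round(x / max1 * 127).to(torch.int8)

A, B = torch.randn(64, 64), torch.randn(64, 64)
maxA, Aq = quant(A)
maxB, Bq = quant(B)
# int8 matmul with int32 accumulation, then rescale by (maxA/127)*(maxB/127)
C = (Aq.int() @ Bq.int()).float() * (maxA / 127) * (maxB / 127)
rel = (C - A @ B).abs().mean() / (A @ B).abs().mean()
print(f"relative error ~{rel:.3f}")  # nonzero only because of 8-bit rounding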
ids=id_formatter(\"hidden_dim\"))\n @pytest.mark.parametrize(\"batch_dim\", [16, 256], ids=id_formatter(\"batch_dim\"))\n @pytest.mark.parametrize(\"seq_dim\", [16, 256], ids=id_formatter(\"seq_dim\"))\n @pytest.mark.parametrize(\"transpose\", BOOLEAN_TUPLES, ids=id_formatter(\"transpose\"))\n def test_igemm(self, hidden_dim, batch_dim, transpose, seq_dim):\n hidden_dim = hidden_dim - (hidden_dim % 32)\n batch_dim = batch_dim - (batch_dim % 16)\n seq_dim = seq_dim - (seq_dim % 16)\n for i in range(k):\n shapeA = (batch_dim, hidden_dim) if not transpose[0] else (hidden_dim, batch_dim)\n shapeB = (\n (32 * random.randint(1, 4), hidden_dim) if transpose[1] else (hidden_dim, 32 * random.randint(1, 4))\n )\n A = torch.randint(-128, 127, size=shapeA, device=\"cuda\").to(torch.int8)\n B = torch.randint(-128, 127, size=shapeB, device=\"cuda\").to(torch.int8)\n if not transpose[0] and not transpose[1]:\n out2 = torch.matmul(A.float(), B.float())","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_igemm","uri":"program://bitsandbytes/function/tests.test_functional.test_igemm#L404-L444","kind":"function","name":"test_igemm","path":"tests/test_functional.py","language":"python","start_line":404,"end_line":444,"context_start_line":384,"context_end_line":464,"code":" out2 = torch.bmm(A, B)\n C = torch.bmm(Ac.float(), Bc.float())\n else:\n out2 = torch.mm(A, B)\n C = F.igemm(Ac, Bc)\n out = quant_methods[4](maxA, maxB, C)\n std = out2.std()\n out /= std\n out2 /= std\n err = torch.abs(out - out2)\n relerr = err / torch.abs(out2)\n errors.append(err.mean().item())\n relerrors.append(relerr.mean().item())\n # print(mean(errors))\n # print(mean(relerrors))\n\n @pytest.mark.parametrize(\"hidden_dim\", [32, 256], ids=id_formatter(\"hidden_dim\"))\n @pytest.mark.parametrize(\"batch_dim\", [16, 256], ids=id_formatter(\"batch_dim\"))\n @pytest.mark.parametrize(\"seq_dim\", [16, 256], ids=id_formatter(\"seq_dim\"))\n @pytest.mark.parametrize(\"transpose\", BOOLEAN_TUPLES, ids=id_formatter(\"transpose\"))\n def test_igemm(self, hidden_dim, batch_dim, transpose, seq_dim):\n hidden_dim = hidden_dim - (hidden_dim % 32)\n batch_dim = batch_dim - (batch_dim % 16)\n seq_dim = seq_dim - (seq_dim % 16)\n for i in range(k):\n shapeA = (batch_dim, hidden_dim) if not transpose[0] else (hidden_dim, batch_dim)\n shapeB = (\n (32 * random.randint(1, 4), hidden_dim) if transpose[1] else (hidden_dim, 32 * random.randint(1, 4))\n )\n A = torch.randint(-128, 127, size=shapeA, device=\"cuda\").to(torch.int8)\n B = torch.randint(-128, 127, size=shapeB, device=\"cuda\").to(torch.int8)\n if not transpose[0] and not transpose[1]:\n out2 = torch.matmul(A.float(), B.float())\n out = F.igemm(A, B)\n elif not transpose[0] and transpose[1]:\n out2 = torch.matmul(A.float(), B.t().float())\n out = F.igemm(A, B.t())\n elif transpose[0] and not transpose[1]:\n out2 = torch.matmul(A.t().float(), B.float())\n out = F.igemm(A.t(), B)\n elif transpose[0] and transpose[1]:\n out2 = torch.matmul(A.t().float(), B.t().float())\n out = F.igemm(A.t(), B.t())\n\n torch.testing.assert_close(out.float(), out2)\n\n for i in range(k):\n shapeA = (batch_dim, seq_dim, hidden_dim)\n shapeB = (\n (32 * random.randint(1, 4), hidden_dim) if transpose[1] else (hidden_dim, 32 * random.randint(1, 4))\n )\n A = torch.randint(-128, 127, size=shapeA, device=\"cuda\").to(torch.int8)\n B = torch.randint(-128, 127, size=shapeB, 
device=\"cuda\").to(torch.int8)\n if not transpose[0] and not transpose[1]:\n out2 = torch.matmul(A.float(), B.float())\n out = F.igemm(A, B)\n elif not transpose[0] and transpose[1]:\n out2 = torch.matmul(A.float(), B.t().float())\n out = F.igemm(A, B.t())\n\n torch.testing.assert_close(out.float(), out2)\n\n @pytest.mark.parametrize(\"seq_dim\", [32, 256, 512], ids=id_formatter(\"seq_dim\"))\n @pytest.mark.parametrize(\"hidden_dim\", [64, 1024, 4096], ids=id_formatter(\"hidden_dim\"))\n @pytest.mark.parametrize(\"batch_dim\", [2, 8, 16], ids=id_formatter(\"batch_dim\"))\n def test_dim3_igemm(self, seq_dim, hidden_dim, batch_dim):\n seq_dim = seq_dim - (seq_dim % 32)\n hidden_dim = hidden_dim - (hidden_dim % 32)\n batch_dim = batch_dim - (batch_dim % 2)\n for i in range(25):\n A = torch.randint(-128, 127, size=(batch_dim, seq_dim, hidden_dim), device=\"cuda\").to(torch.int8)\n B = torch.randint(-128, 127, size=(batch_dim, seq_dim, 1024), device=\"cuda\").to(torch.int8)\n out2 = torch.einsum(\"bsi, bso->io\", A.float(), B.float())\n iout = torch.empty(A.shape[2], B.shape[2], dtype=torch.int32, device=A.device)\n out = F.igemm(A, B, out=iout)\n\n torch.testing.assert_close(out.float(), out2)\n\n @pytest.mark.parametrize(\"seq_dim\", [32, 512], ids=id_formatter(\"seq_dim\"))\n @pytest.mark.parametrize(\"hidden_dim\", [32, 1024 * 4], ids=id_formatter(\"hidden_dim\"))\n @pytest.mark.parametrize(\"batch_dim\", [2, 16], ids=id_formatter(\"batch_dim\"))","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_dim3_igemm","uri":"program://bitsandbytes/function/tests.test_functional.test_dim3_igemm#L449-L460","kind":"function","name":"test_dim3_igemm","path":"tests/test_functional.py","language":"python","start_line":449,"end_line":460,"context_start_line":429,"context_end_line":480,"code":"\n for i in range(k):\n shapeA = (batch_dim, seq_dim, hidden_dim)\n shapeB = (\n (32 * random.randint(1, 4), hidden_dim) if transpose[1] else (hidden_dim, 32 * random.randint(1, 4))\n )\n A = torch.randint(-128, 127, size=shapeA, device=\"cuda\").to(torch.int8)\n B = torch.randint(-128, 127, size=shapeB, device=\"cuda\").to(torch.int8)\n if not transpose[0] and not transpose[1]:\n out2 = torch.matmul(A.float(), B.float())\n out = F.igemm(A, B)\n elif not transpose[0] and transpose[1]:\n out2 = torch.matmul(A.float(), B.t().float())\n out = F.igemm(A, B.t())\n\n torch.testing.assert_close(out.float(), out2)\n\n @pytest.mark.parametrize(\"seq_dim\", [32, 256, 512], ids=id_formatter(\"seq_dim\"))\n @pytest.mark.parametrize(\"hidden_dim\", [64, 1024, 4096], ids=id_formatter(\"hidden_dim\"))\n @pytest.mark.parametrize(\"batch_dim\", [2, 8, 16], ids=id_formatter(\"batch_dim\"))\n def test_dim3_igemm(self, seq_dim, hidden_dim, batch_dim):\n seq_dim = seq_dim - (seq_dim % 32)\n hidden_dim = hidden_dim - (hidden_dim % 32)\n batch_dim = batch_dim - (batch_dim % 2)\n for i in range(25):\n A = torch.randint(-128, 127, size=(batch_dim, seq_dim, hidden_dim), device=\"cuda\").to(torch.int8)\n B = torch.randint(-128, 127, size=(batch_dim, seq_dim, 1024), device=\"cuda\").to(torch.int8)\n out2 = torch.einsum(\"bsi, bso->io\", A.float(), B.float())\n iout = torch.empty(A.shape[2], B.shape[2], dtype=torch.int32, device=A.device)\n out = F.igemm(A, B, out=iout)\n\n torch.testing.assert_close(out.float(), out2)\n\n @pytest.mark.parametrize(\"seq_dim\", [32, 512], ids=id_formatter(\"seq_dim\"))\n 
@pytest.mark.parametrize(\"hidden_dim\", [32, 1024 * 4], ids=id_formatter(\"hidden_dim\"))\n @pytest.mark.parametrize(\"batch_dim\", [2, 16], ids=id_formatter(\"batch_dim\"))\n @pytest.mark.parametrize(\"transpose\", TRUE_FALSE, ids=id_formatter(\"transpose\"))\n def test_minmax_igemm(self, seq_dim, hidden_dim, batch_dim, transpose):\n def min_max(x):\n maxA = torch.amax(x, dim=2, keepdim=True)\n minA = torch.amin(x, dim=2, keepdim=True)\n scale = (maxA - minA) / 2.0\n return (127 * (x - minA - scale) / scale).to(torch.int8), minA, scale\n\n seq_dim = seq_dim - (seq_dim % 16)\n hidden_dim = hidden_dim - (hidden_dim % 16)\n batch_dim = batch_dim - (batch_dim % 2)\n errs = []\n relerrs = []\n errs2 = []\n relerrs2 = []\n for i in range(k):","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_minmax_igemm","uri":"program://bitsandbytes/function/tests.test_functional.test_minmax_igemm#L466-L536","kind":"function","name":"test_minmax_igemm","path":"tests/test_functional.py","language":"python","start_line":466,"end_line":536,"context_start_line":446,"context_end_line":556,"code":" @pytest.mark.parametrize(\"seq_dim\", [32, 256, 512], ids=id_formatter(\"seq_dim\"))\n @pytest.mark.parametrize(\"hidden_dim\", [64, 1024, 4096], ids=id_formatter(\"hidden_dim\"))\n @pytest.mark.parametrize(\"batch_dim\", [2, 8, 16], ids=id_formatter(\"batch_dim\"))\n def test_dim3_igemm(self, seq_dim, hidden_dim, batch_dim):\n seq_dim = seq_dim - (seq_dim % 32)\n hidden_dim = hidden_dim - (hidden_dim % 32)\n batch_dim = batch_dim - (batch_dim % 2)\n for i in range(25):\n A = torch.randint(-128, 127, size=(batch_dim, seq_dim, hidden_dim), device=\"cuda\").to(torch.int8)\n B = torch.randint(-128, 127, size=(batch_dim, seq_dim, 1024), device=\"cuda\").to(torch.int8)\n out2 = torch.einsum(\"bsi, bso->io\", A.float(), B.float())\n iout = torch.empty(A.shape[2], B.shape[2], dtype=torch.int32, device=A.device)\n out = F.igemm(A, B, out=iout)\n\n torch.testing.assert_close(out.float(), out2)\n\n @pytest.mark.parametrize(\"seq_dim\", [32, 512], ids=id_formatter(\"seq_dim\"))\n @pytest.mark.parametrize(\"hidden_dim\", [32, 1024 * 4], ids=id_formatter(\"hidden_dim\"))\n @pytest.mark.parametrize(\"batch_dim\", [2, 16], ids=id_formatter(\"batch_dim\"))\n @pytest.mark.parametrize(\"transpose\", TRUE_FALSE, ids=id_formatter(\"transpose\"))\n def test_minmax_igemm(self, seq_dim, hidden_dim, batch_dim, transpose):\n def min_max(x):\n maxA = torch.amax(x, dim=2, keepdim=True)\n minA = torch.amin(x, dim=2, keepdim=True)\n scale = (maxA - minA) / 2.0\n return (127 * (x - minA - scale) / scale).to(torch.int8), minA, scale\n\n seq_dim = seq_dim - (seq_dim % 16)\n hidden_dim = hidden_dim - (hidden_dim % 16)\n batch_dim = batch_dim - (batch_dim % 2)\n errs = []\n relerrs = []\n errs2 = []\n relerrs2 = []\n for i in range(k):\n A = torch.normal(0.0, 0.5, size=(batch_dim, seq_dim, hidden_dim), device=\"cuda\")\n if transpose:\n B = torch.normal(0, 0.5, size=(256, hidden_dim), device=\"cuda\")\n else:\n B = torch.normal(0, 0.5, size=(hidden_dim, 256), device=\"cuda\")\n Ac, minA, scale = min_max(A)\n if transpose:\n maxB, Bc = quant_multi(B, dim=(1 if transpose else 0))\n out = F.igemm(Ac, Bc.t())\n out2 = torch.matmul(A, B.t())\n offset = B.t().sum(0) * (minA + scale)\n out = out.float()\n out = (out * maxB.t() * scale / (127 * 127)) + offset\n\n maxA, Ac = quant_multi(A, dim=2)\n out3 = F.igemm(Ac, Bc.t())\n out3 = 
mm_dequant(maxA, maxB.t(), out3)\n else:\n maxB, Bc = quant_multi(B, dim=0)\n offset = B.sum(0) * (minA + scale)\n out = F.igemm(Ac, Bc)\n out2 = torch.matmul(A, B)\n out = out.float()\n out = (out * maxB * scale / (127 * 127)) + offset\n\n maxA, Ac = quant_multi(A, dim=2)\n out3 = F.igemm(Ac, Bc)\n out3 = mm_dequant(maxA, maxB, out3)\n\n std = out2.std()\n out2 /= std\n out /= std\n out3 /= std\n\n err = torch.abs(out - out2)\n relerr = err / (torch.abs(out2) + 1e-7)\n\n err2 = torch.abs(out3 - out2)\n relerr2 = err2 / (torch.abs(out2) + 1e-7)\n\n errs.append(err.mean().item())\n relerrs.append(relerr.mean().item())\n errs2.append(err2.mean().item())\n relerrs2.append(relerr2.mean().item())\n # print(mean(errs))\n # print(mean(relerrs))\n # print(mean(errs2))\n # print(mean(relerrs2))\n assert mean(errs) < 0.015\n\n # There's a higher relerr on L40S with torch 2.4+cu118.\n is_sm89 = torch.cuda.get_device_capability() == (8, 9)\n if torch.version.cuda == \"11.8\" and is_sm89 and torch.__version__ < (2, 5):\n assert mean(relerrs) < 0.41\n else:\n assert mean(relerrs) < 0.3\n\n @pytest.mark.parametrize(\"dim1\", [1, 64], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [32, 128], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"dim3\", [32, 256], ids=id_formatter(\"dim3\"))\n @pytest.mark.parametrize(\"dim4\", [32, 256], ids=id_formatter(\"dim4\"))\n @pytest.mark.parametrize(\"transpose\", BOOLEAN_TUPLES, ids=id_formatter(\"transpose\"))\n def test_ibmm(self, dim1, dim2, dim3, dim4, transpose):\n dim2 = dim2 - (dim2 % 16)\n dim3 = dim3 - (dim3 % 16)\n dim4 = dim4 - (dim4 % 16)\n for i in range(k):\n shapeA = (dim1, dim3, dim2) if transpose[0] else (dim1, dim2, dim3)\n shapeB = (dim1, dim4, dim3) if transpose[1] else (dim1, dim3, dim4)\n A = torch.randint(-128, 127, size=shapeA, device=\"cuda\").to(torch.int8)\n B = torch.randint(-128, 127, size=shapeB, device=\"cuda\").to(torch.int8)\n\n if not transpose[0] and not transpose[1]:\n out2 = torch.bmm(A.float(), B.float())\n out = F.igemm(A, B)\n elif not transpose[0] and transpose[1]:","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_ibmm","uri":"program://bitsandbytes/function/tests.test_functional.test_ibmm#L543-L565","kind":"function","name":"test_ibmm","path":"tests/test_functional.py","language":"python","start_line":543,"end_line":565,"context_start_line":523,"context_end_line":585,"code":" errs2.append(err2.mean().item())\n relerrs2.append(relerr2.mean().item())\n # print(mean(errs))\n # print(mean(relerrs))\n # print(mean(errs2))\n # print(mean(relerrs2))\n assert mean(errs) < 0.015\n\n # There's a higher relerr on L40S with torch 2.4+cu118.\n is_sm89 = torch.cuda.get_device_capability() == (8, 9)\n if torch.version.cuda == \"11.8\" and is_sm89 and torch.__version__ < (2, 5):\n assert mean(relerrs) < 0.41\n else:\n assert mean(relerrs) < 0.3\n\n @pytest.mark.parametrize(\"dim1\", [1, 64], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [32, 128], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"dim3\", [32, 256], ids=id_formatter(\"dim3\"))\n @pytest.mark.parametrize(\"dim4\", [32, 256], ids=id_formatter(\"dim4\"))\n @pytest.mark.parametrize(\"transpose\", BOOLEAN_TUPLES, ids=id_formatter(\"transpose\"))\n def test_ibmm(self, dim1, dim2, dim3, dim4, transpose):\n dim2 = dim2 - (dim2 % 16)\n dim3 = dim3 - (dim3 % 16)\n dim4 = dim4 - (dim4 % 16)\n for i in range(k):\n 
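# --- Illustrative aside (not part of the indexed source records): the offset
# algebra in test_minmax_igemm. min_max stores x as
# xq = 127*(x - minA - scale)/scale, so x = xq*scale/127 + (minA + scale);
# after a matmul the constant term becomes (minA + scale) * column-sums of B,
# which is exactly the test's `offset`. Pure-torch check:
import torch

def min_max(x):
    maxA = torch.amax(x, dim=1, keepdim=True)
    minA = torch.amin(x, dim=1, keepdim=True)
    scale = (maxA - minA) / 2.0
    return (127 * (x - minA - scale) / scale).to(torch.int8), minA, scale

A = torch.normal(0.0, 0.5, size=(16, 64))
B = torch.normal(0.0, 0.5, size=(64, 32))
Aq, minA, scale = min_max(A)
approx = (Aq.float() @ B) * scale / 127 + (minA + scale) * B.sum(0)
print((approx - A @ B).abs().mean())  # small residual from int8 rounding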
shapeA = (dim1, dim3, dim2) if transpose[0] else (dim1, dim2, dim3)\n shapeB = (dim1, dim4, dim3) if transpose[1] else (dim1, dim3, dim4)\n A = torch.randint(-128, 127, size=shapeA, device=\"cuda\").to(torch.int8)\n B = torch.randint(-128, 127, size=shapeB, device=\"cuda\").to(torch.int8)\n\n if not transpose[0] and not transpose[1]:\n out2 = torch.bmm(A.float(), B.float())\n out = F.igemm(A, B)\n elif not transpose[0] and transpose[1]:\n out2 = torch.bmm(A.float(), B.permute([0, 2, 1]).float())\n out = F.igemm(A, B.permute([0, 2, 1]))\n elif transpose[0] and not transpose[1]:\n out2 = torch.bmm(A.permute([0, 2, 1]).float(), B.float())\n out = F.igemm(A.permute([0, 2, 1]), B)\n elif transpose[0] and transpose[1]:\n out2 = torch.bmm(A.permute([0, 2, 1]).float(), B.permute([0, 2, 1]).float())\n out = F.igemm(A.permute([0, 2, 1]), B.permute([0, 2, 1]))\n torch.testing.assert_close(out.float(), out2.float())\n\n\nclass TestLLMInt8Functional:\n @staticmethod\n def vectorwise_mm_dequant(xq, S1, S2, dtype=torch.half):\n \"\"\"Reference implementation for the F.int8_mm_dequant function.\"\"\"\n C = 127.0\n\n x = xq.float()\n if len(S1.shape) == 3 and len(x.shape) == 2:\n S1 = S1.squeeze(0)\n if len(S2.shape) == 3 and len(x.shape) == 2:\n S2 = S2.squeeze(0)\n if len(S1.shape) == 2:\n x *= S1 / C\n else:\n x *= S1 / C\n x *= S2 / C\n return x.to(dtype)\n","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.vectorwise_mm_dequant","uri":"program://bitsandbytes/function/tests.test_functional.vectorwise_mm_dequant#L570-L584","kind":"function","name":"vectorwise_mm_dequant","path":"tests/test_functional.py","language":"python","start_line":570,"end_line":584,"context_start_line":550,"context_end_line":604,"code":" A = torch.randint(-128, 127, size=shapeA, device=\"cuda\").to(torch.int8)\n B = torch.randint(-128, 127, size=shapeB, device=\"cuda\").to(torch.int8)\n\n if not transpose[0] and not transpose[1]:\n out2 = torch.bmm(A.float(), B.float())\n out = F.igemm(A, B)\n elif not transpose[0] and transpose[1]:\n out2 = torch.bmm(A.float(), B.permute([0, 2, 1]).float())\n out = F.igemm(A, B.permute([0, 2, 1]))\n elif transpose[0] and not transpose[1]:\n out2 = torch.bmm(A.permute([0, 2, 1]).float(), B.float())\n out = F.igemm(A.permute([0, 2, 1]), B)\n elif transpose[0] and transpose[1]:\n out2 = torch.bmm(A.permute([0, 2, 1]).float(), B.permute([0, 2, 1]).float())\n out = F.igemm(A.permute([0, 2, 1]), B.permute([0, 2, 1]))\n torch.testing.assert_close(out.float(), out2.float())\n\n\nclass TestLLMInt8Functional:\n @staticmethod\n def vectorwise_mm_dequant(xq, S1, S2, dtype=torch.half):\n \"\"\"Reference implementation for the F.int8_mm_dequant function.\"\"\"\n C = 127.0\n\n x = xq.float()\n if len(S1.shape) == 3 and len(x.shape) == 2:\n S1 = S1.squeeze(0)\n if len(S2.shape) == 3 and len(x.shape) == 2:\n S2 = S2.squeeze(0)\n if len(S1.shape) == 2:\n x *= S1 / C\n else:\n x *= S1 / C\n x *= S2 / C\n return x.to(dtype)\n\n @staticmethod\n def vectorwise_quant(x, dim=1):\n \"\"\"Reference implementation\"\"\"\n max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)\n xq = torch.round(x * (127.0 / max1)).to(torch.int8)\n return xq, max1\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dim1\", [128], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [256], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"dim3\", [499, 512], 
ids=id_formatter(\"dim3\"))\n @pytest.mark.parametrize(\"dim4\", [512], ids=id_formatter(\"dim4\"))\n @pytest.mark.parametrize(\"dims\", (2, 3), ids=id_formatter(\"dims\"))\n @pytest.mark.parametrize(\"ldb\", (0,), ids=id_formatter(\"ldb\"))\n def test_int8_linear_matmul(self, device, dim1, dim2, dim3, dim4, dims, ldb):\n for i in range(k):\n if dims == 2:\n A = torch.randint(-128, 127, size=(dim1, dim3), dtype=torch.int8, device=device)\n elif dims == 3:","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.vectorwise_quant","uri":"program://bitsandbytes/function/tests.test_functional.vectorwise_quant#L587-L591","kind":"function","name":"vectorwise_quant","path":"tests/test_functional.py","language":"python","start_line":587,"end_line":591,"context_start_line":567,"context_end_line":611,"code":"\nclass TestLLMInt8Functional:\n @staticmethod\n def vectorwise_mm_dequant(xq, S1, S2, dtype=torch.half):\n \"\"\"Reference implementation for the F.int8_mm_dequant function.\"\"\"\n C = 127.0\n\n x = xq.float()\n if len(S1.shape) == 3 and len(x.shape) == 2:\n S1 = S1.squeeze(0)\n if len(S2.shape) == 3 and len(x.shape) == 2:\n S2 = S2.squeeze(0)\n if len(S1.shape) == 2:\n x *= S1 / C\n else:\n x *= S1 / C\n x *= S2 / C\n return x.to(dtype)\n\n @staticmethod\n def vectorwise_quant(x, dim=1):\n \"\"\"Reference implementation\"\"\"\n max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)\n xq = torch.round(x * (127.0 / max1)).to(torch.int8)\n return xq, max1\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dim1\", [128], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [256], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"dim3\", [499, 512], ids=id_formatter(\"dim3\"))\n @pytest.mark.parametrize(\"dim4\", [512], ids=id_formatter(\"dim4\"))\n @pytest.mark.parametrize(\"dims\", (2, 3), ids=id_formatter(\"dims\"))\n @pytest.mark.parametrize(\"ldb\", (0,), ids=id_formatter(\"ldb\"))\n def test_int8_linear_matmul(self, device, dim1, dim2, dim3, dim4, dims, ldb):\n for i in range(k):\n if dims == 2:\n A = torch.randint(-128, 127, size=(dim1, dim3), dtype=torch.int8, device=device)\n elif dims == 3:\n A = torch.randint(-128, 127, size=(dim1, dim2, dim3), dtype=torch.int8, device=device)\n B = torch.randint(-128, 127, size=(dim4, dim3), dtype=torch.int8, device=device)\n C1 = torch.matmul(A.float(), B.t().float())\n\n C2 = F.int8_linear_matmul(A, B)\n torch.testing.assert_close(C1, C2.float())\n","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_int8_linear_matmul","uri":"program://bitsandbytes/function/tests.test_functional.test_int8_linear_matmul#L600-L610","kind":"function","name":"test_int8_linear_matmul","path":"tests/test_functional.py","language":"python","start_line":600,"end_line":610,"context_start_line":580,"context_end_line":630,"code":" x *= S1 / C\n else:\n x *= S1 / C\n x *= S2 / C\n return x.to(dtype)\n\n @staticmethod\n def vectorwise_quant(x, dim=1):\n \"\"\"Reference implementation\"\"\"\n max1 = torch.amax(torch.abs(x), dim=dim, keepdim=True)\n xq = torch.round(x * (127.0 / max1)).to(torch.int8)\n return xq, max1\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dim1\", [128], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [256], 
ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"dim3\", [499, 512], ids=id_formatter(\"dim3\"))\n @pytest.mark.parametrize(\"dim4\", [512], ids=id_formatter(\"dim4\"))\n @pytest.mark.parametrize(\"dims\", (2, 3), ids=id_formatter(\"dims\"))\n @pytest.mark.parametrize(\"ldb\", (0,), ids=id_formatter(\"ldb\"))\n def test_int8_linear_matmul(self, device, dim1, dim2, dim3, dim4, dims, ldb):\n for i in range(k):\n if dims == 2:\n A = torch.randint(-128, 127, size=(dim1, dim3), dtype=torch.int8, device=device)\n elif dims == 3:\n A = torch.randint(-128, 127, size=(dim1, dim2, dim3), dtype=torch.int8, device=device)\n B = torch.randint(-128, 127, size=(dim4, dim3), dtype=torch.int8, device=device)\n C1 = torch.matmul(A.float(), B.t().float())\n\n C2 = F.int8_linear_matmul(A, B)\n torch.testing.assert_close(C1, C2.float())\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dim1\", [32], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [32], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"dim3\", [32], ids=id_formatter(\"dim3\"))\n @pytest.mark.parametrize(\"dim4\", [32], ids=id_formatter(\"dim4\"))\n @pytest.mark.parametrize(\"dims\", (2,), ids=id_formatter(\"dims\"))\n def test_int8_linear_matmul_half(self, device, dim1, dim2, dim3, dim4, dims):\n for i in range(k):\n if dims == 2:\n A = torch.normal(0, 0.5, size=(dim1, dim3), device=device).half()\n elif dims == 3:\n A = torch.normal(0, 0.5, size=(dim1, dim2, dim3), device=device).half()\n B = torch.randn((dim4, dim3), device=device).half()\n torch.nn.init.xavier_uniform_(B)\n C1 = torch.matmul(A, B.t())\n\n A = A.view(-1, A.shape[-1])\n\n CA, statsA, _ = F.int8_vectorwise_quant(A)","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_int8_linear_matmul_half","uri":"program://bitsandbytes/function/tests.test_functional.test_int8_linear_matmul_half#L618-L634","kind":"function","name":"test_int8_linear_matmul_half","path":"tests/test_functional.py","language":"python","start_line":618,"end_line":634,"context_start_line":598,"context_end_line":654,"code":" @pytest.mark.parametrize(\"dims\", (2, 3), ids=id_formatter(\"dims\"))\n @pytest.mark.parametrize(\"ldb\", (0,), ids=id_formatter(\"ldb\"))\n def test_int8_linear_matmul(self, device, dim1, dim2, dim3, dim4, dims, ldb):\n for i in range(k):\n if dims == 2:\n A = torch.randint(-128, 127, size=(dim1, dim3), dtype=torch.int8, device=device)\n elif dims == 3:\n A = torch.randint(-128, 127, size=(dim1, dim2, dim3), dtype=torch.int8, device=device)\n B = torch.randint(-128, 127, size=(dim4, dim3), dtype=torch.int8, device=device)\n C1 = torch.matmul(A.float(), B.t().float())\n\n C2 = F.int8_linear_matmul(A, B)\n torch.testing.assert_close(C1, C2.float())\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dim1\", [32], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [32], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"dim3\", [32], ids=id_formatter(\"dim3\"))\n @pytest.mark.parametrize(\"dim4\", [32], ids=id_formatter(\"dim4\"))\n @pytest.mark.parametrize(\"dims\", (2,), ids=id_formatter(\"dims\"))\n def test_int8_linear_matmul_half(self, device, dim1, dim2, dim3, dim4, dims):\n for i in range(k):\n if dims == 2:\n A = torch.normal(0, 0.5, size=(dim1, dim3), device=device).half()\n elif dims == 3:\n A = torch.normal(0, 0.5, size=(dim1, dim2, 
dim3), device=device).half()\n B = torch.randn((dim4, dim3), device=device).half()\n torch.nn.init.xavier_uniform_(B)\n C1 = torch.matmul(A, B.t())\n\n A = A.view(-1, A.shape[-1])\n\n CA, statsA, _ = F.int8_vectorwise_quant(A)\n CB, statsB, _ = F.int8_vectorwise_quant(B)\n output = F.int8_mm_dequant(F.int8_linear_matmul(CA, CB), statsA, statsB)\n\n torch.testing.assert_close(C1.view(-1, C1.shape[-1]), output, atol=0.025, rtol=0.05)\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dim1\", (64, 256), ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim4\", (64, 1024), ids=id_formatter(\"dim4\"))\n @pytest.mark.parametrize(\"dims\", (2,), ids=id_formatter(\"dims\"))\n @pytest.mark.parametrize(\"has_bias\", TRUE_FALSE, ids=id_formatter(\"has_bias\"))\n def test_dequant_mm(self, device, dim1, dim4, dims, has_bias):\n inner = 128\n bias = None\n if has_bias:\n bias = torch.randn(dim4, device=device, dtype=torch.float16)\n\n for i in range(1):\n A = torch.randn(dim1, inner, device=device)\n B = torch.randn(dim4, inner, device=device)\n C1 = torch.matmul(A.half(), B.t().half())\n if has_bias:\n C1 += bias\n\n A1, maxA = self.vectorwise_quant(A, dim=1)","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_dequant_mm","uri":"program://bitsandbytes/function/tests.test_functional.test_dequant_mm#L641-L676","kind":"function","name":"test_dequant_mm","path":"tests/test_functional.py","language":"python","start_line":641,"end_line":676,"context_start_line":621,"context_end_line":696,"code":" A = torch.normal(0, 0.5, size=(dim1, dim3), device=device).half()\n elif dims == 3:\n A = torch.normal(0, 0.5, size=(dim1, dim2, dim3), device=device).half()\n B = torch.randn((dim4, dim3), device=device).half()\n torch.nn.init.xavier_uniform_(B)\n C1 = torch.matmul(A, B.t())\n\n A = A.view(-1, A.shape[-1])\n\n CA, statsA, _ = F.int8_vectorwise_quant(A)\n CB, statsB, _ = F.int8_vectorwise_quant(B)\n output = F.int8_mm_dequant(F.int8_linear_matmul(CA, CB), statsA, statsB)\n\n torch.testing.assert_close(C1.view(-1, C1.shape[-1]), output, atol=0.025, rtol=0.05)\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dim1\", (64, 256), ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim4\", (64, 1024), ids=id_formatter(\"dim4\"))\n @pytest.mark.parametrize(\"dims\", (2,), ids=id_formatter(\"dims\"))\n @pytest.mark.parametrize(\"has_bias\", TRUE_FALSE, ids=id_formatter(\"has_bias\"))\n def test_dequant_mm(self, device, dim1, dim4, dims, has_bias):\n inner = 128\n bias = None\n if has_bias:\n bias = torch.randn(dim4, device=device, dtype=torch.float16)\n\n for i in range(1):\n A = torch.randn(dim1, inner, device=device)\n B = torch.randn(dim4, inner, device=device)\n C1 = torch.matmul(A.half(), B.t().half())\n if has_bias:\n C1 += bias\n\n A1, maxA = self.vectorwise_quant(A, dim=1)\n B1, maxB = self.vectorwise_quant(B, dim=1)\n\n C2 = F.int8_linear_matmul(A1, B1)\n\n C4 = self.vectorwise_mm_dequant(C2.float(), maxA, maxB.t())\n if has_bias:\n C4 += bias\n\n # TODO: is something wrong here? 
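# --- Illustrative aside (not part of the indexed source records): the
# quantize -> int8 matmul -> dequantize pipeline from
# test_int8_linear_matmul_half, stand-alone. Note int8_linear_matmul
# computes A @ B^T (rows of B are output features). Assumes CUDA + F alias.
import torch
import bitsandbytes.functional as F

A = torch.normal(0, 0.5, size=(32, 32), device="cuda").half()
B = torch.randn(32, 32, device="cuda").half()
ref = A @ B.t()

CA, statsA, _ = F.int8_vectorwise_quant(A)
CB, statsB, _ = F.int8_vectorwise_quant(B)
out = F.int8_mm_dequant(F.int8_linear_matmul(CA, CB), statsA, statsB)
torch.testing.assert_close(ref, out, atol=0.025, rtol=0.05)  # test tolerances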
If so, the problem goes deeper\n # n = C1.numel()\n # p = 0.06\n std = C1.std(0).view(1, -1)\n C1 /= std\n C4 /= std\n # assert_all_approx_close(C1, C4, atol=0.02, rtol=0.1, count=int(n*0.06))\n # assert (count / n < p), f\"error in more than {p} of elements: {count}/{n}={count/n}\"\n\n C5 = F.int8_mm_dequant(C2, maxA, maxB, bias=bias)\n C5 /= std\n torch.testing.assert_close(C5, C4, atol=0.015, rtol=0.1)\n n = C5.numel()\n assert_all_approx_close(C1, C4, atol=0.015, rtol=0.1, count=int(0.01 * n))\n\n @pytest.mark.parametrize(\"dim1\", [1 * 1024], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [1 * 1024], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"dims\", (2,), ids=id_formatter(\"dims\"))\n @pytest.mark.parametrize(\"threshold\", [0.0, 3.0], ids=id_formatter(\"decomp\"))\n @pytest.mark.deprecated\n def test_colrow_absmax(self, dim1, dim2, dims, threshold):\n for i in range(k):\n A = torch.randn(dim1, dim2, device=\"cuda\").half()\n\n assert dims == 2\n\n row_stats1, _ = torch.abs(A.float()).max(1)\n col_stats1, _ = torch.abs(A.float()).max(0)\n\n if threshold > 0.0:\n A_truncated = A.clone()\n A_truncated[torch.abs(A_truncated) >= threshold] = 0.0\n row_stats1_trunc, _ = torch.abs(A_truncated.float()).max(1)\n col_stats1_trunc, _ = torch.abs(A_truncated.float()).max(0)","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_colrow_absmax","uri":"program://bitsandbytes/function/tests.test_functional.test_colrow_absmax#L683-L715","kind":"function","name":"test_colrow_absmax","path":"tests/test_functional.py","language":"python","start_line":683,"end_line":715,"context_start_line":663,"context_end_line":735,"code":" # TODO: is something wrong here? 
If so, the problem goes deeper\n # n = C1.numel()\n # p = 0.06\n std = C1.std(0).view(1, -1)\n C1 /= std\n C4 /= std\n # assert_all_approx_close(C1, C4, atol=0.02, rtol=0.1, count=int(n*0.06))\n # assert (count / n < p), f\"error in more than {p} of elements: {count}/{n}={count/n}\"\n\n C5 = F.int8_mm_dequant(C2, maxA, maxB, bias=bias)\n C5 /= std\n torch.testing.assert_close(C5, C4, atol=0.015, rtol=0.1)\n n = C5.numel()\n assert_all_approx_close(C1, C4, atol=0.015, rtol=0.1, count=int(0.01 * n))\n\n @pytest.mark.parametrize(\"dim1\", [1 * 1024], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [1 * 1024], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"dims\", (2,), ids=id_formatter(\"dims\"))\n @pytest.mark.parametrize(\"threshold\", [0.0, 3.0], ids=id_formatter(\"decomp\"))\n @pytest.mark.deprecated\n def test_colrow_absmax(self, dim1, dim2, dims, threshold):\n for i in range(k):\n A = torch.randn(dim1, dim2, device=\"cuda\").half()\n\n assert dims == 2\n\n row_stats1, _ = torch.abs(A.float()).max(1)\n col_stats1, _ = torch.abs(A.float()).max(0)\n\n if threshold > 0.0:\n A_truncated = A.clone()\n A_truncated[torch.abs(A_truncated) >= threshold] = 0.0\n row_stats1_trunc, _ = torch.abs(A_truncated.float()).max(1)\n col_stats1_trunc, _ = torch.abs(A_truncated.float()).max(0)\n\n row_stats2, col_stats2, nnz_block_ptr2 = F.get_colrow_absmax(A, threshold=threshold)\n\n nnz_rows1_counts = (torch.abs(A) >= threshold).sum(1).flatten()\n nnz_block_ptr1 = torch.zeros(\n nnz_rows1_counts.shape[0] + 1,\n dtype=nnz_rows1_counts.dtype,\n device=nnz_rows1_counts.device,\n )\n nnz_block_ptr1[1:] = nnz_rows1_counts.cumsum(0)\n\n torch.testing.assert_close(col_stats1_trunc, col_stats2)\n torch.testing.assert_close(row_stats1_trunc, row_stats2)\n # torch.testing.assert_close(nnz_block_ptr1, nnz_block_ptr2)\n else:\n row_stats2, col_stats2, nnz_block_ptr2 = F.get_colrow_absmax(A, threshold=0.0)\n assert nnz_block_ptr2 is None\n torch.testing.assert_close(col_stats1, col_stats2)\n torch.testing.assert_close(row_stats1, row_stats2)\n\n @pytest.mark.parametrize(\"dim1\", [2048, 4096], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [512, 1024], ids=id_formatter(\"dim2\"))\n @pytest.mark.deprecated\n def test_int8_double_quant(self, dim1, dim2):\n for i in range(k):\n A = torch.randn(dim1, dim2, device=\"cuda\").half()\n out_col1, Scol = self.vectorwise_quant(A, dim=0)\n out_row1, Srow = self.vectorwise_quant(A, dim=1)\n\n CA, CAt, statsA, statsAt, _ = F.int8_double_quant(A)\n\n # max difference is 1 due to rounding differences\n torch.testing.assert_close(CA, out_row1, atol=1, rtol=0)\n torch.testing.assert_close(CAt, out_col1, atol=1, rtol=0)\n\n n = CAt.numel()\n num_not_close_rows = (torch.isclose(CA, out_row1, atol=1) == 0).sum().item()\n num_not_close_cols = (torch.isclose(CAt, out_col1, atol=1) == 0).sum().item()\n","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_int8_double_quant","uri":"program://bitsandbytes/function/tests.test_functional.test_int8_double_quant#L720-L750","kind":"function","name":"test_int8_double_quant","path":"tests/test_functional.py","language":"python","start_line":720,"end_line":750,"context_start_line":700,"context_end_line":770,"code":" nnz_rows1_counts = (torch.abs(A) >= threshold).sum(1).flatten()\n nnz_block_ptr1 = torch.zeros(\n nnz_rows1_counts.shape[0] + 1,\n dtype=nnz_rows1_counts.dtype,\n 
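# --- Illustrative aside (not part of the indexed source records): what
# get_colrow_absmax returns with no outlier threshold, checked against plain
# torch reductions as test_colrow_absmax does. Assumes CUDA and the F alias;
# note the test marks this API as deprecated.
import torch
import bitsandbytes.functional as F

A = torch.randn(1024, 1024, device="cuda").half()
row_stats, col_stats, nnz_block_ptr = F.get_colrow_absmax(A, threshold=0.0)
torch.testing.assert_close(row_stats, A.float().abs().amax(1))
torch.testing.assert_close(col_stats, A.float().abs().amax(0))
assert nnz_block_ptr is None  # outlier bookkeeping only exists for threshold > 0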
device=nnz_rows1_counts.device,\n )\n nnz_block_ptr1[1:] = nnz_rows1_counts.cumsum(0)\n\n torch.testing.assert_close(col_stats1_trunc, col_stats2)\n torch.testing.assert_close(row_stats1_trunc, row_stats2)\n # torch.testing.assert_close(nnz_block_ptr1, nnz_block_ptr2)\n else:\n row_stats2, col_stats2, nnz_block_ptr2 = F.get_colrow_absmax(A, threshold=0.0)\n assert nnz_block_ptr2 is None\n torch.testing.assert_close(col_stats1, col_stats2)\n torch.testing.assert_close(row_stats1, row_stats2)\n\n @pytest.mark.parametrize(\"dim1\", [2048, 4096], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [512, 1024], ids=id_formatter(\"dim2\"))\n @pytest.mark.deprecated\n def test_int8_double_quant(self, dim1, dim2):\n for i in range(k):\n A = torch.randn(dim1, dim2, device=\"cuda\").half()\n out_col1, Scol = self.vectorwise_quant(A, dim=0)\n out_row1, Srow = self.vectorwise_quant(A, dim=1)\n\n CA, CAt, statsA, statsAt, _ = F.int8_double_quant(A)\n\n # max difference is 1 due to rounding differences\n torch.testing.assert_close(CA, out_row1, atol=1, rtol=0)\n torch.testing.assert_close(CAt, out_col1, atol=1, rtol=0)\n\n n = CAt.numel()\n num_not_close_rows = (torch.isclose(CA, out_row1, atol=1) == 0).sum().item()\n num_not_close_cols = (torch.isclose(CAt, out_col1, atol=1) == 0).sum().item()\n\n # allow for 1:500 error due to rounding differences\n min_error = 1 / 500\n if num_not_close_cols > (min_error * n):\n print(\n f\"Min error exceeded {num_not_close_cols} elements are different. Error: {num_not_close_cols / n:.4f}\"\n )\n assert False\n if num_not_close_rows > (min_error * n):\n print(\n f\"Min error exceeded {num_not_close_rows} elements are different. Error: {num_not_close_rows / n:.4f}\"\n )\n assert False\n\n torch.testing.assert_close(Srow.flatten().float(), statsA)\n torch.testing.assert_close(Scol.flatten().float(), statsAt)\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\n (\"dim1\", \"dim4\", \"inner\"),\n (\n pytest.param(dim1, dim4, inner, id=f\"{dim1=},{dim4=},{inner=}\")\n for (dim1, dim4, inner) in zip(\n (1, 8, 2048, 4096),\n (2, 128, 2048, 4096),\n (4, 256, 512, 4096),\n )\n ),\n )\n def test_integrated_int8_linear_matmul(self, device, dim1, dim4, inner):\n if device == \"cpu\" and inner > 2048:\n pytest.skip(\"Slow on CPU\")\n\n for i in range(k):\n A = torch.randn(dim1, inner, device=device).half()\n B = torch.randn(dim4, inner, device=device).half()","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_integrated_int8_linear_matmul","uri":"program://bitsandbytes/function/tests.test_functional.test_integrated_int8_linear_matmul#L764-L792","kind":"function","name":"test_integrated_int8_linear_matmul","path":"tests/test_functional.py","language":"python","start_line":764,"end_line":792,"context_start_line":744,"context_end_line":812,"code":" print(\n f\"Min error exceeded {num_not_close_rows} elements are different. 
Error: {num_not_close_rows / n:.4f}\"\n )\n assert False\n\n torch.testing.assert_close(Srow.flatten().float(), statsA)\n torch.testing.assert_close(Scol.flatten().float(), statsAt)\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\n (\"dim1\", \"dim4\", \"inner\"),\n (\n pytest.param(dim1, dim4, inner, id=f\"{dim1=},{dim4=},{inner=}\")\n for (dim1, dim4, inner) in zip(\n (1, 8, 2048, 4096),\n (2, 128, 2048, 4096),\n (4, 256, 512, 4096),\n )\n ),\n )\n def test_integrated_int8_linear_matmul(self, device, dim1, dim4, inner):\n if device == \"cpu\" and inner > 2048:\n pytest.skip(\"Slow on CPU\")\n\n for i in range(k):\n A = torch.randn(dim1, inner, device=device).half()\n B = torch.randn(dim4, inner, device=device).half()\n\n out1 = torch.matmul(A.half(), B.t().half())\n\n C1a, stats1a, _ = F.int8_vectorwise_quant(A)\n C2a, stats2a, _ = F.int8_vectorwise_quant(B)\n A1, maxA = self.vectorwise_quant(A, dim=1)\n B1, maxB = self.vectorwise_quant(B, dim=1)\n\n torch.testing.assert_close(maxA.flatten().float(), stats1a)\n torch.testing.assert_close(maxB.flatten().float(), stats2a)\n torch.testing.assert_close(C1a, A1, rtol=0, atol=1)\n torch.testing.assert_close(C2a, B1, rtol=0, atol=1)\n\n out2 = F.int8_linear_matmul(A1, B1)\n\n C2 = F.int8_linear_matmul(A1, B1)\n\n out3 = self.vectorwise_mm_dequant(C2.float(), maxA, maxB.t())\n\n err1 = torch.abs(out1 - out2).mean().item()\n err2 = torch.abs(out1 - out3).mean().item()\n assert err2 <= err1 * 1.025\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dim1\", [512, 2048], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [1024, 4096], ids=id_formatter(\"dim2\"))\n def test_coo_double_quant(self, device, dim1, dim2):\n threshold = 2.00\n for i in range(k):\n A = torch.randn(dim1, dim2, device=device).half()\n\n idx = torch.abs(A) >= threshold\n CA, statsA, outlier_cols = F.int8_vectorwise_quant(A, threshold=threshold)\n\n if outlier_cols is not None:\n A1 = A * idx\n A2 = torch.zeros_like(A) + A1\n torch.testing.assert_close(A1, A2)\n\n A[:, outlier_cols] = 0\n A2 = (CA.float() * statsA.unsqueeze(1) / 127).half()\n torch.testing.assert_close(A, A2, rtol=0.05, atol=1.5e-2)","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_coo_double_quant","uri":"program://bitsandbytes/function/tests.test_functional.test_coo_double_quant#L797-L812","kind":"function","name":"test_coo_double_quant","path":"tests/test_functional.py","language":"python","start_line":797,"end_line":812,"context_start_line":777,"context_end_line":832,"code":" B1, maxB = self.vectorwise_quant(B, dim=1)\n\n torch.testing.assert_close(maxA.flatten().float(), stats1a)\n torch.testing.assert_close(maxB.flatten().float(), stats2a)\n torch.testing.assert_close(C1a, A1, rtol=0, atol=1)\n torch.testing.assert_close(C2a, B1, rtol=0, atol=1)\n\n out2 = F.int8_linear_matmul(A1, B1)\n\n C2 = F.int8_linear_matmul(A1, B1)\n\n out3 = self.vectorwise_mm_dequant(C2.float(), maxA, maxB.t())\n\n err1 = torch.abs(out1 - out2).mean().item()\n err2 = torch.abs(out1 - out3).mean().item()\n assert err2 <= err1 * 1.025\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dim1\", [512, 2048], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [1024, 4096], ids=id_formatter(\"dim2\"))\n def test_coo_double_quant(self, device, dim1, dim2):\n 
threshold = 2.00\n for i in range(k):\n A = torch.randn(dim1, dim2, device=device).half()\n\n idx = torch.abs(A) >= threshold\n CA, statsA, outlier_cols = F.int8_vectorwise_quant(A, threshold=threshold)\n\n if outlier_cols is not None:\n A1 = A * idx\n A2 = torch.zeros_like(A) + A1\n torch.testing.assert_close(A1, A2)\n\n A[:, outlier_cols] = 0\n A2 = (CA.float() * statsA.unsqueeze(1) / 127).half()\n torch.testing.assert_close(A, A2, rtol=0.05, atol=1.5e-2)\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dim1\", [512, 2048], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [1024, 4096], ids=id_formatter(\"dim2\"))\n def test_coo_int8_vectorwise_quant(self, device, dim1, dim2):\n threshold = 3.00\n for i in range(k):\n A = torch.randn(dim1, dim2, device=device).half()\n\n idx = torch.abs(A) >= threshold\n CA, statsA, outlier_cols = F.int8_vectorwise_quant(A, threshold=threshold)\n\n if outlier_cols is not None:\n A2 = (CA.float() * statsA.unsqueeze(1) / 127).half()\n A[:, outlier_cols] = 0\n torch.testing.assert_close(A * (idx == 0), A2, rtol=0.05, atol=1.5e-2)\n\n\n@pytest.mark.skipif(HIP_ENVIRONMENT, reason=\"this test is not supported on ROCm yet\")\n@pytest.mark.skipif(not torch.cuda.is_available(), reason=\"CUDA is required\")","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_coo_int8_vectorwise_quant","uri":"program://bitsandbytes/function/tests.test_functional.test_coo_int8_vectorwise_quant#L817-L828","kind":"function","name":"test_coo_int8_vectorwise_quant","path":"tests/test_functional.py","language":"python","start_line":817,"end_line":828,"context_start_line":797,"context_end_line":848,"code":" def test_coo_double_quant(self, device, dim1, dim2):\n threshold = 2.00\n for i in range(k):\n A = torch.randn(dim1, dim2, device=device).half()\n\n idx = torch.abs(A) >= threshold\n CA, statsA, outlier_cols = F.int8_vectorwise_quant(A, threshold=threshold)\n\n if outlier_cols is not None:\n A1 = A * idx\n A2 = torch.zeros_like(A) + A1\n torch.testing.assert_close(A1, A2)\n\n A[:, outlier_cols] = 0\n A2 = (CA.float() * statsA.unsqueeze(1) / 127).half()\n torch.testing.assert_close(A, A2, rtol=0.05, atol=1.5e-2)\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dim1\", [512, 2048], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [1024, 4096], ids=id_formatter(\"dim2\"))\n def test_coo_int8_vectorwise_quant(self, device, dim1, dim2):\n threshold = 3.00\n for i in range(k):\n A = torch.randn(dim1, dim2, device=device).half()\n\n idx = torch.abs(A) >= threshold\n CA, statsA, outlier_cols = F.int8_vectorwise_quant(A, threshold=threshold)\n\n if outlier_cols is not None:\n A2 = (CA.float() * statsA.unsqueeze(1) / 127).half()\n A[:, outlier_cols] = 0\n torch.testing.assert_close(A * (idx == 0), A2, rtol=0.05, atol=1.5e-2)\n\n\n@pytest.mark.skipif(HIP_ENVIRONMENT, reason=\"this test is not supported on ROCm yet\")\n@pytest.mark.skipif(not torch.cuda.is_available(), reason=\"CUDA is required\")\nclass TestSpMMFunctional:\n @pytest.mark.parametrize(\"dim1\", [256, 1024], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [128, 512], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"transposed_B\", TRUE_FALSE, ids=id_formatter(\"transposed_B\"))\n def test_spmm_coo(self, dim1, dim2, transposed_B):\n threshold = 1.5\n dim3 = 
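Note: the threshold path exercised by test_coo_double_quant is the LLM.int8()-style decomposition: columns containing outliers are excluded from int8 quantization and handled separately in fp16. A compact pure-PyTorch illustration (the planted outlier columns are an assumption for the demo):

import torch

threshold = 6.0
A = torch.randn(128, 512).half()
A[:, [3, 42]] += 8.0           # plant two outlier feature columns

outlier_cols = torch.where((A.abs() >= threshold).any(dim=0))[0]   # -> columns 3 and 42
A_base = A.clone()
A_base[:, outlier_cols] = 0    # outlier columns are multiplied separately in fp16

scale = A_base.abs().amax(dim=1, keepdim=True).float().clamp(min=1e-8)
C = torch.round(A_base.float() / scale * 127).to(torch.int8)
A_hat = (C.float() * scale / 127).half()
torch.testing.assert_close(A_base, A_hat, rtol=0.05, atol=2e-2)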
torch.randint(32, 128, size=(1,)).item()\n # dim3 = 17\n for i in range(k):\n A = torch.randn(dim1, dim2).cuda().half()\n if transposed_B:\n B = torch.randn(dim3, dim2).cuda().half()\n else:\n B = torch.randn(dim2, dim3).cuda().half()\n\n idx = torch.abs(A) >= threshold","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_spmm_coo","uri":"program://bitsandbytes/function/tests.test_functional.test_spmm_coo#L837-L862","kind":"function","name":"test_spmm_coo","path":"tests/test_functional.py","language":"python","start_line":837,"end_line":862,"context_start_line":817,"context_end_line":882,"code":" def test_coo_int8_vectorwise_quant(self, device, dim1, dim2):\n threshold = 3.00\n for i in range(k):\n A = torch.randn(dim1, dim2, device=device).half()\n\n idx = torch.abs(A) >= threshold\n CA, statsA, outlier_cols = F.int8_vectorwise_quant(A, threshold=threshold)\n\n if outlier_cols is not None:\n A2 = (CA.float() * statsA.unsqueeze(1) / 127).half()\n A[:, outlier_cols] = 0\n torch.testing.assert_close(A * (idx == 0), A2, rtol=0.05, atol=1.5e-2)\n\n\n@pytest.mark.skipif(HIP_ENVIRONMENT, reason=\"this test is not supported on ROCm yet\")\n@pytest.mark.skipif(not torch.cuda.is_available(), reason=\"CUDA is required\")\nclass TestSpMMFunctional:\n @pytest.mark.parametrize(\"dim1\", [256, 1024], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [128, 512], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"transposed_B\", TRUE_FALSE, ids=id_formatter(\"transposed_B\"))\n def test_spmm_coo(self, dim1, dim2, transposed_B):\n threshold = 1.5\n dim3 = torch.randint(32, 128, size=(1,)).item()\n # dim3 = 17\n for i in range(k):\n A = torch.randn(dim1, dim2).cuda().half()\n if transposed_B:\n B = torch.randn(dim3, dim2).cuda().half()\n else:\n B = torch.randn(dim2, dim3).cuda().half()\n\n idx = torch.abs(A) >= threshold\n nnz = (idx == 1).sum().item()\n rows, cols = torch.where(idx)\n values = A[idx]\n cooA = F.COOSparseTensor(A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values)\n A2 = A * idx\n\n if transposed_B:\n out2 = F.spmm_coo(cooA, B.t())\n out1 = torch.matmul(A2, B.t())\n else:\n out2 = F.spmm_coo(cooA, B)\n out1 = torch.matmul(A2, B)\n\n assert_all_approx_close(out1, out2, rtol=0.01, atol=3.0e-2, count=30)\n\n @pytest.mark.benchmark\n def test_spmm_bench(self):\n batch = 2\n model = 1024 * 1\n hidden = model * 4\n seq = 1024\n dim1 = batch * seq\n dim2 = model\n dim3 = hidden\n threshold = 4\n A = torch.randn(dim1, dim2, device=\"cuda\").half()\n B = torch.randn(dim2, dim3, device=\"cuda\").half()\n for i in range(10):\n C1 = bnb.matmul(A, B.t())\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(k):\n C1 = bnb.matmul(A, B.t())","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_spmm_bench","uri":"program://bitsandbytes/function/tests.test_functional.test_spmm_bench#L865-L903","kind":"function","name":"test_spmm_bench","path":"tests/test_functional.py","language":"python","start_line":865,"end_line":903,"context_start_line":845,"context_end_line":923,"code":" else:\n B = torch.randn(dim2, dim3).cuda().half()\n\n idx = torch.abs(A) >= threshold\n nnz = (idx == 1).sum().item()\n rows, cols = torch.where(idx)\n values = A[idx]\n cooA = F.COOSparseTensor(A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values)\n A2 = A * 
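Note: test_spmm_coo builds its sparse operand from a magnitude threshold. The same construction with PyTorch's native COO tensors, as a dense-vs-sparse reference for what F.spmm_coo computes (the (rowidx, colidx, values) triple mirrors COOSparseTensor above):

import torch

A = torch.randn(256, 128)
idx = A.abs() >= 1.5
rows, cols = torch.where(idx)
values = A[idx]

cooA = torch.sparse_coo_tensor(torch.stack([rows, cols]), values, size=A.shape)
B = torch.randn(128, 64)
out_sparse = torch.sparse.mm(cooA, B)   # sparse x dense returns a dense result
out_dense = (A * idx) @ B               # dense reference with non-outliers zeroed
torch.testing.assert_close(out_sparse, out_dense)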
idx\n\n if transposed_B:\n out2 = F.spmm_coo(cooA, B.t())\n out1 = torch.matmul(A2, B.t())\n else:\n out2 = F.spmm_coo(cooA, B)\n out1 = torch.matmul(A2, B)\n\n assert_all_approx_close(out1, out2, rtol=0.01, atol=3.0e-2, count=30)\n\n @pytest.mark.benchmark\n def test_spmm_bench(self):\n batch = 2\n model = 1024 * 1\n hidden = model * 4\n seq = 1024\n dim1 = batch * seq\n dim2 = model\n dim3 = hidden\n threshold = 4\n A = torch.randn(dim1, dim2, device=\"cuda\").half()\n B = torch.randn(dim2, dim3, device=\"cuda\").half()\n for i in range(10):\n C1 = bnb.matmul(A, B.t())\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(k):\n C1 = bnb.matmul(A, B.t())\n torch.cuda.synchronize()\n t8 = time.time() - t0\n\n idx = torch.abs(A) >= threshold\n nnz = (idx == 1).sum().item()\n print(nnz / idx.numel())\n rows, cols = torch.where(idx)\n values = A[idx]\n cooA = F.COOSparseTensor(A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values)\n\n for i in range(10):\n out2 = F.spmm_coo(cooA, B)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(k):\n out2 = F.spmm_coo(cooA, B)\n torch.cuda.synchronize()\n tsp = time.time() - t0\n print(tsp, t8)\n print(tsp / t8)\n\n @pytest.mark.parametrize(\"dim1\", [1 * 2048], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [12288], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"dtype\", [torch.float16], ids=describe_dtype)\n @pytest.mark.parametrize(\"out_func\", [\"zeros\", \"ones\"], ids=id_formatter(\"out_func\"))\n def test_spmm_coo_very_sparse(self, dim1, dim2, dtype, out_func):\n out_func = getattr(torch, out_func)\n\n threshold = 3.3\n # threshold = 2.8\n # threshold = 0.0\n A = torch.randn(dim1, dim2, device=\"cuda\").half()\n if dtype == torch.float16:\n B = torch.randn(dim2, dim2 * 4, device=\"cuda\").half()\n torch.nn.init.xavier_uniform_(B)\n else:\n B = torch.randn(dim2, dim2 * 4, device=\"cuda\").half()\n torch.nn.init.xavier_uniform_(B)\n\n SB = torch.abs(B).max().float()","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_spmm_coo_very_sparse","uri":"program://bitsandbytes/function/tests.test_functional.test_spmm_coo_very_sparse#L909-L949","kind":"function","name":"test_spmm_coo_very_sparse","path":"tests/test_functional.py","language":"python","start_line":909,"end_line":949,"context_start_line":889,"context_end_line":969,"code":" rows, cols = torch.where(idx)\n values = A[idx]\n cooA = F.COOSparseTensor(A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values)\n\n for i in range(10):\n out2 = F.spmm_coo(cooA, B)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(k):\n out2 = F.spmm_coo(cooA, B)\n torch.cuda.synchronize()\n tsp = time.time() - t0\n print(tsp, t8)\n print(tsp / t8)\n\n @pytest.mark.parametrize(\"dim1\", [1 * 2048], ids=id_formatter(\"dim1\"))\n @pytest.mark.parametrize(\"dim2\", [12288], ids=id_formatter(\"dim2\"))\n @pytest.mark.parametrize(\"dtype\", [torch.float16], ids=describe_dtype)\n @pytest.mark.parametrize(\"out_func\", [\"zeros\", \"ones\"], ids=id_formatter(\"out_func\"))\n def test_spmm_coo_very_sparse(self, dim1, dim2, dtype, out_func):\n out_func = getattr(torch, out_func)\n\n threshold = 3.3\n # threshold = 2.8\n # threshold = 0.0\n A = torch.randn(dim1, dim2, device=\"cuda\").half()\n if dtype == torch.float16:\n B = torch.randn(dim2, dim2 * 4, device=\"cuda\").half()\n torch.nn.init.xavier_uniform_(B)\n else:\n B = 
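Note: test_spmm_bench repeats one timing idiom several times: warmup loop, synchronize, timed loop, synchronize. Both synchronizations are needed because CUDA launches are asynchronous. Factored into a helper for reference; the name is hypothetical and a CUDA device is assumed:

import time
import torch

def bench_cuda(fn, iters=100, warmup=10):
    # Warmup runs absorb one-time costs (allocations, autotuning) before timing.
    for _ in range(warmup):
        fn()
    torch.cuda.synchronize()
    t0 = time.time()
    for _ in range(iters):
        fn()
    torch.cuda.synchronize()
    return (time.time() - t0) / iters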
torch.randn(dim2, dim2 * 4, device=\"cuda\").half()\n torch.nn.init.xavier_uniform_(B)\n\n SB = torch.abs(B).max().float()\n B = torch.round(B / SB * 127).to(torch.int8)\n\n print(\"\")\n idx = torch.abs(A) >= threshold\n nnz = (idx == 1).sum().item()\n rows, cols = torch.where(idx)\n values = A[idx]\n cooA = F.COOSparseTensor(A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values)\n A2 = A * idx\n out1 = torch.matmul(A2.half(), B.half())\n out = out_func(out1.shape, dtype=torch.float16, device=out1.device)\n out1 += out.clone()\n out2 = F.spmm_coo_very_sparse(cooA, B, out=out)\n # print(B)\n # print(out1)\n # print(out2)\n p = 200 / (2048 * 12288 * 4)\n n = out1.numel()\n count = math.ceil(p * n)\n std = out1.std()\n out1 /= std\n out2 /= std\n assert_all_approx_close(out1, out2.half(), rtol=0.01, atol=3.0e-2, count=count)\n # assert_all_approx_close(out1, out2.half(), rtol=0.05, atol=0.01, count=count)\n\n idx_col = torch.randint(0, A2.shape[-1], size=(15,))\n\n # torch.testing.assert_close(out1, out2.half(), rtol=0.05, atol=0.001)\n\n # Bt = torch.randn(dim2*4, dim2, device='cuda').half()\n # torch.cuda.synchronize()\n # t0 = time.time()\n # print(A2.shape, B.shape)\n # for i in range(100):\n # #out3 = F.spmm_coo(cooA, Bt.t())\n # #out2 = F.spmm_coo(cooA, B)\n # #out2 = F.spmm_coo_very_sparse(cooA, B)\n # #out1 = torch.matmul(A, Bt.t())\n\n # torch.cuda.synchronize()\n # print(time.time() - t0)\n\n @pytest.mark.parametrize(\"dim1\", [1 * 2048])\n @pytest.mark.parametrize(\"dim2\", [2048])\n @pytest.mark.parametrize(\"dtype\", [torch.int8])\n def test_spmm_coo_dequant(self, dim1, dim2, dtype):","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_spmm_coo_dequant","uri":"program://bitsandbytes/function/tests.test_functional.test_spmm_coo_dequant#L969-L1064","kind":"function","name":"test_spmm_coo_dequant","path":"tests/test_functional.py","language":"python","start_line":969,"end_line":1064,"context_start_line":949,"context_end_line":1084,"code":" idx_col = torch.randint(0, A2.shape[-1], size=(15,))\n\n # torch.testing.assert_close(out1, out2.half(), rtol=0.05, atol=0.001)\n\n # Bt = torch.randn(dim2*4, dim2, device='cuda').half()\n # torch.cuda.synchronize()\n # t0 = time.time()\n # print(A2.shape, B.shape)\n # for i in range(100):\n # #out3 = F.spmm_coo(cooA, Bt.t())\n # #out2 = F.spmm_coo(cooA, B)\n # #out2 = F.spmm_coo_very_sparse(cooA, B)\n # #out1 = torch.matmul(A, Bt.t())\n\n # torch.cuda.synchronize()\n # print(time.time() - t0)\n\n @pytest.mark.parametrize(\"dim1\", [1 * 2048])\n @pytest.mark.parametrize(\"dim2\", [2048])\n @pytest.mark.parametrize(\"dtype\", [torch.int8])\n def test_spmm_coo_dequant(self, dim1, dim2, dtype):\n threshold = 6.0\n # threshold = 2.8\n # threshold = 0.0\n A = torch.randn(dim1, dim2, device=\"cuda\").half()\n B = torch.empty(dim2, dim2 * 4, device=\"cuda\", dtype=torch.float16)\n torch.nn.init.xavier_uniform_(B)\n Bt = B.t().contiguous()\n\n CB, CBt, statsB, statsBt, coo_tensor = F.int8_double_quant(B)\n\n rowidx = torch.randint(0, A.shape[-1], size=(15,))\n\n A[:, rowidx] = 8.0\n\n idx = torch.abs(A) >= threshold\n nnz = (idx == 1).sum().item()\n rows, cols = torch.where(idx)\n values = A[idx]\n cooA = F.COOSparseTensor(A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values)\n A2 = A * idx\n out2 = F.spmm_coo_very_sparse(cooA, CBt, dequant_stats=statsBt)\n out1 = torch.matmul(A2, B.half())\n out3 = 
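Note: assert_all_approx_close is defined in the shared test helpers; a sketch of its presumed behavior, inferred from how it is called in these tests (the `count` slack absorbs rare off-by-one rounding disagreements between kernels):

import torch

def assert_all_approx_close(a, b, rtol, atol, count):
    # Like torch.testing.assert_close, but tolerate up to `count` stray elements.
    mismatched = (~torch.isclose(a, b, rtol=rtol, atol=atol)).sum().item()
    if mismatched > count:
        raise AssertionError(f"{mismatched} elements differ (allowed: {count})")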
F.spmm_coo_very_sparse(cooA, CBt.half())\n out3 = out3 * statsBt.half() / 127\n\n values, counts = torch.unique(cooA.rowidx, return_counts=True)\n offset = counts.cumsum(0).int()\n max_count, max_idx = torch.sort(counts, descending=True)\n print(torch.median(max_count.float()))\n\n torch.testing.assert_close(out2, out3, rtol=0.05, atol=0.001)\n\n p = 200 / (2048 * 12288 * 4)\n n = out1.numel()\n count = math.ceil(p * n)\n assert_all_approx_close(out1, out2, rtol=0.01, atol=3.0e-2, count=count)\n\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in range(100):\n # out2 = F.spmm_coo_very_sparse(cooA, B)\n # torch.cuda.synchronize()\n # print('fp16', time.time() - t0)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n out2 = F.spmm_coo(cooA, B)\n torch.cuda.synchronize()\n print(\"cusparse fp16\", time.time() - t0)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n out2 = F.spmm_coo_very_sparse(cooA, CBt)\n torch.cuda.synchronize()\n print(\"int8\", time.time() - t0)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n out2 = F.spmm_coo_very_sparse(cooA, CBt, dequant_stats=statsBt)\n torch.cuda.synchronize()\n print(\"int8+dequant\", time.time() - t0)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n out2 = torch.matmul(A, B)\n torch.cuda.synchronize()\n print(\"matmul\", time.time() - t0)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n out1 = bnb.matmul(A, Bt)\n out2 = F.spmm_coo_very_sparse(cooA, CBt, dequant_stats=statsBt)\n out = out1 + out2\n torch.cuda.synchronize()\n print(\"sparse+ matmul\", time.time() - t0)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n out1 = bnb.matmul(A, Bt)\n torch.matmul(A[:, rowidx], Bt.t()[rowidx], out=out1)\n torch.cuda.synchronize()\n print(\"partial matmul\", time.time() - t0)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n out1 = bnb.matmul(A, Bt)\n torch.cuda.synchronize()\n print(\"partial matmul\", time.time() - t0)\n\n\n@pytest.mark.skipif(not torch.cuda.is_available(), reason=\"CUDA is required\")\nclass TestSparseTensorFunctional:\n def test_coo2csr(self):\n threshold = 1\n A = torch.randn(128, 128).half().cuda()\n idx = torch.abs(A) >= threshold\n nnz = (idx == 1).sum().item()\n rows, cols = torch.where(idx)\n values = A[idx]\n cooA = F.COOSparseTensor(A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values)\n A2 = A * idx\n csrA = F.coo2csr(cooA)\n counts = csrA.rowptr[1:] - csrA.rowptr[:-1]\n assert counts.numel() == A.shape[0]\n\n torch.testing.assert_close(counts.long(), (A2 != 0).sum(1))\n idx = A2 != 0\n torch.testing.assert_close(A2[idx], csrA.values)","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_coo2csr","uri":"program://bitsandbytes/function/tests.test_functional.test_coo2csr#L1069-L1084","kind":"function","name":"test_coo2csr","path":"tests/test_functional.py","language":"python","start_line":1069,"end_line":1084,"context_start_line":1049,"context_end_line":1104,"code":" print(\"sparse+ matmul\", time.time() - t0)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n out1 = bnb.matmul(A, Bt)\n torch.matmul(A[:, rowidx], Bt.t()[rowidx], out=out1)\n torch.cuda.synchronize()\n print(\"partial matmul\", time.time() - t0)\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(100):\n out1 = bnb.matmul(A, Bt)\n 
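Note: out3 in test_spmm_coo_dequant checks that dequantizing after the matmul (codes times statsBt / 127) matches the fused dequant_stats path. The underlying identity, demonstrated on dense tensors with per-column stats:

import torch

B = torch.randn(128, 512)
statsB = B.abs().amax(dim=0).clamp(min=1e-8)     # one fp scale per column
CB = torch.round(B / statsB * 127).to(torch.int8)

A = torch.randn(32, 128)
out_int = A @ CB.float()                          # multiply against raw int8 codes
out = out_int * statsB / 127                      # fold the per-column scale back in
torch.testing.assert_close(out, A @ B, rtol=0.05, atol=0.5)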
torch.cuda.synchronize()\n print(\"partial matmul\", time.time() - t0)\n\n\n@pytest.mark.skipif(not torch.cuda.is_available(), reason=\"CUDA is required\")\nclass TestSparseTensorFunctional:\n def test_coo2csr(self):\n threshold = 1\n A = torch.randn(128, 128).half().cuda()\n idx = torch.abs(A) >= threshold\n nnz = (idx == 1).sum().item()\n rows, cols = torch.where(idx)\n values = A[idx]\n cooA = F.COOSparseTensor(A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values)\n A2 = A * idx\n csrA = F.coo2csr(cooA)\n counts = csrA.rowptr[1:] - csrA.rowptr[:-1]\n assert counts.numel() == A.shape[0]\n\n torch.testing.assert_close(counts.long(), (A2 != 0).sum(1))\n idx = A2 != 0\n torch.testing.assert_close(A2[idx], csrA.values)\n\n def test_coo2csc(self):\n threshold = 1\n A = torch.randn(128, 128).half().cuda()\n idx = torch.abs(A) >= threshold\n nnz = (idx == 1).sum().item()\n rows, cols = torch.where(idx)\n values = A[idx]\n cooA = F.COOSparseTensor(A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values)\n A2 = A * idx\n cscA = F.coo2csc(cooA)\n counts = cscA.colptr[1:] - cscA.colptr[:-1]\n assert counts.numel() == A.shape[1]\n\n torch.testing.assert_close(counts.long(), (A2 != 0).sum(0))\n # torch uses row-major -> use transpose to transfer to col-major\n idx = A2.t() != 0\n torch.testing.assert_close(A2.t()[idx], cscA.values)\n\n","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_coo2csc","uri":"program://bitsandbytes/function/tests.test_functional.test_coo2csc#L1086-L1102","kind":"function","name":"test_coo2csc","path":"tests/test_functional.py","language":"python","start_line":1086,"end_line":1102,"context_start_line":1066,"context_end_line":1122,"code":"\n@pytest.mark.skipif(not torch.cuda.is_available(), reason=\"CUDA is required\")\nclass TestSparseTensorFunctional:\n def test_coo2csr(self):\n threshold = 1\n A = torch.randn(128, 128).half().cuda()\n idx = torch.abs(A) >= threshold\n nnz = (idx == 1).sum().item()\n rows, cols = torch.where(idx)\n values = A[idx]\n cooA = F.COOSparseTensor(A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values)\n A2 = A * idx\n csrA = F.coo2csr(cooA)\n counts = csrA.rowptr[1:] - csrA.rowptr[:-1]\n assert counts.numel() == A.shape[0]\n\n torch.testing.assert_close(counts.long(), (A2 != 0).sum(1))\n idx = A2 != 0\n torch.testing.assert_close(A2[idx], csrA.values)\n\n def test_coo2csc(self):\n threshold = 1\n A = torch.randn(128, 128).half().cuda()\n idx = torch.abs(A) >= threshold\n nnz = (idx == 1).sum().item()\n rows, cols = torch.where(idx)\n values = A[idx]\n cooA = F.COOSparseTensor(A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values)\n A2 = A * idx\n cscA = F.coo2csc(cooA)\n counts = cscA.colptr[1:] - cscA.colptr[:-1]\n assert counts.numel() == A.shape[1]\n\n torch.testing.assert_close(counts.long(), (A2 != 0).sum(0))\n # torch uses row-major -> use transpose to transfer to col-major\n idx = A2.t() != 0\n torch.testing.assert_close(A2.t()[idx], cscA.values)\n\n\nclass TestQuantize4BitFunctional:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n @pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"])\n @pytest.mark.parametrize(\n \"blocksize\",\n [64, 128, 256, 512, 1024, 2048, 4096] if not HIP_ENVIRONMENT else [128, 256, 512, 1024, 2048, 4096],\n )\n def test_4bit_quant(self, device, 
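Note: test_coo2csr validates the conversion by differencing consecutive row pointers. The forward construction is just a per-row count plus a prefix sum, in miniature:

import torch

rows = torch.tensor([0, 0, 1, 3, 3, 3])   # sorted COO row indices
n_rows = 4
counts = torch.bincount(rows, minlength=n_rows)
rowptr = torch.zeros(n_rows + 1, dtype=torch.long)
rowptr[1:] = counts.cumsum(0)
# rowptr == [0, 2, 3, 3, 6]; row i owns values[rowptr[i]:rowptr[i+1]], which is
# exactly what the rowptr[1:] - rowptr[:-1] check in the test recovers.
assert torch.equal(rowptr[1:] - rowptr[:-1], counts)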
dtype, quant_type, blocksize):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n A1 = torch.randn(1024, 1024, device=device, dtype=dtype)\n qa, SA = F.quantize_4bit(A1, blocksize=blocksize, quant_type=quant_type)\n A2 = F.dequantize_4bit(qa, SA, blocksize=blocksize, quant_type=quant_type)\n\n err = (A1 - A2).abs().float()\n relerr = (err / (A1.abs().float() + 1e-8)).mean()","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_4bit_quant","uri":"program://bitsandbytes/function/tests.test_functional.test_4bit_quant#L1113-L1178","kind":"function","name":"test_4bit_quant","path":"tests/test_functional.py","language":"python","start_line":1113,"end_line":1178,"context_start_line":1093,"context_end_line":1198,"code":" cooA = F.COOSparseTensor(A.shape[0], A.shape[1], nnz, rows.int(), cols.int(), values)\n A2 = A * idx\n cscA = F.coo2csc(cooA)\n counts = cscA.colptr[1:] - cscA.colptr[:-1]\n assert counts.numel() == A.shape[1]\n\n torch.testing.assert_close(counts.long(), (A2 != 0).sum(0))\n # torch uses row-major -> use transpose to transfer to col-major\n idx = A2.t() != 0\n torch.testing.assert_close(A2.t()[idx], cscA.values)\n\n\nclass TestQuantize4BitFunctional:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16, torch.bfloat16], ids=describe_dtype)\n @pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"])\n @pytest.mark.parametrize(\n \"blocksize\",\n [64, 128, 256, 512, 1024, 2048, 4096] if not HIP_ENVIRONMENT else [128, 256, 512, 1024, 2048, 4096],\n )\n def test_4bit_quant(self, device, dtype, quant_type, blocksize):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n A1 = torch.randn(1024, 1024, device=device, dtype=dtype)\n qa, SA = F.quantize_4bit(A1, blocksize=blocksize, quant_type=quant_type)\n A2 = F.dequantize_4bit(qa, SA, blocksize=blocksize, quant_type=quant_type)\n\n err = (A1 - A2).abs().float()\n relerr = (err / (A1.abs().float() + 1e-8)).mean()\n err = err.mean()\n\n assert A2.dtype == dtype\n\n # With larger block sizes, we can expect this to blow up.\n # At blocksize>=1024, don't even bother looking at relerr.\n #\n # Actually, the above is not true anymore after fixing the integer packing bug.\n # The following values were taken from averaging 1k samples per test configuration after fixing the bug.\n error_dict = dict()\n error_dict[\"fp4\"] = dict()\n error_dict[\"nf4\"] = dict()\n error_dict[\"fp4\"][\"err\"] = {\n 64: 0.096545,\n 128: 0.102947,\n 256: 0.108685,\n 512: 0.114087,\n 1024: 0.119312,\n 2048: 0.124460,\n 4096: 0.129573,\n }\n error_dict[\"fp4\"][\"rel_err\"] = {\n 64: 0.260130,\n 128: 0.275734,\n 256: 0.289842,\n 512: 0.302852,\n 1024: 0.314982,\n 2048: 0.326402,\n 4096: 0.337228,\n }\n\n error_dict[\"nf4\"][\"err\"] = {\n 64: 0.072792,\n 128: 0.076835,\n 256: 0.080326,\n 512: 0.083535,\n 1024: 0.086603,\n 2048: 0.089592,\n 4096: 0.092537,\n }\n error_dict[\"nf4\"][\"rel_err\"] = {\n 64: 0.203299,\n 128: 0.215252,\n 256: 0.226044,\n 512: 0.236021,\n 1024: 0.245365,\n 2048: 0.254146,\n 4096: 0.262457,\n }\n\n # Allow higher tolerance for fp32 on CPU with larger block sizes\n reltol = 2.8e-3 if dtype == torch.float32 and blocksize >= 128 and device == \"cpu\" else 1e-3\n errtol = 1.2e-3 if 
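Note: quantize_4bit/dequantize_4bit are blockwise: each group of `blocksize` values is scaled by its absmax and snapped to a 16-entry code table. A pure-PyTorch sketch of the roundtrip whose mean error the test bounds; the uniform codebook below is a placeholder, not the actual NF4/FP4 tables:

import torch

def blockwise_4bit_roundtrip(A, code, blocksize=64):
    flat = A.float().flatten()
    blocks = flat.view(-1, blocksize)                       # assumes numel % blocksize == 0
    absmax = blocks.abs().amax(dim=1, keepdim=True).clamp(min=1e-8)
    normed = blocks / absmax                                 # now in [-1, 1]
    # Snap each value to the nearest of the 16 code points.
    idx = (normed.unsqueeze(-1) - code).abs().argmin(dim=-1)
    dequant = code[idx] * absmax
    return dequant.flatten().view_as(A)

code = torch.linspace(-1, 1, 16)  # placeholder; NF4 uses a normal-quantile-derived table
A = torch.randn(1024, 1024)
A2 = blockwise_4bit_roundtrip(A, code)
err = (A - A2).abs().mean()
relerr = ((A - A2).abs() / (A.abs() + 1e-8)).mean()
print(err.item(), relerr.item())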
dtype == torch.float32 and blocksize >= 1024 and device == \"cpu\" else 1e-3\n\n assert err < error_dict[quant_type][\"err\"][blocksize] + errtol\n assert relerr < error_dict[quant_type][\"rel_err\"][blocksize] + reltol\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"])\n @pytest.mark.parametrize(\"blocksize\", [64, 128] if not HIP_ENVIRONMENT else [128], ids=id_formatter(\"blocksize\"))\n @pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16], ids=describe_dtype)\n def test_4bit_compressed_stats(self, device, quant_type, blocksize, dtype):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype):\n pytest.skip(\"FP4 quantization is not supported on HPU.\")\n\n errs1 = []\n errs2 = []\n for i in range(10):\n A1 = torch.randn(1024, 1024, device=device, dtype=dtype)\n q2, SA2 = F.quantize_4bit(A1, blocksize=blocksize, quant_type=quant_type)\n q3, SA3 = F.quantize_4bit(A1, blocksize=blocksize, compress_statistics=True, quant_type=quant_type)\n A2 = F.dequantize_4bit(q2, SA2, quant_type=quant_type)\n A3 = F.dequantize_4bit(q3, SA3, quant_type=quant_type)\n\n err = (A1 - A2).abs().float()\n relerr = (err / (A1.abs().float() + 1e-15)).mean()","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_4bit_compressed_stats","uri":"program://bitsandbytes/function/tests.test_functional.test_4bit_compressed_stats#L1184-L1213","kind":"function","name":"test_4bit_compressed_stats","path":"tests/test_functional.py","language":"python","start_line":1184,"end_line":1213,"context_start_line":1164,"context_end_line":1233,"code":" 64: 0.203299,\n 128: 0.215252,\n 256: 0.226044,\n 512: 0.236021,\n 1024: 0.245365,\n 2048: 0.254146,\n 4096: 0.262457,\n }\n\n # Allow higher tolerance for fp32 on CPU with larger block sizes\n reltol = 2.8e-3 if dtype == torch.float32 and blocksize >= 128 and device == \"cpu\" else 1e-3\n errtol = 1.2e-3 if dtype == torch.float32 and blocksize >= 1024 and device == \"cpu\" else 1e-3\n\n assert err < error_dict[quant_type][\"err\"][blocksize] + errtol\n assert relerr < error_dict[quant_type][\"rel_err\"][blocksize] + reltol\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"quant_type\", [\"fp4\", \"nf4\"])\n @pytest.mark.parametrize(\"blocksize\", [64, 128] if not HIP_ENVIRONMENT else [128], ids=id_formatter(\"blocksize\"))\n @pytest.mark.parametrize(\"dtype\", [torch.float32, torch.float16], ids=describe_dtype)\n def test_4bit_compressed_stats(self, device, quant_type, blocksize, dtype):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, dtype):\n pytest.skip(\"FP4 quantization is not supported on HPU.\")\n\n errs1 = []\n errs2 = []\n for i in range(10):\n A1 = torch.randn(1024, 1024, device=device, dtype=dtype)\n q2, SA2 = F.quantize_4bit(A1, blocksize=blocksize, quant_type=quant_type)\n q3, SA3 = F.quantize_4bit(A1, blocksize=blocksize, compress_statistics=True, quant_type=quant_type)\n A2 = F.dequantize_4bit(q2, SA2, quant_type=quant_type)\n A3 = F.dequantize_4bit(q3, SA3, quant_type=quant_type)\n\n err = (A1 - A2).abs().float()\n relerr = (err / (A1.abs().float() + 1e-15)).mean()\n err = err.mean()\n\n errs1.append(err.item())\n\n assert err.item() < 0.11\n assert relerr.item() < 0.28\n\n err = (A1 - A3).abs().float()\n relerr = (err / (A1.abs().float() + 1e-15)).mean()\n err = err.mean()\n\n 
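Note: rough storage arithmetic behind compress_statistics=True, assuming the per-block fp32 absmax values are themselves 8-bit quantized in chunks of 256 with one fp32 scale per chunk (an approximation of the real layout, which also keeps an offset and a second codebook):

n = 1024 * 1024
blocksize = 64
n_blocks = n // blocksize                             # 16384 fp32 absmax values normally
plain_bytes = n_blocks * 4                            # 65536 bytes of statistics
compressed_bytes = n_blocks + (n_blocks // 256) * 4   # int8 codes + chunk scales: ~16640 bytes
print(plain_bytes, compressed_bytes)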
errs2.append(err.item())\n\n assert err.item() < 0.11\n assert relerr.item() < 0.28\n\n # @pytest.mark.parametrize(\"quant_type\", ['fp4', 'nf4'])\n @pytest.mark.parametrize(\"quant_type\", [\"nf4\"])\n @pytest.mark.skipif(not torch.cuda.is_available(), reason=\"CUDA is required\")\n @pytest.mark.benchmark\n def test_bench_4bit_dequant(self, quant_type):\n blocksize = 256\n a = torch.rand(1024 * 12 * 4, 1024 * 12, device=\"cuda\").half()\n qa, SA = F.quantize_4bit(a, blocksize=blocksize, quant_type=quant_type)\n\n input_size = a.numel() / 2\n output_size = a.numel() * 2\n num_bytes = input_size + output_size\n GB = num_bytes / 1e9\n max_theoretical_s = GB / 768\n # print(max_theoretical_s*1e6)\n b = torch.randn(128, 1024 * 12, device=\"cuda\").half()\n\n iters = 100\n torch.cuda.synchronize()","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_bench_4bit_dequant","uri":"program://bitsandbytes/function/tests.test_functional.test_bench_4bit_dequant#L1219-L1238","kind":"function","name":"test_bench_4bit_dequant","path":"tests/test_functional.py","language":"python","start_line":1219,"end_line":1238,"context_start_line":1199,"context_end_line":1258,"code":" err = err.mean()\n\n errs1.append(err.item())\n\n assert err.item() < 0.11\n assert relerr.item() < 0.28\n\n err = (A1 - A3).abs().float()\n relerr = (err / (A1.abs().float() + 1e-15)).mean()\n err = err.mean()\n\n errs2.append(err.item())\n\n assert err.item() < 0.11\n assert relerr.item() < 0.28\n\n # @pytest.mark.parametrize(\"quant_type\", ['fp4', 'nf4'])\n @pytest.mark.parametrize(\"quant_type\", [\"nf4\"])\n @pytest.mark.skipif(not torch.cuda.is_available(), reason=\"CUDA is required\")\n @pytest.mark.benchmark\n def test_bench_4bit_dequant(self, quant_type):\n blocksize = 256\n a = torch.rand(1024 * 12 * 4, 1024 * 12, device=\"cuda\").half()\n qa, SA = F.quantize_4bit(a, blocksize=blocksize, quant_type=quant_type)\n\n input_size = a.numel() / 2\n output_size = a.numel() * 2\n num_bytes = input_size + output_size\n GB = num_bytes / 1e9\n max_theoretical_s = GB / 768\n # print(max_theoretical_s*1e6)\n b = torch.randn(128, 1024 * 12, device=\"cuda\").half()\n\n iters = 100\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(iters):\n F.dequantize_4bit(qa, SA, blocksize=blocksize, quant_type=quant_type)\n # b.copy_(a)\n torch.cuda.synchronize()\n # print((time.time()-t0)/iters*1e6)\n\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in range(iters):\n # torch.matmul(b, a.t())\n # torch.cuda.synchronize()\n # print((time.time()-t0)/iters*1e6)\n\n @pytest.mark.skipif(\n HIP_ENVIRONMENT, reason=\"gemv 4bit tests are partially enabled on MI300, others being fixed for warpsize 64\"\n )\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"double_quant\", TRUE_FALSE, ids=lambda double_quant: f\"DQ_{double_quant}\")\n @pytest.mark.parametrize(\"storage_type\", [\"nf4\", \"fp4\"])\n @pytest.mark.parametrize(\"kind\", [\"fc1\", \"fc2\", \"attn\", \"attn_packed\"])\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=describe_dtype)\n @pytest.mark.parametrize(\n \"quant_storage\",\n [torch.uint8, torch.float16, torch.bfloat16, torch.float32],","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} 
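Note: the benchmark's max_theoretical_s is a bandwidth floor. Dequantization is memory-bound, so kernel time cannot beat bytes moved divided by peak DRAM bandwidth (768 GB/s is the figure assumed in the benchmark):

numel = (1024 * 12 * 4) * (1024 * 12)
input_bytes = numel / 2        # 4-bit codes: half a byte per element
output_bytes = numel * 2       # fp16 output: two bytes per element
seconds = (input_bytes + output_bytes) / 1e9 / 768
print(seconds * 1e6, "microseconds at best")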
{"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_gemv_4bit","uri":"program://bitsandbytes/function/tests.test_functional.test_gemv_4bit#L1262-L1406","kind":"function","name":"test_gemv_4bit","path":"tests/test_functional.py","language":"python","start_line":1262,"end_line":1406,"context_start_line":1242,"context_end_line":1426,"code":" # t0 = time.time()\n # for i in range(iters):\n # torch.matmul(b, a.t())\n # torch.cuda.synchronize()\n # print((time.time()-t0)/iters*1e6)\n\n @pytest.mark.skipif(\n HIP_ENVIRONMENT, reason=\"gemv 4bit tests are partially enabled on MI300, others being fixed for warpsize 64\"\n )\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"double_quant\", TRUE_FALSE, ids=lambda double_quant: f\"DQ_{double_quant}\")\n @pytest.mark.parametrize(\"storage_type\", [\"nf4\", \"fp4\"])\n @pytest.mark.parametrize(\"kind\", [\"fc1\", \"fc2\", \"attn\", \"attn_packed\"])\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=describe_dtype)\n @pytest.mark.parametrize(\n \"quant_storage\",\n [torch.uint8, torch.float16, torch.bfloat16, torch.float32],\n ids=describe_dtype,\n )\n @pytest.mark.parametrize(\"dim\", [128, 256, 512, 1024], ids=id_formatter(\"dim\"))\n def test_gemv_4bit(self, device, dim, dtype, storage_type, quant_storage, double_quant, kind):\n if device == \"hpu\" and not is_supported_on_hpu(storage_type, dtype, quant_storage):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n errs1 = []\n errs2 = []\n errs3 = []\n relerrs1 = []\n relerrs2 = []\n relerrs3 = []\n max_errs1 = []\n max_errs2 = []\n max_errs3 = []\n\n # Large number of iterations is excessive and slow on CPU.\n # Keep for CUDA/XPU for now.\n iters = 10 if device == \"cpu\" else 100\n\n for i in range(iters):\n if kind == \"fc1\":\n A = torch.randn(1, dim, dtype=dtype, device=device)\n B = torch.randn(dim * 4, dim, dtype=dtype, device=device) / math.sqrt(dim)\n elif kind == \"fc2\":\n A = torch.randn(1, 4 * dim, dtype=dtype, device=device)\n B = torch.randn(dim, 4 * dim, dtype=dtype, device=device) / math.sqrt(dim)\n elif kind == \"attn\":\n A = torch.randn(1, dim, dtype=dtype, device=device)\n B = torch.randn(dim, dim, dtype=dtype, device=device) / math.sqrt(dim)\n elif kind == \"attn_packed\":\n A = torch.randn(1, dim, dtype=dtype, device=device)\n B = torch.randn(dim * 3, dim, dtype=dtype, device=device) / math.sqrt(dim)\n\n qB, state = F.quantize_4bit(\n B,\n quant_type=storage_type,\n compress_statistics=double_quant,\n quant_storage=quant_storage,\n )\n C3 = torch.matmul(A, B.t())\n C2 = F.gemv_4bit(A, qB.t(), state=state)\n A.requires_grad = True\n C1 = bnb.matmul_4bit(A, qB.t(), state)\n\n err1 = (C1 - C2).abs().float()\n err2 = (C3 - C2).abs().float()\n err3 = (C3 - C1).abs().float()\n\n mag1 = torch.abs(C1).float() + 1e-5\n mag2 = torch.abs(C3).float() + 1e-5\n mag3 = torch.abs(C3).float() + 1e-5\n\n relerr1 = err1 / mag1\n relerr2 = err2 / mag2\n relerr3 = err3 / mag3\n\n max_err1 = err1.max()\n max_err2 = err2.max()\n max_err3 = err3.max()\n\n errs1.append(err1.mean().item())\n errs2.append(err2.mean().item())\n errs3.append(err3.mean().item())\n\n relerrs1.append(relerr1.mean().item())\n relerrs2.append(relerr2.mean().item())\n relerrs3.append(relerr3.mean().item())\n\n max_errs1.append(max_err1.item())\n max_errs2.append(max_err2.item())\n max_errs3.append(max_err3.item())\n\n c = int(C1.numel() * 0.0014 * (dim / 256)) + 1\n\n c = assert_all_approx_close(C1, C2, 1e-5, 0.01, 
count=0, throw=False)\n err1 = sum(errs1) / len(errs1) / math.sqrt(dim)\n err2 = sum(errs2) / len(errs2) / math.sqrt(dim)\n err3 = sum(errs3) / len(errs3) / math.sqrt(dim)\n relerr1 = sum(relerrs1) / len(relerrs1) / math.sqrt(dim)\n relerr2 = sum(relerrs2) / len(relerrs2) / math.sqrt(dim)\n relerr3 = sum(relerrs3) / len(relerrs3) / math.sqrt(dim)\n maxerr1 = sum(max_errs1) / len(max_errs1) / math.sqrt(dim)\n maxerr2 = sum(max_errs2) / len(max_errs2) / math.sqrt(dim)\n maxerr3 = sum(max_errs3) / len(max_errs3) / math.sqrt(dim)\n absratio = err2 / err3\n relratio = relerr2 / relerr3\n maxratio = relerr2 / relerr3\n\n # for debugging if the tests fails\n #\n # print('='*80)\n # print(f'For matmul: {A.shape}, {B.shape}, {kind}, {dtype}, {storage_type}, double_quant={double_quant}:')\n # print(C1.flatten()[-20:])\n # print(C2.flatten()[-20:])\n # print(f'inference vs training abs: {err1}')\n # print(f'inference vs training rel: {relerr1}')\n # print(f'inference vs training max: {maxerr1}')\n # print(f'inference vs training vs torch err ratio abs: {absratio}')\n # print(f'inference vs training vs torch err ratio rel: {relratio}')\n # print(f'inference vs training vs torch err ratio max: {maxratio}')\n if dtype == torch.float16:\n if dim <= 512:\n assert err1 < 7e-5\n\n # TODO(matthewdouglas): On T4, dim=128-fp16-fc2-fp4-DQ will have relerror ~ 0.00092727\n if (\n device == \"cuda\"\n and double_quant\n and storage_type == \"fp4\"\n and kind == \"fc2\"\n and torch.cuda.get_device_capability() == (7, 5)\n ):\n assert relerr1 < 0.00093\n else:\n assert relerr1 < 0.0008\n else:\n assert err1 < 6e-5\n assert relerr1 < 2e-4\n assert absratio < 1.005 and absratio > 0.995\n assert relratio < 1.005 and relratio > 0.992\n assert maxratio < 1.005 and maxratio > 0.992\n elif dtype == torch.float32:\n if dim <= 512:\n assert err1 < 5e-8\n assert relerr1 < 1e-6\n assert maxerr1 < 1.05e-7\n else:\n assert err1 < 5e-8\n assert relerr1 < 8e-6\n assert maxerr1 < 1e-7\n assert absratio < 1.005 and absratio > 0.995\n assert relratio < 1.005 and relratio > 0.995\n assert maxratio < 1.005 and maxratio > 0.995\n elif dtype == torch.bfloat16:\n if dim <= 512:\n relerr_thres = 0.013 if hasattr(torch, \"xpu\") and torch.xpu.is_available() else 0.007\n assert err1 < 6e-4\n assert relerr1 < relerr_thres\n assert maxerr1 < 0.015\n else:\n assert err1 < 2e-4\n assert relerr1 < 0.002\n assert maxerr1 < 0.0012\n assert absratio < 1.005 and absratio > 0.995\n assert relratio < 1.05 and relratio > 0.96\n assert maxratio < 1.05 and maxratio > 0.97\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"storage_type\", [\"nf4\", \"fp4\"], ids=[\"nf4\", \"fp4\"])\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=describe_dtype)\n @pytest.mark.skipif(\n HIP_ENVIRONMENT and ROCM_GPU_ARCH == \"gfx90a\",\n reason=\"this test is not supported on ROCm with gfx90a architecture yet\",\n )\n def test_gemv_eye_4bit(self, device, storage_type, dtype):\n if device == \"hpu\" and not is_supported_on_hpu(storage_type, dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n if (\n device == \"cpu\"\n and platform.system() == \"Windows\"\n and version.parse(torch.__version__).release == (2, 8, 0)\n ):\n pytest.skip(\"Regression: CPU crash on Windows with torch 2.8.0\")\n\n dims = 4","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} 
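Note: the inference-shaped case test_gemv_4bit exercises, as a standalone usage sketch. The calls mirror the test body, but exact signatures can vary across bitsandbytes versions, and a CUDA device is assumed:

import math
import torch
import bitsandbytes as bnb
import bitsandbytes.functional as F

# A single activation row against a 4-bit-quantized weight (the "fc1" shape).
dim = 256
A = torch.randn(1, dim, dtype=torch.float16, device="cuda")
B = torch.randn(dim * 4, dim, dtype=torch.float16, device="cuda") / math.sqrt(dim)

qB, state = F.quantize_4bit(B, quant_type="nf4")
C_ref = A @ B.t()
C_4bit = bnb.matmul_4bit(A, qB.t(), state)
print((C_ref - C_4bit).abs().mean().item())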
{"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.test_gemv_eye_4bit","uri":"program://bitsandbytes/function/tests.test_functional.test_gemv_eye_4bit#L1415-L1442","kind":"function","name":"test_gemv_eye_4bit","path":"tests/test_functional.py","language":"python","start_line":1415,"end_line":1442,"context_start_line":1395,"context_end_line":1459,"code":" if dim <= 512:\n relerr_thres = 0.013 if hasattr(torch, \"xpu\") and torch.xpu.is_available() else 0.007\n assert err1 < 6e-4\n assert relerr1 < relerr_thres\n assert maxerr1 < 0.015\n else:\n assert err1 < 2e-4\n assert relerr1 < 0.002\n assert maxerr1 < 0.0012\n assert absratio < 1.005 and absratio > 0.995\n assert relratio < 1.05 and relratio > 0.96\n assert maxratio < 1.05 and maxratio > 0.97\n\n @pytest.mark.parametrize(\"device\", get_available_devices())\n @pytest.mark.parametrize(\"storage_type\", [\"nf4\", \"fp4\"], ids=[\"nf4\", \"fp4\"])\n @pytest.mark.parametrize(\"dtype\", [torch.float16, torch.bfloat16, torch.float32], ids=describe_dtype)\n @pytest.mark.skipif(\n HIP_ENVIRONMENT and ROCM_GPU_ARCH == \"gfx90a\",\n reason=\"this test is not supported on ROCm with gfx90a architecture yet\",\n )\n def test_gemv_eye_4bit(self, device, storage_type, dtype):\n if device == \"hpu\" and not is_supported_on_hpu(storage_type, dtype):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n if (\n device == \"cpu\"\n and platform.system() == \"Windows\"\n and version.parse(torch.__version__).release == (2, 8, 0)\n ):\n pytest.skip(\"Regression: CPU crash on Windows with torch 2.8.0\")\n\n dims = 4\n dims = get_test_dims(0, 8192, n=dims)\n dims = [dim + (64 - (dim % 64)) for dim in dims]\n # for dim in [576, 5120, 3520, 5184, 1280, 4992, 5312, 2048]:\n for dim in dims:\n A = torch.normal(0, 0.1, size=(1, 1, dim), dtype=dtype, device=device)\n B = torch.eye(dim, dtype=dtype, device=device)\n\n qB, state = F.quantize_4bit(B, quant_type=storage_type, compress_statistics=False)\n C3 = torch.matmul(A, B.t())\n C2 = bnb.matmul_4bit(A, qB.t(), state)\n A.requires_grad = True\n C1 = bnb.matmul_4bit(A, qB.t(), state)\n\n torch.testing.assert_close(A, C3)\n torch.testing.assert_close(A, C1)\n torch.testing.assert_close(A, C2)\n # torch.testing.assert_close(A, C1, rtol=1e-5, atol=0.00001)\n # torch.testing.assert_close(A, C2, rtol=1e-5, atol=0.080)\n\n\ndef test_normal_map_tree():\n code = F.create_normal_map()\n values = code[:8].tolist() + code[-8:].tolist()\n num_pivots = 1\n # print(values)\n while num_pivots < 16:\n idx = list(range(16 // num_pivots // 2, 16, 16 // num_pivots))\n # print(idx)\n num_pivots *= 2\n pivots = []\n for i in idx:\n pivots.append((values[i - 1] + values[i]) / 2)\n # print(pivots)","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_functional.min_max","uri":"program://bitsandbytes/function/tests.test_functional.min_max#L467-L471","kind":"function","name":"min_max","path":"tests/test_functional.py","language":"python","start_line":467,"end_line":471,"context_start_line":447,"context_end_line":491,"code":" @pytest.mark.parametrize(\"hidden_dim\", [64, 1024, 4096], ids=id_formatter(\"hidden_dim\"))\n @pytest.mark.parametrize(\"batch_dim\", [2, 8, 16], ids=id_formatter(\"batch_dim\"))\n def test_dim3_igemm(self, seq_dim, hidden_dim, batch_dim):\n seq_dim = seq_dim - (seq_dim % 32)\n hidden_dim = hidden_dim - (hidden_dim % 32)\n batch_dim = batch_dim - (batch_dim % 2)\n for i in range(25):\n A = 
torch.randint(-128, 127, size=(batch_dim, seq_dim, hidden_dim), device=\"cuda\").to(torch.int8)\n B = torch.randint(-128, 127, size=(batch_dim, seq_dim, 1024), device=\"cuda\").to(torch.int8)\n out2 = torch.einsum(\"bsi, bso->io\", A.float(), B.float())\n iout = torch.empty(A.shape[2], B.shape[2], dtype=torch.int32, device=A.device)\n out = F.igemm(A, B, out=iout)\n\n torch.testing.assert_close(out.float(), out2)\n\n @pytest.mark.parametrize(\"seq_dim\", [32, 512], ids=id_formatter(\"seq_dim\"))\n @pytest.mark.parametrize(\"hidden_dim\", [32, 1024 * 4], ids=id_formatter(\"hidden_dim\"))\n @pytest.mark.parametrize(\"batch_dim\", [2, 16], ids=id_formatter(\"batch_dim\"))\n @pytest.mark.parametrize(\"transpose\", TRUE_FALSE, ids=id_formatter(\"transpose\"))\n def test_minmax_igemm(self, seq_dim, hidden_dim, batch_dim, transpose):\n def min_max(x):\n maxA = torch.amax(x, dim=2, keepdim=True)\n minA = torch.amin(x, dim=2, keepdim=True)\n scale = (maxA - minA) / 2.0\n return (127 * (x - minA - scale) / scale).to(torch.int8), minA, scale\n\n seq_dim = seq_dim - (seq_dim % 16)\n hidden_dim = hidden_dim - (hidden_dim % 16)\n batch_dim = batch_dim - (batch_dim % 2)\n errs = []\n relerrs = []\n errs2 = []\n relerrs2 = []\n for i in range(k):\n A = torch.normal(0.0, 0.5, size=(batch_dim, seq_dim, hidden_dim), device=\"cuda\")\n if transpose:\n B = torch.normal(0, 0.5, size=(256, hidden_dim), device=\"cuda\")\n else:\n B = torch.normal(0, 0.5, size=(hidden_dim, 256), device=\"cuda\")\n Ac, minA, scale = min_max(A)\n if transpose:\n maxB, Bc = quant_multi(B, dim=(1 if transpose else 0))\n out = F.igemm(Ac, Bc.t())\n out2 = torch.matmul(A, B.t())\n offset = B.t().sum(0) * (minA + scale)","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_linear4bit","uri":"program://bitsandbytes/module/tests.test_linear4bit#L1-L395","kind":"module","name":"tests.test_linear4bit","path":"tests/test_linear4bit.py","language":"python","start_line":1,"end_line":395,"context_start_line":1,"context_end_line":395,"code":"import copy\nimport os\nimport pickle\nimport platform\nfrom tempfile import TemporaryDirectory\n\nimport pytest\nimport torch\n\nimport bitsandbytes as bnb\nfrom bitsandbytes.cextension import HIP_ENVIRONMENT\nfrom tests.helpers import (\n TRUE_FALSE,\n describe_dtype,\n get_available_devices,\n id_formatter,\n is_supported_on_hpu,\n torch_load_from_buffer,\n torch_save_to_buffer,\n)\n\nstorage = {\n \"uint8\": torch.uint8,\n \"float16\": torch.float16,\n \"bfloat16\": torch.bfloat16,\n \"float32\": torch.float32,\n}\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"quant_storage\", [\"uint8\", \"float16\", \"bfloat16\", \"float32\"])\n@pytest.mark.parametrize(\"original_dtype\", [torch.float16, torch.bfloat16])\n@pytest.mark.parametrize(\"bias\", TRUE_FALSE, ids=id_formatter(\"bias\"))\n@pytest.mark.parametrize(\"compress_statistics\", TRUE_FALSE, ids=id_formatter(\"compress_statistics\"))\n@pytest.mark.parametrize(\"quant_type\", [\"nf4\", \"fp4\"])\n@pytest.mark.parametrize(\"save_before_forward\", TRUE_FALSE, ids=id_formatter(\"save_before_forward\"))\ndef test_linear_serialization(\n device, quant_type, original_dtype, compress_statistics, bias, quant_storage, save_before_forward\n):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, original_dtype, storage[quant_storage]):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n 
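Note: min_max in test_minmax_igemm is affine rather than symmetric, which is why an offset term of B.sum times (minA + scale) must be added after the integer product. The identity being used, checked in miniature with the codes kept in float:

import torch

A = torch.randn(8, 64)
B = torch.randn(64, 16)
maxA = A.amax(dim=1, keepdim=True)
minA = A.amin(dim=1, keepdim=True)
scale = (maxA - minA) / 2
Aq = torch.round(127 * (A - minA - scale) / scale)   # affine codes in [-127, 127]

# Affine quantization: A == Aq * scale/127 + (minA + scale), up to rounding,
# so A @ B == (Aq * scale/127) @ B + (minA + scale) * B.sum(0).
out = (Aq * scale / 127) @ B + (minA + scale) * B.sum(0)
torch.testing.assert_close(out, A @ B, rtol=0.05, atol=0.25)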
compute_dtype = None\n layer_shape = (300, 400)\n\n linear = torch.nn.Linear(*layer_shape, dtype=original_dtype, device=\"cpu\") # original layer\n\n # Quantizing original layer\n linear_q = bnb.nn.Linear4bit(\n linear.in_features,\n linear.out_features,\n bias=bias,\n compute_dtype=compute_dtype,\n compress_statistics=compress_statistics,\n quant_type=quant_type,\n device=\"meta\",\n )\n new_weight = bnb.nn.Params4bit(data=linear.weight, quant_type=quant_type, requires_grad=False)\n linear_q.weight = new_weight\n if bias:\n linear_q.bias = torch.nn.Parameter(linear.bias)\n linear_q = linear_q.to(device)\n\n # saving to state_dict:\n sd = linear_q.state_dict()\n\n # restoring from state_dict:\n bias_data2 = sd.pop(\"bias\", None)\n weight_data2 = sd.pop(\"weight\")\n weight2 = bnb.nn.Params4bit.from_prequantized(quantized_stats=sd, data=weight_data2, device=device)\n\n # creating new layer with same params:\n linear_q2 = bnb.nn.Linear4bit(\n linear.in_features,\n linear.out_features,\n bias=bias,\n compute_dtype=compute_dtype,\n compress_statistics=compress_statistics,\n quant_type=quant_type,\n device=\"meta\",\n )\n # loading weights from state_dict:\n linear_q2.weight = weight2\n if bias:\n linear_q2.bias = torch.nn.Parameter(bias_data2)\n linear_q2 = linear_q2.to(device)\n\n # MATCHING\n a, b = linear_q.weight, linear_q2.weight\n\n # Quantizing original layer with specified quant_storage type\n linear_qs = bnb.nn.Linear4bit(\n linear.in_features,\n linear.out_features,\n bias=bias,\n compute_dtype=compute_dtype,\n compress_statistics=compress_statistics,\n quant_type=quant_type,\n quant_storage=storage[quant_storage],\n device=\"meta\",\n )\n linear_qs.weight = bnb.nn.Params4bit(\n data=linear.weight,\n requires_grad=False,\n quant_type=quant_type,\n quant_storage=storage[quant_storage],\n )\n if bias:\n linear_qs.bias = torch.nn.Parameter(linear.bias)\n linear_qs = linear_qs.to(device)\n\n assert a.device == b.device\n assert a.dtype == b.dtype\n assert torch.equal(a, b)\n\n q0 = a.quant_state\n q1 = b.quant_state\n for attr in (\"code\", \"dtype\", \"blocksize\", \"absmax\"):\n c, d = getattr(q0, attr), getattr(q1, attr)\n if isinstance(c, torch.Tensor):\n assert torch.equal(c, d)\n else:\n assert c == d, f\"{c} != {d}\"\n\n if q0.state2 is not None:\n for attr in (\"code\", \"dtype\", \"blocksize\", \"absmax\"):\n c, d = getattr(q0.state2, attr), getattr(q1.state2, attr)\n if isinstance(c, torch.Tensor):\n assert torch.equal(c, d)\n else:\n assert c == d, f\"{c} != {d}\"\n\n if bias:\n a, b = linear_q.bias, linear_q2.bias\n assert a.device == b.device\n assert a.dtype == b.dtype\n assert torch.equal(a, b)\n\n if save_before_forward:\n bytes_4bit = torch_save_to_buffer(linear_q)\n\n # Forward test\n x = torch.rand(42, layer_shape[0], device=device)\n a = linear_q(x)\n b = linear_q2(x)\n c = linear_qs(x)\n assert a.device == b.device\n assert a.dtype == b.dtype\n assert a.device == c.device\n assert a.dtype == c.dtype\n assert torch.equal(a, b)\n assert torch.equal(a, c)\n\n if not save_before_forward:\n bytes_4bit = torch_save_to_buffer(linear_q)\n linear_q3 = torch_load_from_buffer(bytes_4bit)\n\n # Test moving to CPU and back to GPU\n if device != \"cpu\":\n linear_q2.to(\"cpu\")\n linear_q2.to(device)\n d = linear_qs(x)\n assert c.dtype == d.dtype\n assert c.device == d.device\n assert torch.equal(c, d)\n\n d = linear_q3(x)\n assert c.dtype == d.dtype\n assert c.device == d.device\n assert torch.equal(c, d)\n\n # Saved size ratio test. 
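Note: torch_save_to_buffer/torch_load_from_buffer come from the shared test helpers; a plausible minimal implementation (an assumption, not the repo's exact code):

import io
import torch

def torch_save_to_buffer(obj) -> bytes:
    buf = io.BytesIO()
    torch.save(obj, buf)
    return buf.getvalue()

def torch_load_from_buffer(data: bytes):
    # weights_only=False because whole modules, not just tensors, are round-tripped.
    return torch.load(io.BytesIO(data), weights_only=False)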
Target set for layer_shape == (300, 400) w/ bias\n with TemporaryDirectory() as tmpdir:\n state_path_4bit = os.path.join(tmpdir, \"state_4bit.pth\")\n state_path = os.path.join(tmpdir, \"state.pth\")\n torch.save(linear.state_dict(), state_path)\n torch.save(linear_q.state_dict(), state_path_4bit)\n\n size_orig, size_4 = (\n os.path.getsize(state_path),\n os.path.getsize(state_path_4bit),\n )\n size_ratio = size_4 / size_orig\n target_compression = (\n 0.143 if original_dtype == torch.float32 else 0.29\n ) # these numbers get lower as weight shape increases\n ratio_error_msg = (\n f\"quantized_size {size_4:,} is larger on disk than {target_compression:.2%} of original size {size_orig:,}\"\n )\n assert size_ratio < target_compression, ratio_error_msg\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"quant_type\", [\"nf4\", \"fp4\"])\n@pytest.mark.parametrize(\"blocksize\", [64, 128] if not HIP_ENVIRONMENT else [128])\n@pytest.mark.parametrize(\"compress_statistics\", TRUE_FALSE, ids=id_formatter(\"compress_statistics\"))\ndef test_copy_param(device, quant_type, blocksize, compress_statistics):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n tensor = torch.randn(300, 400)\n param = bnb.nn.Params4bit(\n data=tensor,\n quant_type=quant_type,\n blocksize=blocksize,\n compress_statistics=compress_statistics,\n requires_grad=False,\n ).to(device)\n\n shallow_copy_param = copy.copy(param)\n assert param.quant_state is shallow_copy_param.quant_state\n assert param.data.data_ptr() == shallow_copy_param.data.data_ptr()\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"quant_type\", [\"nf4\", \"fp4\"])\ndef test_params4bit_torch_chunk_split(device, quant_type):\n \"\"\"Test that torch.chunk and torch.split preserve Params4bit subclass for FSDP2 compatibility.\"\"\"\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, torch.float16, torch.uint8):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n if device == \"cpu\":\n pytest.skip(\"CPU quantization causes segfault, skipping CPU test\")\n\n original_tensor = torch.randn(8, 4, dtype=torch.float16, device=\"cpu\")\n\n params4bit = bnb.nn.Params4bit(data=original_tensor, quant_type=quant_type, requires_grad=False)\n\n if device != \"cpu\":\n params4bit = params4bit.to(device)\n\n chunks = torch.chunk(params4bit, 2, dim=0)\n\n assert isinstance(chunks, tuple), \"torch.chunk should return tuple\"\n for chunk in chunks:\n assert isinstance(chunk, bnb.nn.Params4bit), \"Chunk should preserve Params4bit subclass\"\n assert hasattr(chunk, \"quant_type\"), \"Should preserve metadata\"\n assert chunk.quant_type == params4bit.quant_type, \"Should preserve quant_type value\"\n\n splits = torch.split(params4bit, 2, dim=0)\n\n assert isinstance(splits, tuple), \"torch.split should return tuple\"\n assert len(splits) > 0, \"Should have at least one split\"\n for split in splits:\n assert isinstance(split, bnb.nn.Params4bit), \"Split should preserve Params4bit subclass\"\n assert hasattr(split, \"quant_type\"), \"Should preserve metadata\"\n assert split.quant_type == params4bit.quant_type, \"Should preserve quant_type value\"\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"quant_type\", [\"nf4\", \"fp4\"])\n@pytest.mark.parametrize(\"blocksize\", [64, 128] if not HIP_ENVIRONMENT else 
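Note: back-of-envelope for the 0.29 compression target above, assuming blocksize 64 with fp32 per-block absmax; pickle metadata and the extra quant_state entries push the real on-disk ratio up slightly:

n = 300 * 400                      # weights in the test layer
orig_bytes = n * 2                 # fp16 storage
quant_bytes = n // 2 + (n // 64) * 4   # packed 4-bit codes + per-block absmax
print(quant_bytes / orig_bytes)    # ~0.28 before serialization overhead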
[128])\n@pytest.mark.parametrize(\"compress_statistics\", TRUE_FALSE, ids=id_formatter(\"compress_statistics\"))\ndef test_deepcopy_param(device, quant_type, blocksize, compress_statistics):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n tensor = torch.randn(300, 400)\n param = bnb.nn.Params4bit(\n data=tensor,\n quant_type=quant_type,\n blocksize=blocksize,\n compress_statistics=compress_statistics,\n requires_grad=False,\n ).to(device)\n dict_keys_before = set(param.__dict__.keys())\n copy_param = copy.deepcopy(param)\n dict_keys_after = set(param.__dict__.keys())\n dict_keys_copy = set(copy_param.__dict__.keys())\n\n assert param.quant_state is not copy_param.quant_state\n assert param.data.data_ptr() != copy_param.data.data_ptr()\n\n # there was a bug where deepcopy would modify the original object\n assert dict_keys_before == dict_keys_after\n assert dict_keys_before == dict_keys_copy\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"quant_type\", [\"nf4\", \"fp4\"])\n@pytest.mark.parametrize(\"blocksize\", [64, 128] if not HIP_ENVIRONMENT else [128])\n@pytest.mark.parametrize(\"compress_statistics\", TRUE_FALSE, ids=id_formatter(\"compress_statistics\"))\ndef test_params4bit_real_serialization(device, quant_type, blocksize, compress_statistics):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n original_tensor = torch.randn(300, 400)\n original_param = bnb.nn.Params4bit(\n data=original_tensor,\n quant_type=quant_type,\n blocksize=blocksize,\n compress_statistics=compress_statistics,\n )\n dict_keys_before = set(original_param.__dict__.keys())\n\n original_param.to(device) # change device to trigger quantization\n\n serialized_param = pickle.dumps(original_param)\n deserialized_param = pickle.loads(serialized_param)\n dict_keys_after = set(original_param.__dict__.keys())\n dict_keys_deserialized = set(deserialized_param.__dict__.keys())\n\n assert torch.equal(original_param.data, deserialized_param.data)\n assert original_param.requires_grad == deserialized_param.requires_grad == False\n assert original_param.quant_type == deserialized_param.quant_type\n assert original_param.blocksize == deserialized_param.blocksize\n assert original_param.compress_statistics == deserialized_param.compress_statistics\n assert original_param.quant_state == deserialized_param.quant_state\n\n # there was a bug where deepcopy would modify the original object\n assert dict_keys_before == dict_keys_after\n assert dict_keys_before == dict_keys_deserialized\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"quant_type\", [\"nf4\", \"fp4\"])\n@pytest.mark.parametrize(\"compute_dtype\", [torch.bfloat16, torch.float32], ids=describe_dtype)\n@pytest.mark.parametrize(\"compress_statistics\", TRUE_FALSE, ids=id_formatter(\"compress_statistics\"))\n@pytest.mark.parametrize(\"bias\", TRUE_FALSE, ids=id_formatter(\"bias\"))\n@pytest.mark.parametrize(\"fullgraph\", TRUE_FALSE, ids=id_formatter(\"fullgraph\"))\n@pytest.mark.parametrize(\"mode\", [\"default\", \"reduce-overhead\"], ids=id_formatter(\"mode\"))\n@pytest.mark.skipif(torch.__version__ < (2, 4), reason=\"Not supported in torch < 2.4\")\ndef test_linear4bit_torch_compile(device, quant_type, compute_dtype, compress_statistics, bias, fullgraph, mode):\n if device == \"hpu\" and not 
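Note: the deepcopy and pickle invariants asserted for Params4bit (equal values, distinct storage) are the standard tensor semantics; the minimal version of the same checks on a plain tensor:

import copy
import pickle
import torch

t = torch.randn(4)
t2 = copy.deepcopy(t)
assert torch.equal(t, t2) and t.data_ptr() != t2.data_ptr()   # fresh storage, same values

t3 = pickle.loads(pickle.dumps(t))
assert torch.equal(t, t3) and t.data_ptr() != t3.data_ptr()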
is_supported_on_hpu(quant_type):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n if fullgraph and torch.__version__ < (2, 8, 0, \"dev\"):\n pytest.skip(\"fullgraph mode requires torch 2.8 or higher\")\n\n if device == \"cuda\" and platform.system() == \"Windows\":\n pytest.skip(\"Triton is not officially supported on Windows\")\n\n # Has a strange regression on Linux aarch64 CPU in torch==2.6.0 when fullgraph=False.\n if (\n not fullgraph\n and device == \"cpu\"\n and platform.machine() == \"aarch64\"\n and platform.system() == \"Linux\"\n and ((2, 7) > torch.__version__ >= (2, 6))\n ):\n pytest.xfail(\"Regression in torch==2.6.0 on Linux aarch64 CPU\")\n\n dim = 256\n batch_size = 16\n\n torch.compiler.reset()\n\n # Create a small network with Linear4bit layers\n net = torch.nn.Sequential(\n *[\n bnb.nn.Linear4bit(\n dim,\n dim,\n bias=bias,\n compute_dtype=compute_dtype,\n compress_statistics=compress_statistics,\n quant_type=quant_type,\n )\n for _ in range(4)\n ]\n ).to(device)\n\n # Create input tensor\n x = torch.randn(batch_size, dim, dtype=compute_dtype, device=device)\n\n # Get reference output before compilation\n with torch.no_grad():\n ref_output = net(x)\n\n # Compile the model\n compile_backend = \"hpu_backend\" if device == \"hpu\" else \"inductor\"\n compiled_net = torch.compile(net, fullgraph=fullgraph, mode=mode, backend=compile_backend)\n\n # Get output from compiled model\n with torch.no_grad():\n compiled_output = compiled_net(x)\n\n # Check outputs match\n assert compiled_output.shape == ref_output.shape\n assert compiled_output.device == ref_output.device\n assert compiled_output.dtype == ref_output.dtype\n torch.testing.assert_close(compiled_output, ref_output)\n\n # Test with gradients\n x.requires_grad_(True)\n y1 = net(x).sum()\n y1.backward()\n grad_ref = x.grad.clone()\n\n x.grad = None\n y2 = compiled_net(x).sum()\n y2.backward()\n grad_compiled = x.grad.clone()\n\n torch.testing.assert_close(grad_compiled, grad_ref)","source_hash":"8d572fe895ecade4570cfeec5d70db31d27cc260bf9ee0324095dd5af4da61cf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_linear4bit.test_linear_serialization","uri":"program://bitsandbytes/function/tests.test_linear4bit.test_linear_serialization#L37-L190","kind":"function","name":"test_linear_serialization","path":"tests/test_linear4bit.py","language":"python","start_line":37,"end_line":190,"context_start_line":17,"context_end_line":210,"code":" is_supported_on_hpu,\n torch_load_from_buffer,\n torch_save_to_buffer,\n)\n\nstorage = {\n \"uint8\": torch.uint8,\n \"float16\": torch.float16,\n \"bfloat16\": torch.bfloat16,\n \"float32\": torch.float32,\n}\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"quant_storage\", [\"uint8\", \"float16\", \"bfloat16\", \"float32\"])\n@pytest.mark.parametrize(\"original_dtype\", [torch.float16, torch.bfloat16])\n@pytest.mark.parametrize(\"bias\", TRUE_FALSE, ids=id_formatter(\"bias\"))\n@pytest.mark.parametrize(\"compress_statistics\", TRUE_FALSE, ids=id_formatter(\"compress_statistics\"))\n@pytest.mark.parametrize(\"quant_type\", [\"nf4\", \"fp4\"])\n@pytest.mark.parametrize(\"save_before_forward\", TRUE_FALSE, ids=id_formatter(\"save_before_forward\"))\ndef test_linear_serialization(\n device, quant_type, original_dtype, compress_statistics, bias, quant_storage, save_before_forward\n):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, original_dtype, storage[quant_storage]):\n pytest.skip(\"This 
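Note: the compile test's structure in miniature: compare eager and compiled outputs, then gradients, on a plain module (the module and shapes here are illustrative, not the Linear4bit network from the test):

import torch

net = torch.nn.Sequential(torch.nn.Linear(64, 64), torch.nn.ReLU(), torch.nn.Linear(64, 64))
x = torch.randn(16, 64)

torch.compiler.reset()
compiled = torch.compile(net, fullgraph=True)

with torch.no_grad():
    torch.testing.assert_close(compiled(x), net(x))   # forward parity

# Gradients through the compiled module should match eager as well.
x.requires_grad_(True)
net(x).sum().backward()
g_ref, x.grad = x.grad.clone(), None
compiled(x).sum().backward()
torch.testing.assert_close(x.grad, g_ref)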
configuration is not supported on HPU.\")\n\n compute_dtype = None\n layer_shape = (300, 400)\n\n linear = torch.nn.Linear(*layer_shape, dtype=original_dtype, device=\"cpu\") # original layer\n\n # Quantizing original layer\n linear_q = bnb.nn.Linear4bit(\n linear.in_features,\n linear.out_features,\n bias=bias,\n compute_dtype=compute_dtype,\n compress_statistics=compress_statistics,\n quant_type=quant_type,\n device=\"meta\",\n )\n new_weight = bnb.nn.Params4bit(data=linear.weight, quant_type=quant_type, requires_grad=False)\n linear_q.weight = new_weight\n if bias:\n linear_q.bias = torch.nn.Parameter(linear.bias)\n linear_q = linear_q.to(device)\n\n # saving to state_dict:\n sd = linear_q.state_dict()\n\n # restoring from state_dict:\n bias_data2 = sd.pop(\"bias\", None)\n weight_data2 = sd.pop(\"weight\")\n weight2 = bnb.nn.Params4bit.from_prequantized(quantized_stats=sd, data=weight_data2, device=device)\n\n # creating new layer with same params:\n linear_q2 = bnb.nn.Linear4bit(\n linear.in_features,\n linear.out_features,\n bias=bias,\n compute_dtype=compute_dtype,\n compress_statistics=compress_statistics,\n quant_type=quant_type,\n device=\"meta\",\n )\n # loading weights from state_dict:\n linear_q2.weight = weight2\n if bias:\n linear_q2.bias = torch.nn.Parameter(bias_data2)\n linear_q2 = linear_q2.to(device)\n\n # MATCHING\n a, b = linear_q.weight, linear_q2.weight\n\n # Quantizing original layer with specified quant_storage type\n linear_qs = bnb.nn.Linear4bit(\n linear.in_features,\n linear.out_features,\n bias=bias,\n compute_dtype=compute_dtype,\n compress_statistics=compress_statistics,\n quant_type=quant_type,\n quant_storage=storage[quant_storage],\n device=\"meta\",\n )\n linear_qs.weight = bnb.nn.Params4bit(\n data=linear.weight,\n requires_grad=False,\n quant_type=quant_type,\n quant_storage=storage[quant_storage],\n )\n if bias:\n linear_qs.bias = torch.nn.Parameter(linear.bias)\n linear_qs = linear_qs.to(device)\n\n assert a.device == b.device\n assert a.dtype == b.dtype\n assert torch.equal(a, b)\n\n q0 = a.quant_state\n q1 = b.quant_state\n for attr in (\"code\", \"dtype\", \"blocksize\", \"absmax\"):\n c, d = getattr(q0, attr), getattr(q1, attr)\n if isinstance(c, torch.Tensor):\n assert torch.equal(c, d)\n else:\n assert c == d, f\"{c} != {d}\"\n\n if q0.state2 is not None:\n for attr in (\"code\", \"dtype\", \"blocksize\", \"absmax\"):\n c, d = getattr(q0.state2, attr), getattr(q1.state2, attr)\n if isinstance(c, torch.Tensor):\n assert torch.equal(c, d)\n else:\n assert c == d, f\"{c} != {d}\"\n\n if bias:\n a, b = linear_q.bias, linear_q2.bias\n assert a.device == b.device\n assert a.dtype == b.dtype\n assert torch.equal(a, b)\n\n if save_before_forward:\n bytes_4bit = torch_save_to_buffer(linear_q)\n\n # Forward test\n x = torch.rand(42, layer_shape[0], device=device)\n a = linear_q(x)\n b = linear_q2(x)\n c = linear_qs(x)\n assert a.device == b.device\n assert a.dtype == b.dtype\n assert a.device == c.device\n assert a.dtype == c.dtype\n assert torch.equal(a, b)\n assert torch.equal(a, c)\n\n if not save_before_forward:\n bytes_4bit = torch_save_to_buffer(linear_q)\n linear_q3 = torch_load_from_buffer(bytes_4bit)\n\n # Test moving to CPU and back to GPU\n if device != \"cpu\":\n linear_q2.to(\"cpu\")\n linear_q2.to(device)\n d = linear_qs(x)\n assert c.dtype == d.dtype\n assert c.device == d.device\n assert torch.equal(c, d)\n\n d = linear_q3(x)\n assert c.dtype == d.dtype\n assert c.device == d.device\n assert torch.equal(c, d)\n\n # Saved size ratio test. 
Target set for layer_shape == (300, 400) w/ bias\n with TemporaryDirectory() as tmpdir:\n state_path_4bit = os.path.join(tmpdir, \"state_4bit.pth\")\n state_path = os.path.join(tmpdir, \"state.pth\")\n torch.save(linear.state_dict(), state_path)\n torch.save(linear_q.state_dict(), state_path_4bit)\n\n size_orig, size_4 = (\n os.path.getsize(state_path),\n os.path.getsize(state_path_4bit),\n )\n size_ratio = size_4 / size_orig\n target_compression = (\n 0.143 if original_dtype == torch.float32 else 0.29\n ) # these numbers get lower as weight shape increases\n ratio_error_msg = (\n f\"quantized_size {size_4:,} is larger on disk than {target_compression:.2%} of original size {size_orig:,}\"\n )\n assert size_ratio < target_compression, ratio_error_msg\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"quant_type\", [\"nf4\", \"fp4\"])\n@pytest.mark.parametrize(\"blocksize\", [64, 128] if not HIP_ENVIRONMENT else [128])\n@pytest.mark.parametrize(\"compress_statistics\", TRUE_FALSE, ids=id_formatter(\"compress_statistics\"))\ndef test_copy_param(device, quant_type, blocksize, compress_statistics):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n tensor = torch.randn(300, 400)\n param = bnb.nn.Params4bit(\n data=tensor,\n quant_type=quant_type,\n blocksize=blocksize,\n compress_statistics=compress_statistics,\n requires_grad=False,\n ).to(device)\n\n shallow_copy_param = copy.copy(param)","source_hash":"8d572fe895ecade4570cfeec5d70db31d27cc260bf9ee0324095dd5af4da61cf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_linear4bit.test_copy_param","uri":"program://bitsandbytes/function/tests.test_linear4bit.test_copy_param#L197-L212","kind":"function","name":"test_copy_param","path":"tests/test_linear4bit.py","language":"python","start_line":197,"end_line":212,"context_start_line":177,"context_end_line":232,"code":" torch.save(linear_q.state_dict(), state_path_4bit)\n\n size_orig, size_4 = (\n os.path.getsize(state_path),\n os.path.getsize(state_path_4bit),\n )\n size_ratio = size_4 / size_orig\n target_compression = (\n 0.143 if original_dtype == torch.float32 else 0.29\n ) # these numbers get lower as weight shape increases\n ratio_error_msg = (\n f\"quantized_size {size_4:,} is larger on disk than {target_compression:.2%} of original size {size_orig:,}\"\n )\n assert size_ratio < target_compression, ratio_error_msg\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"quant_type\", [\"nf4\", \"fp4\"])\n@pytest.mark.parametrize(\"blocksize\", [64, 128] if not HIP_ENVIRONMENT else [128])\n@pytest.mark.parametrize(\"compress_statistics\", TRUE_FALSE, ids=id_formatter(\"compress_statistics\"))\ndef test_copy_param(device, quant_type, blocksize, compress_statistics):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n tensor = torch.randn(300, 400)\n param = bnb.nn.Params4bit(\n data=tensor,\n quant_type=quant_type,\n blocksize=blocksize,\n compress_statistics=compress_statistics,\n requires_grad=False,\n ).to(device)\n\n shallow_copy_param = copy.copy(param)\n assert param.quant_state is shallow_copy_param.quant_state\n assert param.data.data_ptr() == shallow_copy_param.data.data_ptr()\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"quant_type\", [\"nf4\", \"fp4\"])\ndef 
test_params4bit_torch_chunk_split(device, quant_type):\n \"\"\"Test that torch.chunk and torch.split preserve Params4bit subclass for FSDP2 compatibility.\"\"\"\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, torch.float16, torch.uint8):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n if device == \"cpu\":\n pytest.skip(\"CPU quantization causes segfault, skipping CPU test\")\n\n original_tensor = torch.randn(8, 4, dtype=torch.float16, device=\"cpu\")\n\n params4bit = bnb.nn.Params4bit(data=original_tensor, quant_type=quant_type, requires_grad=False)\n\n if device != \"cpu\":\n params4bit = params4bit.to(device)\n\n chunks = torch.chunk(params4bit, 2, dim=0)","source_hash":"8d572fe895ecade4570cfeec5d70db31d27cc260bf9ee0324095dd5af4da61cf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_linear4bit.test_params4bit_torch_chunk_split","uri":"program://bitsandbytes/function/tests.test_linear4bit.test_params4bit_torch_chunk_split#L217-L247","kind":"function","name":"test_params4bit_torch_chunk_split","path":"tests/test_linear4bit.py","language":"python","start_line":217,"end_line":247,"context_start_line":197,"context_end_line":267,"code":"def test_copy_param(device, quant_type, blocksize, compress_statistics):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n tensor = torch.randn(300, 400)\n param = bnb.nn.Params4bit(\n data=tensor,\n quant_type=quant_type,\n blocksize=blocksize,\n compress_statistics=compress_statistics,\n requires_grad=False,\n ).to(device)\n\n shallow_copy_param = copy.copy(param)\n assert param.quant_state is shallow_copy_param.quant_state\n assert param.data.data_ptr() == shallow_copy_param.data.data_ptr()\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"quant_type\", [\"nf4\", \"fp4\"])\ndef test_params4bit_torch_chunk_split(device, quant_type):\n \"\"\"Test that torch.chunk and torch.split preserve Params4bit subclass for FSDP2 compatibility.\"\"\"\n if device == \"hpu\" and not is_supported_on_hpu(quant_type, torch.float16, torch.uint8):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n if device == \"cpu\":\n pytest.skip(\"CPU quantization causes segfault, skipping CPU test\")\n\n original_tensor = torch.randn(8, 4, dtype=torch.float16, device=\"cpu\")\n\n params4bit = bnb.nn.Params4bit(data=original_tensor, quant_type=quant_type, requires_grad=False)\n\n if device != \"cpu\":\n params4bit = params4bit.to(device)\n\n chunks = torch.chunk(params4bit, 2, dim=0)\n\n assert isinstance(chunks, tuple), \"torch.chunk should return tuple\"\n for chunk in chunks:\n assert isinstance(chunk, bnb.nn.Params4bit), \"Chunk should preserve Params4bit subclass\"\n assert hasattr(chunk, \"quant_type\"), \"Should preserve metadata\"\n assert chunk.quant_type == params4bit.quant_type, \"Should preserve quant_type value\"\n\n splits = torch.split(params4bit, 2, dim=0)\n\n assert isinstance(splits, tuple), \"torch.split should return tuple\"\n assert len(splits) > 0, \"Should have at least one split\"\n for split in splits:\n assert isinstance(split, bnb.nn.Params4bit), \"Split should preserve Params4bit subclass\"\n assert hasattr(split, \"quant_type\"), \"Should preserve metadata\"\n assert split.quant_type == params4bit.quant_type, \"Should preserve quant_type value\"\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"quant_type\", [\"nf4\", 
\"fp4\"])\n@pytest.mark.parametrize(\"blocksize\", [64, 128] if not HIP_ENVIRONMENT else [128])\n@pytest.mark.parametrize(\"compress_statistics\", TRUE_FALSE, ids=id_formatter(\"compress_statistics\"))\ndef test_deepcopy_param(device, quant_type, blocksize, compress_statistics):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n tensor = torch.randn(300, 400)\n param = bnb.nn.Params4bit(\n data=tensor,\n quant_type=quant_type,\n blocksize=blocksize,\n compress_statistics=compress_statistics,\n requires_grad=False,\n ).to(device)\n dict_keys_before = set(param.__dict__.keys())\n copy_param = copy.deepcopy(param)","source_hash":"8d572fe895ecade4570cfeec5d70db31d27cc260bf9ee0324095dd5af4da61cf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_linear4bit.test_deepcopy_param","uri":"program://bitsandbytes/function/tests.test_linear4bit.test_deepcopy_param#L254-L276","kind":"function","name":"test_deepcopy_param","path":"tests/test_linear4bit.py","language":"python","start_line":254,"end_line":276,"context_start_line":234,"context_end_line":296,"code":" assert isinstance(chunks, tuple), \"torch.chunk should return tuple\"\n for chunk in chunks:\n assert isinstance(chunk, bnb.nn.Params4bit), \"Chunk should preserve Params4bit subclass\"\n assert hasattr(chunk, \"quant_type\"), \"Should preserve metadata\"\n assert chunk.quant_type == params4bit.quant_type, \"Should preserve quant_type value\"\n\n splits = torch.split(params4bit, 2, dim=0)\n\n assert isinstance(splits, tuple), \"torch.split should return tuple\"\n assert len(splits) > 0, \"Should have at least one split\"\n for split in splits:\n assert isinstance(split, bnb.nn.Params4bit), \"Split should preserve Params4bit subclass\"\n assert hasattr(split, \"quant_type\"), \"Should preserve metadata\"\n assert split.quant_type == params4bit.quant_type, \"Should preserve quant_type value\"\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"quant_type\", [\"nf4\", \"fp4\"])\n@pytest.mark.parametrize(\"blocksize\", [64, 128] if not HIP_ENVIRONMENT else [128])\n@pytest.mark.parametrize(\"compress_statistics\", TRUE_FALSE, ids=id_formatter(\"compress_statistics\"))\ndef test_deepcopy_param(device, quant_type, blocksize, compress_statistics):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n tensor = torch.randn(300, 400)\n param = bnb.nn.Params4bit(\n data=tensor,\n quant_type=quant_type,\n blocksize=blocksize,\n compress_statistics=compress_statistics,\n requires_grad=False,\n ).to(device)\n dict_keys_before = set(param.__dict__.keys())\n copy_param = copy.deepcopy(param)\n dict_keys_after = set(param.__dict__.keys())\n dict_keys_copy = set(copy_param.__dict__.keys())\n\n assert param.quant_state is not copy_param.quant_state\n assert param.data.data_ptr() != copy_param.data.data_ptr()\n\n # there was a bug where deepcopy would modify the original object\n assert dict_keys_before == dict_keys_after\n assert dict_keys_before == dict_keys_copy\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"quant_type\", [\"nf4\", \"fp4\"])\n@pytest.mark.parametrize(\"blocksize\", [64, 128] if not HIP_ENVIRONMENT else [128])\n@pytest.mark.parametrize(\"compress_statistics\", TRUE_FALSE, ids=id_formatter(\"compress_statistics\"))\ndef test_params4bit_real_serialization(device, quant_type, 
blocksize, compress_statistics):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n original_tensor = torch.randn(300, 400)\n original_param = bnb.nn.Params4bit(\n data=original_tensor,\n quant_type=quant_type,\n blocksize=blocksize,\n compress_statistics=compress_statistics,\n )\n dict_keys_before = set(original_param.__dict__.keys())\n\n original_param.to(device) # change device to trigger quantization","source_hash":"8d572fe895ecade4570cfeec5d70db31d27cc260bf9ee0324095dd5af4da61cf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_linear4bit.test_params4bit_real_serialization","uri":"program://bitsandbytes/function/tests.test_linear4bit.test_params4bit_real_serialization#L283-L312","kind":"function","name":"test_params4bit_real_serialization","path":"tests/test_linear4bit.py","language":"python","start_line":283,"end_line":312,"context_start_line":263,"context_end_line":332,"code":" compress_statistics=compress_statistics,\n requires_grad=False,\n ).to(device)\n dict_keys_before = set(param.__dict__.keys())\n copy_param = copy.deepcopy(param)\n dict_keys_after = set(param.__dict__.keys())\n dict_keys_copy = set(copy_param.__dict__.keys())\n\n assert param.quant_state is not copy_param.quant_state\n assert param.data.data_ptr() != copy_param.data.data_ptr()\n\n # there was a bug where deepcopy would modify the original object\n assert dict_keys_before == dict_keys_after\n assert dict_keys_before == dict_keys_copy\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"quant_type\", [\"nf4\", \"fp4\"])\n@pytest.mark.parametrize(\"blocksize\", [64, 128] if not HIP_ENVIRONMENT else [128])\n@pytest.mark.parametrize(\"compress_statistics\", TRUE_FALSE, ids=id_formatter(\"compress_statistics\"))\ndef test_params4bit_real_serialization(device, quant_type, blocksize, compress_statistics):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n original_tensor = torch.randn(300, 400)\n original_param = bnb.nn.Params4bit(\n data=original_tensor,\n quant_type=quant_type,\n blocksize=blocksize,\n compress_statistics=compress_statistics,\n )\n dict_keys_before = set(original_param.__dict__.keys())\n\n original_param.to(device) # change device to trigger quantization\n\n serialized_param = pickle.dumps(original_param)\n deserialized_param = pickle.loads(serialized_param)\n dict_keys_after = set(original_param.__dict__.keys())\n dict_keys_deserialized = set(deserialized_param.__dict__.keys())\n\n assert torch.equal(original_param.data, deserialized_param.data)\n assert original_param.requires_grad == deserialized_param.requires_grad == False\n assert original_param.quant_type == deserialized_param.quant_type\n assert original_param.blocksize == deserialized_param.blocksize\n assert original_param.compress_statistics == deserialized_param.compress_statistics\n assert original_param.quant_state == deserialized_param.quant_state\n\n # there was a bug where deepcopy would modify the original object\n assert dict_keys_before == dict_keys_after\n assert dict_keys_before == dict_keys_deserialized\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"quant_type\", [\"nf4\", \"fp4\"])\n@pytest.mark.parametrize(\"compute_dtype\", [torch.bfloat16, torch.float32], ids=describe_dtype)\n@pytest.mark.parametrize(\"compress_statistics\", TRUE_FALSE, 
ids=id_formatter(\"compress_statistics\"))\n@pytest.mark.parametrize(\"bias\", TRUE_FALSE, ids=id_formatter(\"bias\"))\n@pytest.mark.parametrize(\"fullgraph\", TRUE_FALSE, ids=id_formatter(\"fullgraph\"))\n@pytest.mark.parametrize(\"mode\", [\"default\", \"reduce-overhead\"], ids=id_formatter(\"mode\"))\n@pytest.mark.skipif(torch.__version__ < (2, 4), reason=\"Not supported in torch < 2.4\")\ndef test_linear4bit_torch_compile(device, quant_type, compute_dtype, compress_statistics, bias, fullgraph, mode):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n if fullgraph and torch.__version__ < (2, 8, 0, \"dev\"):\n pytest.skip(\"fullgraph mode requires torch 2.8 or higher\")\n\n if device == \"cuda\" and platform.system() == \"Windows\":\n pytest.skip(\"Triton is not officially supported on Windows\")\n","source_hash":"8d572fe895ecade4570cfeec5d70db31d27cc260bf9ee0324095dd5af4da61cf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:tests.test_linear4bit.test_linear4bit_torch_compile","uri":"program://bitsandbytes/function/tests.test_linear4bit.test_linear4bit_torch_compile#L323-L395","kind":"function","name":"test_linear4bit_torch_compile","path":"tests/test_linear4bit.py","language":"python","start_line":323,"end_line":395,"context_start_line":303,"context_end_line":395,"code":" assert torch.equal(original_param.data, deserialized_param.data)\n assert original_param.requires_grad == deserialized_param.requires_grad == False\n assert original_param.quant_type == deserialized_param.quant_type\n assert original_param.blocksize == deserialized_param.blocksize\n assert original_param.compress_statistics == deserialized_param.compress_statistics\n assert original_param.quant_state == deserialized_param.quant_state\n\n # there was a bug where deepcopy would modify the original object\n assert dict_keys_before == dict_keys_after\n assert dict_keys_before == dict_keys_deserialized\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"quant_type\", [\"nf4\", \"fp4\"])\n@pytest.mark.parametrize(\"compute_dtype\", [torch.bfloat16, torch.float32], ids=describe_dtype)\n@pytest.mark.parametrize(\"compress_statistics\", TRUE_FALSE, ids=id_formatter(\"compress_statistics\"))\n@pytest.mark.parametrize(\"bias\", TRUE_FALSE, ids=id_formatter(\"bias\"))\n@pytest.mark.parametrize(\"fullgraph\", TRUE_FALSE, ids=id_formatter(\"fullgraph\"))\n@pytest.mark.parametrize(\"mode\", [\"default\", \"reduce-overhead\"], ids=id_formatter(\"mode\"))\n@pytest.mark.skipif(torch.__version__ < (2, 4), reason=\"Not supported in torch < 2.4\")\ndef test_linear4bit_torch_compile(device, quant_type, compute_dtype, compress_statistics, bias, fullgraph, mode):\n if device == \"hpu\" and not is_supported_on_hpu(quant_type):\n pytest.skip(\"This configuration is not supported on HPU.\")\n\n if fullgraph and torch.__version__ < (2, 8, 0, \"dev\"):\n pytest.skip(\"fullgraph mode requires torch 2.8 or higher\")\n\n if device == \"cuda\" and platform.system() == \"Windows\":\n pytest.skip(\"Triton is not officially supported on Windows\")\n\n # Has a strange regression on Linux aarch64 CPU in torch==2.6.0 when fullgraph=False.\n if (\n not fullgraph\n and device == \"cpu\"\n and platform.machine() == \"aarch64\"\n and platform.system() == \"Linux\"\n and ((2, 7) > torch.__version__ >= (2, 6))\n ):\n pytest.xfail(\"Regression in torch==2.6.0 on Linux aarch64 CPU\")\n\n dim = 256\n batch_size = 16\n\n 
torch.compiler.reset()\n\n # Create a small network with Linear4bit layers\n net = torch.nn.Sequential(\n *[\n bnb.nn.Linear4bit(\n dim,\n dim,\n bias=bias,\n compute_dtype=compute_dtype,\n compress_statistics=compress_statistics,\n quant_type=quant_type,\n )\n for _ in range(4)\n ]\n ).to(device)\n\n # Create input tensor\n x = torch.randn(batch_size, dim, dtype=compute_dtype, device=device)\n\n # Get reference output before compilation\n with torch.no_grad():\n ref_output = net(x)\n\n # Compile the model\n compile_backend = \"hpu_backend\" if device == \"hpu\" else \"inductor\"\n compiled_net = torch.compile(net, fullgraph=fullgraph, mode=mode, backend=compile_backend)\n\n # Get output from compiled model\n with torch.no_grad():\n compiled_output = compiled_net(x)\n\n # Check outputs match\n assert compiled_output.shape == ref_output.shape\n assert compiled_output.device == ref_output.device\n assert compiled_output.dtype == ref_output.dtype\n torch.testing.assert_close(compiled_output, ref_output)\n\n # Test with gradients\n x.requires_grad_(True)\n y1 = net(x).sum()\n y1.backward()\n grad_ref = x.grad.clone()\n\n x.grad = None\n y2 = compiled_net(x).sum()\n y2.backward()\n grad_compiled = x.grad.clone()\n\n torch.testing.assert_close(grad_compiled, grad_ref)","source_hash":"8d572fe895ecade4570cfeec5d70db31d27cc260bf9ee0324095dd5af4da61cf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:examples.compile_inference","uri":"program://bitsandbytes/module/examples.compile_inference#L1-L32","kind":"module","name":"examples.compile_inference","path":"examples/compile_inference.py","language":"python","start_line":1,"end_line":32,"context_start_line":1,"context_end_line":32,"code":"import torch\nimport torch._dynamo\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig\n\n# torch._dynamo.config.suppress_errors = True\n\ntorch.set_float32_matmul_precision(\"high\")\n\nquantization_config = BitsAndBytesConfig(load_in_8bit=True)\n\n# torch._dynamo.config.capture_dynamic_output_shape_ops = True\n\nmodel_id = \"google/gemma-2-2b-it\"\n# model_id = \"Qwen/Qwen2.5-7B\"\n\ntokenizer = AutoTokenizer.from_pretrained(model_id)\nmodel = AutoModelForCausalLM.from_pretrained(\n model_id,\n quantization_config=quantization_config,\n device_map=\"auto\",\n torch_dtype=torch.bfloat16,\n)\n\ninput_text = \"Write me a poem about Machine Learning.\"\ninput_ids = tokenizer(input_text, return_tensors=\"pt\").to(model.device)\n\n# model.forward = torch.compile(model.forward, fullgraph=True)\n\nmodel = torch.compile(model)\n\noutputs = model.generate(**input_ids, max_new_tokens=32)\nprint(tokenizer.decode(outputs[0]))","source_hash":"c6c2f00316839af200111d05ba467bcf6a8db180b7522cd7299b914474876020","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:examples.int8_inference_huggingface","uri":"program://bitsandbytes/module/examples.int8_inference_huggingface#L1-L19","kind":"module","name":"examples.int8_inference_huggingface","path":"examples/int8_inference_huggingface.py","language":"python","start_line":1,"end_line":19,"context_start_line":1,"context_end_line":19,"code":"import torch\nfrom transformers import LlamaForCausalLM, LlamaTokenizer\n\nMAX_NEW_TOKENS = 128\nmodel_name = \"meta-llama/Llama-2-7b-hf\"\n\ntext = \"Hamburg is in which country?\\n\"\ntokenizer = LlamaTokenizer.from_pretrained(model_name)\ninput_ids = tokenizer(text, return_tensors=\"pt\").input_ids\n\nmax_memory = f\"{int(torch.cuda.mem_get_info()[0] / 1024**3) - 2}GB\"\n\nn_gpus = 
torch.cuda.device_count()\nmax_memory = {i: max_memory for i in range(n_gpus)}\n\nmodel = LlamaForCausalLM.from_pretrained(model_name, device_map=\"auto\", load_in_8bit=True, max_memory=max_memory)\n\n# MAX_NEW_TOKENS counts generated tokens, so pass it as max_new_tokens rather\n# than max_length (which would also count the prompt tokens).\ngenerated_ids = model.generate(input_ids, max_new_tokens=MAX_NEW_TOKENS)\nprint(tokenizer.decode(generated_ids[0], skip_special_tokens=True))","source_hash":"719ba7d86b5082cfd9e137ced3f117544f8cac4fd65ea17bb797bf509d52b401","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cuda_specs","uri":"program://bitsandbytes/module/bitsandbytes.cuda_specs#L1-L102","kind":"module","name":"bitsandbytes.cuda_specs","path":"bitsandbytes/cuda_specs.py","language":"python","start_line":1,"end_line":102,"context_start_line":1,"context_end_line":102,"code":"import dataclasses\nfrom functools import lru_cache\nimport logging\nimport re\nimport subprocess\nfrom typing import Optional\n\nimport torch\n\n\n@dataclasses.dataclass(frozen=True)\nclass CUDASpecs:\n highest_compute_capability: tuple[int, int]\n cuda_version_string: str\n cuda_version_tuple: tuple[int, int]\n\n @property\n def has_imma(self) -> bool:\n return torch.version.hip or self.highest_compute_capability >= (7, 5)\n\n\ndef get_compute_capabilities() -> list[tuple[int, int]]:\n return sorted(torch.cuda.get_device_capability(torch.cuda.device(i)) for i in range(torch.cuda.device_count()))\n\n\n@lru_cache(None)\ndef get_cuda_version_tuple() -> Optional[tuple[int, int]]:\n \"\"\"Get CUDA/HIP version as a tuple of (major, minor).\"\"\"\n try:\n if torch.version.cuda:\n version_str = torch.version.cuda\n elif torch.version.hip:\n version_str = torch.version.hip\n else:\n return None\n\n parts = version_str.split(\".\")\n if len(parts) >= 2:\n return tuple(map(int, parts[:2]))\n return None\n except (AttributeError, ValueError, IndexError):\n return None\n\n\ndef get_cuda_version_string() -> Optional[str]:\n \"\"\"Get CUDA/HIP version as a string.\"\"\"\n version_tuple = get_cuda_version_tuple()\n if version_tuple is None:\n return None\n major, minor = version_tuple\n return f\"{major * 10 + minor}\"\n\n\ndef get_cuda_specs() -> Optional[CUDASpecs]:\n \"\"\"Get CUDA/HIP specifications.\"\"\"\n if not torch.cuda.is_available():\n return None\n\n try:\n compute_capabilities = get_compute_capabilities()\n if not compute_capabilities:\n return None\n\n version_tuple = get_cuda_version_tuple()\n if version_tuple is None:\n return None\n\n version_string = get_cuda_version_string()\n if version_string is None:\n return None\n\n return CUDASpecs(\n highest_compute_capability=compute_capabilities[-1],\n cuda_version_string=version_string,\n cuda_version_tuple=version_tuple,\n )\n except Exception:\n return None\n\n\ndef get_rocm_gpu_arch() -> str:\n \"\"\"Get ROCm GPU architecture.\"\"\"\n logger = logging.getLogger(__name__)\n try:\n if torch.version.hip:\n result = subprocess.run([\"rocminfo\"], capture_output=True, text=True)\n match = re.search(r\"Name:\\s+gfx([a-zA-Z\\d]+)\", result.stdout)\n if match:\n return \"gfx\" + match.group(1)\n else:\n return \"unknown\"\n else:\n return \"unknown\"\n except Exception as e:\n logger.error(f\"Could not detect ROCm GPU architecture: {e}\")\n if torch.cuda.is_available():\n logger.warning(\n \"\"\"\nROCm GPU architecture detection failed despite ROCm being available.\n \"\"\",\n )\n return \"unknown\"","source_hash":"dccd31d07340ecf25ce63ea5ec5cf51ecc6415175b2a5a57b09765c8c7aa0c5d","truncated":false} 
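A minimal usage sketch for the bitsandbytes.cuda_specs helpers in the record above (this sketch is not part of the indexed source). It only calls names visible in that module and assumes a PyTorch build with CUDA or ROCm support; on a CPU-only build get_cuda_specs() returns None.

from bitsandbytes.cuda_specs import get_cuda_specs, get_rocm_gpu_arch

specs = get_cuda_specs()  # None whenever torch.cuda.is_available() is False
if specs is None:
    print("No CUDA/HIP accelerator detected")
else:
    print(f"CUDA/HIP version: {specs.cuda_version_string} (tuple {specs.cuda_version_tuple})")
    print(f"Highest compute capability: {specs.highest_compute_capability}")
    # has_imma is truthy on ROCm builds or when compute capability >= (7, 5)
    if specs.has_imma:
        print("int8 tensor-core (IMMA) kernels are usable on this device")
    print(f"ROCm GPU arch: {get_rocm_gpu_arch()}")  # 'unknown' on CUDA-only builds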
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cuda_specs.CUDASpecs","uri":"program://bitsandbytes/class/bitsandbytes.cuda_specs.CUDASpecs#L12-L19","kind":"class","name":"CUDASpecs","path":"bitsandbytes/cuda_specs.py","language":"python","start_line":12,"end_line":19,"context_start_line":1,"context_end_line":39,"code":"import dataclasses\nfrom functools import lru_cache\nimport logging\nimport re\nimport subprocess\nfrom typing import Optional\n\nimport torch\n\n\n@dataclasses.dataclass(frozen=True)\nclass CUDASpecs:\n highest_compute_capability: tuple[int, int]\n cuda_version_string: str\n cuda_version_tuple: tuple[int, int]\n\n @property\n def has_imma(self) -> bool:\n return torch.version.hip or self.highest_compute_capability >= (7, 5)\n\n\ndef get_compute_capabilities() -> list[tuple[int, int]]:\n return sorted(torch.cuda.get_device_capability(torch.cuda.device(i)) for i in range(torch.cuda.device_count()))\n\n\n@lru_cache(None)\ndef get_cuda_version_tuple() -> Optional[tuple[int, int]]:\n \"\"\"Get CUDA/HIP version as a tuple of (major, minor).\"\"\"\n try:\n if torch.version.cuda:\n version_str = torch.version.cuda\n elif torch.version.hip:\n version_str = torch.version.hip\n else:\n return None\n\n parts = version_str.split(\".\")\n if len(parts) >= 2:\n return tuple(map(int, parts[:2]))","source_hash":"dccd31d07340ecf25ce63ea5ec5cf51ecc6415175b2a5a57b09765c8c7aa0c5d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cuda_specs.get_compute_capabilities","uri":"program://bitsandbytes/function/bitsandbytes.cuda_specs.get_compute_capabilities#L22-L23","kind":"function","name":"get_compute_capabilities","path":"bitsandbytes/cuda_specs.py","language":"python","start_line":22,"end_line":23,"context_start_line":2,"context_end_line":43,"code":"from functools import lru_cache\nimport logging\nimport re\nimport subprocess\nfrom typing import Optional\n\nimport torch\n\n\n@dataclasses.dataclass(frozen=True)\nclass CUDASpecs:\n highest_compute_capability: tuple[int, int]\n cuda_version_string: str\n cuda_version_tuple: tuple[int, int]\n\n @property\n def has_imma(self) -> bool:\n return torch.version.hip or self.highest_compute_capability >= (7, 5)\n\n\ndef get_compute_capabilities() -> list[tuple[int, int]]:\n return sorted(torch.cuda.get_device_capability(torch.cuda.device(i)) for i in range(torch.cuda.device_count()))\n\n\n@lru_cache(None)\ndef get_cuda_version_tuple() -> Optional[tuple[int, int]]:\n \"\"\"Get CUDA/HIP version as a tuple of (major, minor).\"\"\"\n try:\n if torch.version.cuda:\n version_str = torch.version.cuda\n elif torch.version.hip:\n version_str = torch.version.hip\n else:\n return None\n\n parts = version_str.split(\".\")\n if len(parts) >= 2:\n return tuple(map(int, parts[:2]))\n return None\n except (AttributeError, ValueError, IndexError):\n return None\n","source_hash":"dccd31d07340ecf25ce63ea5ec5cf51ecc6415175b2a5a57b09765c8c7aa0c5d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cuda_specs.get_cuda_version_tuple","uri":"program://bitsandbytes/function/bitsandbytes.cuda_specs.get_cuda_version_tuple#L27-L42","kind":"function","name":"get_cuda_version_tuple","path":"bitsandbytes/cuda_specs.py","language":"python","start_line":27,"end_line":42,"context_start_line":7,"context_end_line":62,"code":"\nimport torch\n\n\n@dataclasses.dataclass(frozen=True)\nclass CUDASpecs:\n highest_compute_capability: tuple[int, int]\n cuda_version_string: str\n cuda_version_tuple: tuple[int, int]\n\n @property\n def 
has_imma(self) -> bool:\n return torch.version.hip or self.highest_compute_capability >= (7, 5)\n\n\ndef get_compute_capabilities() -> list[tuple[int, int]]:\n return sorted(torch.cuda.get_device_capability(torch.cuda.device(i)) for i in range(torch.cuda.device_count()))\n\n\n@lru_cache(None)\ndef get_cuda_version_tuple() -> Optional[tuple[int, int]]:\n \"\"\"Get CUDA/HIP version as a tuple of (major, minor).\"\"\"\n try:\n if torch.version.cuda:\n version_str = torch.version.cuda\n elif torch.version.hip:\n version_str = torch.version.hip\n else:\n return None\n\n parts = version_str.split(\".\")\n if len(parts) >= 2:\n return tuple(map(int, parts[:2]))\n return None\n except (AttributeError, ValueError, IndexError):\n return None\n\n\ndef get_cuda_version_string() -> Optional[str]:\n \"\"\"Get CUDA/HIP version as a string.\"\"\"\n version_tuple = get_cuda_version_tuple()\n if version_tuple is None:\n return None\n major, minor = version_tuple\n return f\"{major * 10 + minor}\"\n\n\ndef get_cuda_specs() -> Optional[CUDASpecs]:\n \"\"\"Get CUDA/HIP specifications.\"\"\"\n if not torch.cuda.is_available():\n return None\n\n try:\n compute_capabilities = get_compute_capabilities()\n if not compute_capabilities:\n return None","source_hash":"dccd31d07340ecf25ce63ea5ec5cf51ecc6415175b2a5a57b09765c8c7aa0c5d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cuda_specs.get_cuda_version_string","uri":"program://bitsandbytes/function/bitsandbytes.cuda_specs.get_cuda_version_string#L45-L51","kind":"function","name":"get_cuda_version_string","path":"bitsandbytes/cuda_specs.py","language":"python","start_line":45,"end_line":51,"context_start_line":25,"context_end_line":71,"code":"\n@lru_cache(None)\ndef get_cuda_version_tuple() -> Optional[tuple[int, int]]:\n \"\"\"Get CUDA/HIP version as a tuple of (major, minor).\"\"\"\n try:\n if torch.version.cuda:\n version_str = torch.version.cuda\n elif torch.version.hip:\n version_str = torch.version.hip\n else:\n return None\n\n parts = version_str.split(\".\")\n if len(parts) >= 2:\n return tuple(map(int, parts[:2]))\n return None\n except (AttributeError, ValueError, IndexError):\n return None\n\n\ndef get_cuda_version_string() -> Optional[str]:\n \"\"\"Get CUDA/HIP version as a string.\"\"\"\n version_tuple = get_cuda_version_tuple()\n if version_tuple is None:\n return None\n major, minor = version_tuple\n return f\"{major * 10 + minor}\"\n\n\ndef get_cuda_specs() -> Optional[CUDASpecs]:\n \"\"\"Get CUDA/HIP specifications.\"\"\"\n if not torch.cuda.is_available():\n return None\n\n try:\n compute_capabilities = get_compute_capabilities()\n if not compute_capabilities:\n return None\n\n version_tuple = get_cuda_version_tuple()\n if version_tuple is None:\n return None\n\n version_string = get_cuda_version_string()\n if version_string is None:\n return None\n","source_hash":"dccd31d07340ecf25ce63ea5ec5cf51ecc6415175b2a5a57b09765c8c7aa0c5d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cuda_specs.get_cuda_specs","uri":"program://bitsandbytes/function/bitsandbytes.cuda_specs.get_cuda_specs#L54-L78","kind":"function","name":"get_cuda_specs","path":"bitsandbytes/cuda_specs.py","language":"python","start_line":54,"end_line":78,"context_start_line":34,"context_end_line":98,"code":" else:\n return None\n\n parts = version_str.split(\".\")\n if len(parts) >= 2:\n return tuple(map(int, parts[:2]))\n return None\n except (AttributeError, ValueError, IndexError):\n return None\n\n\ndef get_cuda_version_string() -> 
Optional[str]:\n \"\"\"Get CUDA/HIP version as a string.\"\"\"\n version_tuple = get_cuda_version_tuple()\n if version_tuple is None:\n return None\n major, minor = version_tuple\n return f\"{major * 10 + minor}\"\n\n\ndef get_cuda_specs() -> Optional[CUDASpecs]:\n \"\"\"Get CUDA/HIP specifications.\"\"\"\n if not torch.cuda.is_available():\n return None\n\n try:\n compute_capabilities = get_compute_capabilities()\n if not compute_capabilities:\n return None\n\n version_tuple = get_cuda_version_tuple()\n if version_tuple is None:\n return None\n\n version_string = get_cuda_version_string()\n if version_string is None:\n return None\n\n return CUDASpecs(\n highest_compute_capability=compute_capabilities[-1],\n cuda_version_string=version_string,\n cuda_version_tuple=version_tuple,\n )\n except Exception:\n return None\n\n\ndef get_rocm_gpu_arch() -> str:\n \"\"\"Get ROCm GPU architecture.\"\"\"\n logger = logging.getLogger(__name__)\n try:\n if torch.version.hip:\n result = subprocess.run([\"rocminfo\"], capture_output=True, text=True)\n match = re.search(r\"Name:\\s+gfx([a-zA-Z\\d]+)\", result.stdout)\n if match:\n return \"gfx\" + match.group(1)\n else:\n return \"unknown\"\n else:\n return \"unknown\"\n except Exception as e:\n logger.error(f\"Could not detect ROCm GPU architecture: {e}\")\n if torch.cuda.is_available():\n logger.warning(\n \"\"\"","source_hash":"dccd31d07340ecf25ce63ea5ec5cf51ecc6415175b2a5a57b09765c8c7aa0c5d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cuda_specs.get_rocm_gpu_arch","uri":"program://bitsandbytes/function/bitsandbytes.cuda_specs.get_rocm_gpu_arch#L81-L102","kind":"function","name":"get_rocm_gpu_arch","path":"bitsandbytes/cuda_specs.py","language":"python","start_line":81,"end_line":102,"context_start_line":61,"context_end_line":102,"code":" if not compute_capabilities:\n return None\n\n version_tuple = get_cuda_version_tuple()\n if version_tuple is None:\n return None\n\n version_string = get_cuda_version_string()\n if version_string is None:\n return None\n\n return CUDASpecs(\n highest_compute_capability=compute_capabilities[-1],\n cuda_version_string=version_string,\n cuda_version_tuple=version_tuple,\n )\n except Exception:\n return None\n\n\ndef get_rocm_gpu_arch() -> str:\n \"\"\"Get ROCm GPU architecture.\"\"\"\n logger = logging.getLogger(__name__)\n try:\n if torch.version.hip:\n result = subprocess.run([\"rocminfo\"], capture_output=True, text=True)\n match = re.search(r\"Name:\\s+gfx([a-zA-Z\\d]+)\", result.stdout)\n if match:\n return \"gfx\" + match.group(1)\n else:\n return \"unknown\"\n else:\n return \"unknown\"\n except Exception as e:\n logger.error(f\"Could not detect ROCm GPU architecture: {e}\")\n if torch.cuda.is_available():\n logger.warning(\n \"\"\"\nROCm GPU architecture detection failed despite ROCm being available.\n \"\"\",\n )\n return \"unknown\"","source_hash":"dccd31d07340ecf25ce63ea5ec5cf51ecc6415175b2a5a57b09765c8c7aa0c5d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cuda_specs.has_imma","uri":"program://bitsandbytes/function/bitsandbytes.cuda_specs.has_imma#L18-L19","kind":"function","name":"has_imma","path":"bitsandbytes/cuda_specs.py","language":"python","start_line":18,"end_line":19,"context_start_line":1,"context_end_line":39,"code":"import dataclasses\nfrom functools import lru_cache\nimport logging\nimport re\nimport subprocess\nfrom typing import Optional\n\nimport torch\n\n\n@dataclasses.dataclass(frozen=True)\nclass CUDASpecs:\n 
highest_compute_capability: tuple[int, int]\n cuda_version_string: str\n cuda_version_tuple: tuple[int, int]\n\n @property\n def has_imma(self) -> bool:\n return torch.version.hip or self.highest_compute_capability >= (7, 5)\n\n\ndef get_compute_capabilities() -> list[tuple[int, int]]:\n return sorted(torch.cuda.get_device_capability(torch.cuda.device(i)) for i in range(torch.cuda.device_count()))\n\n\n@lru_cache(None)\ndef get_cuda_version_tuple() -> Optional[tuple[int, int]]:\n \"\"\"Get CUDA/HIP version as a tuple of (major, minor).\"\"\"\n try:\n if torch.version.cuda:\n version_str = torch.version.cuda\n elif torch.version.hip:\n version_str = torch.version.hip\n else:\n return None\n\n parts = version_str.split(\".\")\n if len(parts) >= 2:\n return tuple(map(int, parts[:2]))","source_hash":"dccd31d07340ecf25ce63ea5ec5cf51ecc6415175b2a5a57b09765c8c7aa0c5d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes._ops","uri":"program://bitsandbytes/module/bitsandbytes._ops#L1-L433","kind":"module","name":"bitsandbytes._ops","path":"bitsandbytes/_ops.py","language":"python","start_line":1,"end_line":433,"context_start_line":1,"context_end_line":433,"code":"from collections.abc import Sequence\nfrom math import prod\nfrom typing import Optional\n\nimport torch\n\n_IS_TORCH_GTE_24 = False\n\nif hasattr(torch.library, \"register_fake\"):\n _IS_TORCH_GTE_24 = True\n register_fake = torch.library.register_fake\n register_kernel = torch.library.register_kernel\nelse:\n # PyTorch <= 2.3\n register_fake = torch.library.impl_abstract\n register_kernel = torch.library.impl\n\n# Int8 mixed precision matmul + dequant + bias\ntorch.library.define(\n \"bitsandbytes::int8_mixed_scaled_mm\",\n \"(Tensor A, Tensor CA, Tensor CB, Tensor SCA, Tensor SCB, Tensor? outlier_cols=None, Tensor? bias=None) -> (Tensor, Tensor?)\",\n)\n\n\n@register_fake(\"bitsandbytes::int8_mixed_scaled_mm\")\ndef _(\n A: torch.Tensor,\n CA: torch.Tensor,\n CB: torch.Tensor,\n SCA: torch.Tensor,\n SCB: torch.Tensor,\n outlier_cols: Optional[torch.Tensor] = None,\n bias: Optional[torch.Tensor] = None,\n) -> tuple[torch.Tensor, Optional[torch.Tensor]]:\n shapeC = (*CA.shape[:-1], CB.shape[0])\n\n out = torch.empty(shapeC, device=A.device, dtype=A.dtype)\n\n outlier_cols = torch.library.get_ctx().new_dynamic_size()\n subA = A.new_empty(outlier_cols, dtype=torch.int64)\n\n return out, subA\n\n\n# Higher level op: int8 matmul + dequant + bias\ntorch.library.define(\n \"bitsandbytes::int8_scaled_mm\",\n \"(Tensor A, Tensor B, Tensor row_stats, Tensor col_stats, Tensor? bias=None, ScalarType? 
dtype=None) -> Tensor\",\n)\n\n\n@register_fake(\"bitsandbytes::int8_scaled_mm\")\ndef _(\n A: torch.Tensor,\n B: torch.Tensor,\n row_stats: torch.Tensor,\n col_stats: torch.Tensor,\n bias: Optional[torch.Tensor] = None,\n dtype: Optional[torch.dtype] = None,\n) -> torch.Tensor:\n shapeC = (*A.shape[:-1], B.shape[0])\n return torch.empty(shapeC, device=A.device, dtype=dtype or torch.float16)\n\n\ntorch.library.define(\n \"bitsandbytes::int8_linear_matmul\",\n \"(Tensor A, Tensor B) -> Tensor\",\n)\n\n\n@register_fake(\"bitsandbytes::int8_linear_matmul\")\ndef _(A: torch.Tensor, B: torch.Tensor):\n torch._check(A.dtype == torch.int8, lambda: \"A must be int8\")\n torch._check(B.dtype == torch.int8, lambda: \"B must be int8\")\n shapeC = (*A.shape[:-1], B.shape[0])\n return torch.empty(shapeC, device=A.device, dtype=torch.int32)\n\n\n# More info on `out` overloads:\n# https://github.com/pytorch/pytorch/issues/125044\ntorch.library.define(\n \"bitsandbytes::int8_linear_matmul.out\",\n \"(Tensor A, Tensor B, Tensor! out) -> ()\",\n)\n\n\n@register_fake(\"bitsandbytes::int8_linear_matmul.out\")\ndef _(A: torch.Tensor, B: torch.Tensor, out: torch.Tensor):\n shapeC = (*A.shape[:-1], B.shape[0])\n\n torch._check(A.dtype == torch.int8, lambda: \"A must be int8\")\n torch._check(B.dtype == torch.int8, lambda: \"B must be int8\")\n torch._check(out.shape == shapeC, lambda: f\"Expected out.shape == {shapeC}, got {out.shape}\")\n torch._check(out.device == A.device, lambda: f\"Expected out.device == {A.device}, got {out.device}\")\n torch._check(out.dtype == torch.int32, lambda: f\"Expected out.dtype == int32, got {out.dtype}\")\n\n\ntorch.library.define(\n \"bitsandbytes::int8_vectorwise_quant\",\n \"(Tensor A, float threshold=0.0) -> (Tensor, Tensor, Tensor?)\",\n)\n\n\n@register_fake(\"bitsandbytes::int8_vectorwise_quant\")\ndef _(A: torch.Tensor, threshold=0.0):\n out_row = torch.empty(A.shape, device=A.device, dtype=torch.int8)\n row_stats = torch.empty(prod(A.shape[:-1]), device=A.device, dtype=torch.float32)\n\n if threshold == 0.0:\n return out_row, row_stats, None\n\n outlier_cols = torch.library.get_ctx().new_dynamic_size()\n\n return out_row, row_stats, A.new_empty(outlier_cols, dtype=torch.int64)\n\n\ntorch.library.define(\"bitsandbytes::int8_vectorwise_dequant\", \"(Tensor A, Tensor stats) -> Tensor\")\n\n\n@register_fake(\"bitsandbytes::int8_vectorwise_dequant\")\ndef _(A: torch.Tensor, stats: torch.Tensor) -> torch.Tensor:\n torch._check(A.dtype == torch.int8, lambda: \"A must be int8\")\n return torch.empty_like(A, dtype=torch.float32)\n\n\n# Default PyTorch-native implementation\n@register_kernel(\"bitsandbytes::int8_vectorwise_dequant\", \"default\")\ndef _(A: torch.Tensor, stats: torch.Tensor):\n # To dequantize we divide by 127, or multiply by the reciprocal.\n return A * stats.view(-1, 1) * 7.874015718698502e-3\n\n\ntorch.library.define(\n \"bitsandbytes::int8_mm_dequant\",\n \"(Tensor A, Tensor row_stats, Tensor col_stats, ScalarType? dtype=None, Tensor? 
bias=None) -> Tensor\",\n)\n\n\n@register_fake(\"bitsandbytes::int8_mm_dequant\")\ndef _(\n A: torch.Tensor,\n row_stats: torch.Tensor,\n col_stats: torch.Tensor,\n dtype: Optional[torch.dtype] = None,\n bias: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n torch._check(A.dtype == torch.int32, lambda: \"A must be int32\")\n return torch.empty_like(A, dtype=dtype or torch.float16)\n\n\ntorch.library.define(\n \"bitsandbytes::int8_double_quant\",\n \"(Tensor A, float threshold=0.0) -> (Tensor, Tensor, Tensor, Tensor, Tensor?)\",\n)\n\n\n@register_fake(\"bitsandbytes::int8_double_quant\")\ndef _(\n A: torch.Tensor,\n threshold=0.0,\n) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:\n out_row = torch.empty_like(A, dtype=torch.int8)\n out_col = torch.empty_like(A, dtype=torch.int8)\n row_stats = torch.empty(prod(A.shape[:-1]), device=A.device, dtype=torch.float32)\n col_stats = torch.empty(A.shape[-1], device=A.device, dtype=torch.float32)\n outlier_n = torch.library.get_ctx().new_dynamic_size()\n outlier_cols = A.new_empty(outlier_n, dtype=torch.int64)\n return out_row, out_col, row_stats, col_stats, outlier_cols\n\n\ntorch.library.define(\n \"bitsandbytes::dequantize_4bit\",\n \"(Tensor A, Tensor absmax, int blocksize, str quant_type, int[] shape, ScalarType dtype) -> Tensor\",\n)\n\n\n@register_fake(\"bitsandbytes::dequantize_4bit\")\ndef _(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n shape: Sequence[int],\n dtype: torch.dtype,\n) -> torch.Tensor:\n torch._check_is_size(blocksize)\n return torch.empty(shape, dtype=dtype, device=A.device)\n\n\ntorch.library.define(\n \"bitsandbytes::dequantize_4bit.out\",\n \"(Tensor A, Tensor absmax, int blocksize, str quant_type, int[] shape, ScalarType dtype, Tensor! 
out) -> ()\",\n)\n\n\n@register_fake(\"bitsandbytes::dequantize_4bit.out\")\ndef _(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n shape: Sequence[int],\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n torch._check_is_size(blocksize)\n torch._check(out.shape == shape, lambda: f\"Expected out.shape == {shape}, got {out.shape}\")\n torch._check(out.device == A.device, lambda: f\"Expected out.device == {A.device}, got {out.device}\")\n torch._check(out.dtype == dtype, lambda: f\"Expected out.dtype == {dtype}, got {out.dtype}\")\n\n\ntorch.library.define(\n \"bitsandbytes::quantize_4bit\",\n \"(Tensor A, int blocksize, str quant_type, ScalarType quant_storage) -> (Tensor, Tensor)\",\n)\n\n\n@register_fake(\"bitsandbytes::quantize_4bit\")\ndef _(\n A: torch.Tensor, blocksize: int, quant_type: str, quant_storage: torch.dtype\n) -> tuple[torch.Tensor, torch.Tensor]:\n torch._check_is_size(blocksize)\n\n n = A.numel()\n blocks = -(n // -blocksize)\n absmax = torch.empty((blocks,), device=A.device, dtype=torch.float32)\n out = torch.empty(((n + 1) // (quant_storage.itemsize * 2), 1), device=A.device, dtype=quant_storage)\n return out, absmax\n\n\ntorch.library.define(\n \"bitsandbytes::dequantize_blockwise\",\n \"(Tensor A, Tensor absmax, Tensor code, int blocksize, ScalarType dtype) -> Tensor\",\n)\n\n\n@register_fake(\"bitsandbytes::dequantize_blockwise\")\ndef _(A: torch.Tensor, absmax: torch.Tensor, code: torch.Tensor, blocksize: int, dtype: torch.dtype) -> torch.Tensor:\n torch._check_is_size(blocksize)\n torch._check(A.dtype == torch.uint8, lambda: f\"A must be uint8, got {A.dtype}\")\n return torch.empty_like(A, dtype=dtype)\n\n\ntorch.library.define(\n \"bitsandbytes::dequantize_blockwise.out\",\n \"(Tensor A, Tensor absmax, Tensor code, int blocksize, ScalarType dtype, Tensor! 
out) -> ()\",\n)\n\n\n@register_fake(\"bitsandbytes::dequantize_blockwise.out\")\ndef _(\n A: torch.Tensor, absmax: torch.Tensor, code: torch.Tensor, blocksize: int, dtype: torch.dtype, out: torch.Tensor\n):\n torch._check_is_size(blocksize)\n torch._check(A.dtype == torch.uint8, lambda: f\"A must be uint8, got {A.dtype}\")\n torch._check(out.shape == A.shape, lambda: f\"Expected out.shape == {A.shape}, got {out.shape}\")\n torch._check(out.device == A.device, lambda: f\"Expected out.device == {A.device}, got {out.device}\")\n torch._check(out.dtype == dtype, lambda: f\"Expected out.dtype == {dtype}, got {out.dtype}\")\n\n\ntorch.library.define(\"bitsandbytes::quantize_blockwise\", \"(Tensor A, Tensor code, int blocksize) -> (Tensor, Tensor)\")\n\n\n@register_fake(\"bitsandbytes::quantize_blockwise\")\ndef _(A: torch.Tensor, code: torch.Tensor, blocksize: int) -> tuple[torch.Tensor, torch.Tensor]:\n torch._check_is_size(blocksize)\n n = A.numel()\n blocks = -(n // -blocksize)\n absmax = torch.empty((blocks,), device=A.device, dtype=torch.float32)\n out = torch.empty_like(A, dtype=torch.uint8)\n return out, absmax\n\n\ntorch.library.define(\n \"bitsandbytes::gemv_4bit\",\n \"(Tensor A, Tensor B, int[] shapeB, Tensor absmax, Tensor code, int blocksize) -> Tensor\",\n)\n\n\n@register_fake(\"bitsandbytes::gemv_4bit\")\ndef _(\n A: torch.Tensor, B: torch.Tensor, shapeB: Sequence[int], absmax: torch.Tensor, code: torch.Tensor, blocksize: int\n) -> torch.Tensor:\n torch._check_is_size(blocksize)\n torch._check(A.numel() == A.size(-1), lambda: f\"A must be a vector with leading dimensions of 1, got {A.shape}\")\n torch._check(\n A.dtype in [torch.float16, torch.bfloat16, torch.float32],\n lambda: f\"A must be float16, bfloat16, or float32, got {A.dtype}\",\n )\n torch._check(\n B.dtype in [torch.uint8, torch.bfloat16, torch.float16, torch.float32],\n lambda: f\"B must be backed by storage of type uint8, bfloat16, float16, or float32, got {B.dtype}\",\n )\n shape = (*A.shape[:-1], shapeB[0])\n return torch.empty(shape, device=A.device, dtype=A.dtype)\n\n\ntorch.library.define(\n \"bitsandbytes::gemv_4bit.out\",\n \"(Tensor A, Tensor B, int[] shapeB, Tensor absmax, Tensor code, int blocksize, Tensor! out) -> ()\",\n)\n\n\n@register_fake(\"bitsandbytes::gemv_4bit.out\")\ndef _(\n A: torch.Tensor,\n B: torch.Tensor,\n shapeB: Sequence[int],\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n out: torch.Tensor,\n) -> None:\n torch._check_is_size(blocksize)\n torch._check(A.numel() == A.size(-1), lambda: f\"A must be a vector with leading dimensions of 1, got {A.shape}\")\n torch._check(\n A.dtype in [torch.float16, torch.bfloat16, torch.float32],\n lambda: f\"A must be float16, bfloat16, or float32, got {A.dtype}\",\n )\n torch._check(\n B.dtype in [torch.uint8, torch.bfloat16, torch.float16, torch.float32],\n lambda: f\"B must be backed by storage of type uint8, bfloat16, float16, or float32, got {B.dtype}\",\n )\n torch._check(\n out.shape == (*A.shape[:-1], shapeB[0]),\n lambda: f\"Expected out.shape == {(*A.shape[:-1], shapeB[0])}, got {out.shape}\",\n )\n torch._check(out.device == A.device, lambda: f\"Expected out.device == {A.device}, got {out.device}\")\n torch._check(out.dtype == A.dtype, lambda: f\"Expected out.dtype == {A.dtype}, got {out.dtype}\")\n\n\ntorch.library.define(\n \"bitsandbytes::optimizer_update_32bit\",\n \"(str optimizer_name, Tensor(a0!) g, Tensor(a1!) p, Tensor(a2!) state1, Tensor(a3!)? state2, Tensor(a4!)? 
unorm_vec, float max_unorm, float param_norm, float beta1, float beta2, float beta3, float alpha, float eps, float weight_decay, int step, float lr, float gnorm_scale, bool skip_zeros=False) -> ()\",\n)\n\n\n@register_fake(\"bitsandbytes::optimizer_update_32bit\")\ndef _(\n optimizer_name: str,\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n unorm_vec: Optional[torch.Tensor],\n max_unorm: float,\n param_norm: float,\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n weight_decay: float,\n step: int,\n lr: float,\n gnorm_scale: float,\n skip_zeros=False,\n) -> None:\n torch._check(\n g.numel() == p.numel(),\n lambda: f\"g and p must have the same number of elements, got {g.numel()} and {p.numel()}\",\n )\n compute_dtypes = [torch.float16, torch.bfloat16, torch.float32]\n\n torch._check(\n g.dtype in compute_dtypes,\n lambda: f\"g must be bfloat16, float16, or float32, got {g.dtype}\",\n )\n torch._check(\n g.dtype == p.dtype,\n lambda: f\"Expected all tensors to have the same dtype, got g.dtype={g.dtype}, p.dtype={p.dtype}\",\n )\n\n\ntorch.library.define(\n \"bitsandbytes::optimizer_update_8bit_blockwise\",\n \"(str optimizer_name, Tensor(a0!) g, Tensor(a1!) p, Tensor(a2!) state1, Tensor(a3!)? state2, float beta1, float beta2, float beta3, float alpha, float eps, int step, float lr, Tensor(a4!) qmap1, Tensor(a5!)? qmap2, Tensor(a6!) absmax1, Tensor(a7!)? absmax2, float weight_decay, float gnorm_scale, bool skip_zeros=False) -> ()\",\n)\n\n\n@register_fake(\"bitsandbytes::optimizer_update_8bit_blockwise\")\ndef _(\n optimizer_name: str,\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n step: int,\n lr: float,\n qmap1: torch.Tensor,\n qmap2: Optional[torch.Tensor],\n absmax1: torch.Tensor,\n absmax2: Optional[torch.Tensor],\n weight_decay: float,\n gnorm_scale: float,\n skip_zeros=False,\n) -> None:\n torch._check(\n g.numel() == p.numel(),\n lambda: f\"g and p must have the same number of elements, got {g.numel()} and {p.numel()}\",\n )\n compute_dtypes = [torch.float16, torch.bfloat16, torch.float32]\n\n torch._check(\n g.dtype in compute_dtypes,\n lambda: f\"g must be bfloat16, float16, or float32, got {g.dtype}\",\n )\n torch._check(\n g.dtype == p.dtype,\n lambda: f\"Expected all tensors to have the same dtype, got g.dtype={g.dtype}, p.dtype={p.dtype}\",\n )\n torch._check(\n state1.dtype == torch.uint8,\n lambda: f\"state1 must be uint8, got {state1.dtype}\",\n )\n torch._check(\n qmap1.dtype == absmax1.dtype == torch.float32,\n lambda: f\"Expected qmap1 and absmax1 to be float32, got qmap1.dtype={qmap1.dtype}, absmax1.dtype={absmax1.dtype}\",\n )\n if state2 is not None:\n torch._check(\n state2.dtype == torch.uint8,\n lambda: f\"state2 must be uint8, got {state2.dtype}\",\n )\n torch._check(\n qmap2.dtype == absmax2.dtype == torch.float32,\n lambda: f\"Expected qmap2 and absmax2 to be float32, got qmap2.dtype={qmap2.dtype}, absmax2.dtype={absmax2.dtype}\",\n )","source_hash":"343fe18d26c8a3e6df8e482e5bcc5e77d1d3137ad4f0f523440a2064db1dbcf4","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes._ops._","uri":"program://bitsandbytes/function/bitsandbytes._ops._#L382-L433","kind":"function","name":"_","path":"bitsandbytes/_ops.py","language":"python","start_line":382,"end_line":433,"context_start_line":362,"context_end_line":433,"code":" )\n compute_dtypes = 
[torch.float16, torch.bfloat16, torch.float32]\n\n torch._check(\n g.dtype in compute_dtypes,\n lambda: f\"g must be bfloat16, float16, or float32, got {g.dtype}\",\n )\n torch._check(\n g.dtype == p.dtype,\n lambda: f\"Expected all tensors to have the same dtype, got g.dtype={g.dtype}, p.dtype={p.dtype}\",\n )\n\n\ntorch.library.define(\n \"bitsandbytes::optimizer_update_8bit_blockwise\",\n \"(str optimizer_name, Tensor(a0!) g, Tensor(a1!) p, Tensor(a2!) state1, Tensor(a3!)? state2, float beta1, float beta2, float beta3, float alpha, float eps, int step, float lr, Tensor(a4!) qmap1, Tensor(a5!)? qmap2, Tensor(a6!) absmax1, Tensor(a7!)? absmax2, float weight_decay, float gnorm_scale, bool skip_zeros=False) -> ()\",\n)\n\n\n@register_fake(\"bitsandbytes::optimizer_update_8bit_blockwise\")\ndef _(\n optimizer_name: str,\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n step: int,\n lr: float,\n qmap1: torch.Tensor,\n qmap2: Optional[torch.Tensor],\n absmax1: torch.Tensor,\n absmax2: Optional[torch.Tensor],\n weight_decay: float,\n gnorm_scale: float,\n skip_zeros=False,\n) -> None:\n torch._check(\n g.numel() == p.numel(),\n lambda: f\"g and p must have the same number of elements, got {g.numel()} and {p.numel()}\",\n )\n compute_dtypes = [torch.float16, torch.bfloat16, torch.float32]\n\n torch._check(\n g.dtype in compute_dtypes,\n lambda: f\"g must be bfloat16, float16, or float32, got {g.dtype}\",\n )\n torch._check(\n g.dtype == p.dtype,\n lambda: f\"Expected all tensors to have the same dtype, got g.dtype={g.dtype}, p.dtype={p.dtype}\",\n )\n torch._check(\n state1.dtype == torch.uint8,\n lambda: f\"state1 must be uint8, got {state1.dtype}\",\n )\n torch._check(\n qmap1.dtype == absmax1.dtype == torch.float32,\n lambda: f\"Expected qmap1 and absmax1 to be float32, got qmap1.dtype={qmap1.dtype}, absmax1.dtype={absmax1.dtype}\",\n )\n if state2 is not None:\n torch._check(\n state2.dtype == torch.uint8,\n lambda: f\"state2 must be uint8, got {state2.dtype}\",\n )\n torch._check(\n qmap2.dtype == absmax2.dtype == torch.float32,\n lambda: f\"Expected qmap2 and absmax2 to be float32, got qmap2.dtype={qmap2.dtype}, absmax2.dtype={absmax2.dtype}\",\n )","source_hash":"343fe18d26c8a3e6df8e482e5bcc5e77d1d3137ad4f0f523440a2064db1dbcf4","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional","uri":"program://bitsandbytes/module/bitsandbytes.functional#L1-L2202","kind":"module","name":"bitsandbytes.functional","path":"bitsandbytes/functional.py","language":"python","start_line":1,"end_line":2202,"context_start_line":1,"context_end_line":2202,"code":"# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom collections.abc import Iterable\nimport ctypes as ct\nimport itertools\nfrom math import prod\nfrom typing import Any, Optional, Union\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import deprecated\n\nfrom bitsandbytes.utils import pack_dict_to_tensor, unpack_tensor_to_dict\n\nfrom .cextension import HIP_ENVIRONMENT, lib\n\nname2qmap = {}\n\n\"\"\"C FUNCTIONS FOR OPTIMIZERS\"\"\"\nstr2optimizer8bit = {\n \"adam\": (\n lib.cadam_static_8bit_grad_32,\n lib.cadam_static_8bit_grad_16,\n ),\n \"momentum\": (\n lib.cmomentum_static_8bit_grad_32,\n lib.cmomentum_static_8bit_grad_16,\n ),\n \"rmsprop\": (\n lib.crmsprop_static_8bit_grad_32,\n lib.crmsprop_static_8bit_grad_16,\n ),\n \"lion\": (\n lib.clion_static_8bit_grad_32,\n lib.clion_static_8bit_grad_16,\n ),\n \"lamb\": (\n lib.cadam_static_8bit_grad_32,\n lib.cadam_static_8bit_grad_16,\n ),\n \"lars\": (\n lib.cmomentum_static_8bit_grad_32,\n lib.cmomentum_static_8bit_grad_16,\n ),\n}\n\n\nclass GlobalPageManager:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.paged_tensors = []\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def prefetch_all(self, to_cpu=False):\n # assume the first added, will be the\n # ones that are used first, so swap them in last\n # in the case they are evicted again\n for t in self.paged_tensors[::-1]:\n prefetch_tensor(t, to_cpu)\n\n\nclass CUBLAS_Context:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.context = {}\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def get_context(self, device):\n if device.index not in self.context:\n prev_device = torch.cuda.current_device()\n torch.cuda.set_device(device)\n self.context[device.index] = ct.c_void_p(lib.get_context())\n torch.cuda.set_device(prev_device)\n return self.context[device.index]\n\n\nclass Cusparse_Context:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.context = ct.c_void_p(lib.get_cusparse())\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n\nFIRST_CUDA_DEVICE = torch.device(\"cuda\", index=0)\n\n# When multiple GPUs are present, we use a context manager to\n# switch to the correct device of a tensor before invoking our CUDA\n# kernels in the C++ library. 
However, when there's only one device\n# there is no need to incur the overhead of cudaGetDevice/cudaSetDevice.\nif torch.cuda.device_count() > 1:\n\n def _cuda_device_of(a: torch.Tensor):\n return torch.cuda.device_of(a)\nelse:\n import contextlib\n\n def _cuda_device_of(a: torch.Tensor):\n return contextlib.nullcontext()\n\n\ndef get_paged(*shape, dtype=torch.float32, device=FIRST_CUDA_DEVICE):\n num_bytes = dtype.itemsize * prod(shape)\n cuda_ptr = lib.cget_managed_ptr(ct.c_size_t(num_bytes))\n c_ptr = ct.cast(cuda_ptr, ct.POINTER(ct.c_int))\n new_array = np.ctypeslib.as_array(c_ptr, shape=shape)\n out = torch.frombuffer(new_array, dtype=dtype, count=prod(shape)).view(shape)\n out.is_paged = True\n out.page_deviceid = device.index\n return out\n\n\ndef prefetch_tensor(A: torch.Tensor, to_cpu=False):\n assert A.is_paged, \"Only paged tensors can be prefetched!\"\n if to_cpu:\n deviceid = -1\n else:\n deviceid = A.page_deviceid\n\n lib.cprefetch(get_ptr(A), ct.c_size_t(A.nbytes), ct.c_int32(deviceid))\n\n\ndef elementwise_func(func_name, A, B, value, prefetch=True):\n func = None\n if A.dtype == torch.float32:\n func = getattr(lib, f\"c{func_name}_fp32\", None)\n cvalue = ct.c_float(value)\n elif A.dtype == torch.uint8:\n func = getattr(lib, f\"c{func_name}_uint8\", None)\n cvalue = ct.c_uint8(value)\n\n if func is None:\n raise NotImplementedError(f\"Function not implemented: {func_name}\")\n\n is_managed = getattr(A, \"is_managed\", False)\n if is_managed and prefetch:\n prefetch_tensor(A)\n if B is not None:\n prefetch_tensor(B)\n\n func(get_ptr(A), get_ptr(B), cvalue, ct.c_int64(A.numel()))\n if A.is_paged or B.is_paged:\n # paged function are fully asynchronous\n # if we return from this function, we want to the tensor\n # to be in the correct state, that is the final state after the\n # operation occurred. So we synchronize.\n torch.cuda.synchronize()\n\n\ndef fill(A, value, device=None, prefetch=True):\n elementwise_func(\"fill\", A, None, value)\n\n\ndef _mul(A, B, device=None):\n elementwise_func(\"_mul\", A, B, 0)\n\n\ndef create_linear_map(signed=True, total_bits=8, add_zero=True):\n sign = -1.0 if signed else 0.0\n total_values = 2**total_bits\n if add_zero or total_bits < 8:\n # add a zero\n # since we simulate less bits by having zeros in the data type, we\n # we need to center the quantization around zero and as such lose\n # a single value\n total_values = 2**total_bits if not signed else 2**total_bits - 1\n\n values = torch.linspace(sign, 1.0, total_values)\n gap = 256 - values.numel()\n if gap == 0:\n return values\n else:\n l = values.numel() // 2 # noqa: E741\n return torch.Tensor(values[:l].tolist() + [0] * gap + values[l:].tolist())\n\n\ndef create_normal_map(offset=0.9677083, use_extra_value=True):\n try:\n from scipy.stats import norm\n except ImportError as ie:\n raise ImportError(\n \"Scipy is required for `create_normal_map`. 
Install `bitsandbytes` with the `[test]` extra.\",\n ) from ie\n\n if use_extra_value:\n # one more positive value, this is an asymmetric type\n v1 = norm.ppf(torch.linspace(offset, 0.5, 9)[:-1]).tolist()\n v2 = [0] * (256 - 15) ## we have 15 non-zero values in this data type\n v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()\n else:\n v1 = norm.ppf(torch.linspace(offset, 0.5, 8)[:-1]).tolist()\n v2 = [0] * (256 - 14) ## we have 14 non-zero values in this data type\n v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()\n\n v = v1 + v2 + v3\n\n values = torch.Tensor(v)\n values = values.sort().values\n values /= values.max()\n\n assert values.numel() == 256\n\n return values\n\n\ndef create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8):\n e = exponent_bits\n p = precision_bits\n has_sign = 1 if signed else 0\n assert e + p == total_bits - has_sign\n # the exponent is biased to 2^(e-1) -1 == 0\n evalues = []\n for i, val in enumerate(range(-(2 ** (exponent_bits - has_sign)), 2 ** (exponent_bits - has_sign), 1)):\n evalues.append(2**val)\n\n values = []\n lst = list(itertools.product([0, 1], repeat=precision_bits))\n # for ev in evalues:\n bias = 2 ** (exponent_bits - 1)\n for evalue in range(2 ** (exponent_bits)):\n for bit_pattern in lst:\n value = 1 if evalue != 0 else 0\n for i, pval in enumerate(list(bit_pattern)):\n value += pval * (2 ** -(i + 1))\n if evalue == 0:\n # subnormals\n value = value * 2**-(bias)\n else:\n # normals\n value = value * 2 ** -(evalue - bias - 1)\n values.append(value)\n if signed:\n values.append(-value)\n\n assert len(values) == 2**total_bits\n values.sort()\n if total_bits < 8:\n gap = 256 - len(values)\n for i in range(gap):\n values.append(0)\n values.sort()\n code = torch.tensor(values)\n code /= code.max()\n\n return code\n\n\ndef create_dynamic_map(signed=True, max_exponent_bits=7, total_bits=8):\n \"\"\"\n Creates the dynamic quantiztion map.\n\n The dynamic data type is made up of a dynamic exponent and\n fraction. As the exponent increase from 0 to -7 the number\n of bits available for the fraction shrinks.\n\n This is a generalization of the dynamic type where a certain\n number of the bits and be reserved for the linear quantization\n region (the fraction). 
n determines the maximum number of\n exponent bits.\n\n For more details see\n (8-Bit Approximations for Parallelism in Deep Learning)[https://arxiv.org/abs/1511.04561]\n \"\"\"\n\n data = []\n # these are additional items that come from the case\n # where all the exponent bits are zero and no\n # indicator bit is present\n non_sign_bits = total_bits - 1\n additional_items = 2 ** (non_sign_bits - max_exponent_bits) - 1\n for i in range(max_exponent_bits):\n fraction_items = int(\n 2 ** (i + non_sign_bits - max_exponent_bits) + 1\n if signed\n else 2 ** (i + non_sign_bits - max_exponent_bits + 1) + 1,\n )\n boundaries = torch.linspace(0.1, 1, fraction_items, dtype=torch.float32)\n means = (boundaries[:-1] + boundaries[1:]) / 2.0\n data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()\n if signed:\n data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()\n\n if additional_items > 0:\n boundaries = torch.linspace(0.1, 1, additional_items + 1, dtype=torch.float32)\n means = (boundaries[:-1] + boundaries[1:]) / 2.0\n data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()\n if signed:\n data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()\n\n data.append(0)\n data.append(1.0)\n\n assert len(data) == 2**total_bits\n\n gap = 256 - len(data)\n for i in range(gap):\n data.append(0)\n\n data.sort()\n return torch.tensor(data, dtype=torch.float32)\n\n\ndef is_on_gpu(tensors: Iterable[Optional[torch.Tensor]]):\n \"\"\"Verifies that the input tensors are all on the same device.\n\n An input tensor may also be marked as `paged`, in which case the device placement is ignored.\n\n Args:\n tensors (`Iterable[Optional[torch.Tensor]]`): A list of tensors to verify.\n\n Raises:\n `RuntimeError`: Raised when the verification fails.\n\n Returns:\n `Literal[True]`\n \"\"\"\n\n on_gpu = True\n gpu_ids = set()\n\n for t in tensors:\n # NULL pointers and paged tensors are OK.\n if t is not None and not getattr(t, \"is_paged\", False):\n on_gpu &= t.device.type != \"cpu\"\n gpu_ids.add((t.device.type, t.device.index))\n\n if not on_gpu:\n raise RuntimeError(\n f\"All input tensors need to be on the same GPU, but found some tensors to not be on a GPU:\\n {[(t.shape, t.device) for t in tensors]}\",\n )\n\n if len(gpu_ids) > 1:\n raise RuntimeError(\n f\"Input tensors need to be on the same GPU, but found the following tensor and device combinations:\\n {[(t.shape, t.device) for t in tensors]}\",\n )\n return on_gpu\n\n\ndef _get_tensor_stream(tensor: Tensor) -> ct.c_void_p:\n # We use the raw stream for performance reasons.\n if tensor.device.type == \"xpu\":\n return ct.c_void_p(torch._C._xpu_getCurrentRawStream(tensor.device.index))\n return ct.c_void_p(torch._C._cuda_getCurrentRawStream(tensor.device.index))\n\n\ndef get_ptr(A: Optional[Tensor]) -> Optional[ct.c_void_p]:\n \"\"\"Gets the memory address of the first element of a tenso\n\n Args:\n A (`Optional[Tensor]`): A PyTorch tensor.\n\n Returns:\n `Optional[ct.c_void_p]`: A pointer to the underlying tensor data.\n \"\"\"\n if A is None:\n return None\n\n return ct.c_void_p(A.data_ptr())\n\n\nclass QuantState:\n \"\"\"container for quantization state components to work with Params4bit and similar classes\"\"\"\n\n valid_quant_types = (\"fp4\", \"nf4\")\n valid_qs_type_keys = [f\"bitsandbytes__{x}\" for x in valid_quant_types]\n valid_qs_keys = [\n \"absmax\",\n \"quant_map\",\n \"nested_absmax\",\n \"nested_quant_map\",\n \"quant_state\",\n \"quant_type\",\n \"blocksize\",\n \"dtype\",\n \"shape\",\n 
\"nested_blocksize\",\n \"nested_dtype\",\n \"nested_offset\",\n ]\n\n def __init__(\n self,\n absmax,\n shape=None,\n code=None,\n blocksize=None,\n quant_type=None,\n dtype=None,\n offset=None,\n state2=None,\n ):\n self.absmax = absmax\n self.shape = shape\n self.code = code\n self.dtype = dtype\n self.blocksize = blocksize\n self.quant_type = quant_type\n self.offset = offset\n self.state2 = state2\n self.nested = state2 is not None\n\n def __getitem__(self, idx):\n \"\"\"\n ensures compatibility with older quant state scheme with nested lists.\n assumes the following layout:\n state = [qabsmax, input_shape, A.dtype, blocksize, [offset, state2], quant_type]\n state2 = [absmax, input_shape, A.dtype, blocksize, None, quant_type]\n \"\"\"\n if self.nested:\n list_repr = [\n self.absmax,\n self.shape,\n self.dtype,\n self.blocksize,\n [self.offset, self.state2],\n self.quant_type,\n ]\n else:\n list_repr = [self.absmax, self.shape, self.dtype, self.blocksize, None, self.quant_type]\n return list_repr[idx]\n\n @classmethod\n def from_dict(cls, qs_dict: dict[str, Any], device: torch.device) -> \"QuantState\":\n \"\"\"\n unpacks components of state_dict into QuantState\n where necessary, convert into strings, torch.dtype, ints, etc.\n\n qs_dict: based on state_dict, with only relevant keys, striped of prefixes.\n\n item with key `quant_state.bitsandbytes__[nf4/fp4]` may contain minor and non-tensor quant state items.\n \"\"\"\n\n # unpacking tensor with non-tensor components\n qs_key = [k for k, v in qs_dict.items() if \"quant_state\" in k and isinstance(v, torch.Tensor)]\n if not len(qs_key) and \"quant_type\" not in qs_dict:\n raise ValueError(\"Expected packed or unpacked quant_state items, found neither\")\n elif len(qs_key) != 1 or qs_key[0].split(\".\")[-1] not in cls.valid_qs_type_keys:\n raise ValueError(\n f\"There should be exactly one `quant_state` item with ending from {cls.valid_qs_type_keys}.\\nDetected {qs_key}.\",\n )\n\n # unpacking minor and non-tensor quant state items if necessary\n if len(qs_key) == 1:\n first_qs_key = qs_key[0]\n qs_dict.update(unpack_tensor_to_dict(qs_dict.pop(first_qs_key)))\n\n qs_dict = {k.split(\".\")[-1]: v for k, v in qs_dict.items()} # strip prefixes\n assert set(qs_dict.keys()).issubset(cls.valid_qs_keys)\n\n if \"nested_absmax\" in qs_dict:\n offset = torch.tensor(float(qs_dict[\"nested_offset\"])).to(device)\n state2 = cls(\n absmax=qs_dict[\"nested_absmax\"].to(device),\n blocksize=qs_dict[\"nested_blocksize\"],\n code=qs_dict[\"nested_quant_map\"].to(device),\n dtype=getattr(torch, qs_dict[\"nested_dtype\"]),\n )\n else:\n offset, state2 = None, None\n\n quant_state = cls(\n quant_type=qs_dict[\"quant_type\"],\n absmax=qs_dict[\"absmax\"].to(device),\n blocksize=qs_dict[\"blocksize\"],\n code=qs_dict[\"quant_map\"].to(device),\n dtype=getattr(torch, qs_dict[\"dtype\"]),\n shape=torch.Size(qs_dict[\"shape\"]) if qs_dict[\"shape\"] is not None else None,\n offset=offset,\n state2=state2,\n )\n return quant_state\n\n def as_dict(self, packed=False):\n \"\"\"\n returns dict of tensors and strings to use in serialization via _save_to_state_dict()\n param: packed -- returns dict[str, torch.Tensor] for state_dict fit for safetensors saving\n \"\"\"\n qs_dict = {\n \"quant_type\": self.quant_type,\n \"absmax\": self.absmax,\n \"blocksize\": self.blocksize,\n \"quant_map\": self.code,\n \"dtype\": str(self.dtype).strip(\"torch.\"),\n \"shape\": tuple(self.shape),\n }\n if self.nested:\n qs_dict.update(\n {\n \"nested_absmax\": self.state2.absmax,\n 
\"nested_blocksize\": self.state2.blocksize,\n \"nested_quant_map\": self.state2.code.clone(), # un-shared to avoid restoring it after shared tensors are removed by safetensors\n \"nested_dtype\": str(self.state2.dtype).strip(\"torch.\"),\n \"nested_offset\": self.offset.item(),\n },\n )\n if not packed:\n return qs_dict\n\n # packed format allows serialization of non-tensor components, critical for saving in safetensors format\n qs_packed_dict = {k: v for k, v in qs_dict.items() if isinstance(v, torch.Tensor)}\n non_tensor_dict = {k: v for k, v in qs_dict.items() if not isinstance(v, torch.Tensor)}\n qs_packed_dict[\"quant_state.\" + \"bitsandbytes__\" + self.quant_type] = pack_dict_to_tensor(non_tensor_dict)\n return qs_packed_dict\n\n def to(self, device):\n # make sure the quantization state is on the right device\n self.code = self.code.to(device)\n self.absmax = self.absmax.to(device)\n if self.nested:\n self.offset = self.offset.to(device)\n self.state2.absmax = self.state2.absmax.to(device)\n self.state2.code = self.state2.code.to(device)\n\n def __eq__(self, other):\n if not isinstance(other, QuantState):\n return False\n\n return (\n torch.allclose(self.absmax, other.absmax, atol=1e-6)\n and self.shape == other.shape\n and torch.allclose(self.code, other.code, atol=1e-6)\n and self.dtype == other.dtype\n and self.blocksize == other.blocksize\n and self.quant_type == other.quant_type\n and (\n self.offset == other.offset\n if self.offset is not None and other.offset is not None\n else self.offset is other.offset\n )\n and (\n self.state2 == other.state2\n if self.state2 is not None and other.state2 is not None\n else self.state2 is other.state2\n )\n )\n\n\ndef quantize_blockwise(\n A: torch.Tensor,\n code: Optional[torch.Tensor] = None,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize=4096,\n nested=False,\n) -> tuple[torch.Tensor, QuantState]:\n \"\"\"Quantize a tensor in blocks of values.\n\n The input tensor is quantized by dividing it into blocks of `blocksize` values.\n The the absolute maximum value within these blocks is calculated for scaling\n the non-linear quantization.\n\n Args:\n A (`torch.Tensor`): The input tensor. Supports `float16`, `bfloat16`, or `float32` datatypes.\n code (`torch.Tensor`, *optional*):\n A mapping describing the low-bit data type. Defaults to a signed 8-bit dynamic type.\n For more details, see (8-Bit Approximations for Parallelism in Deep Learning)[https://arxiv.org/abs/1511.04561].\n absmax (`torch.Tensor`, *optional*): A tensor to use to stor\n# ... 
truncated ...","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":true} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.GlobalPageManager","uri":"program://bitsandbytes/class/bitsandbytes.functional.GlobalPageManager#L51-L72","kind":"class","name":"GlobalPageManager","path":"bitsandbytes/functional.py","language":"python","start_line":51,"end_line":72,"context_start_line":31,"context_end_line":92,"code":" ),\n \"rmsprop\": (\n lib.crmsprop_static_8bit_grad_32,\n lib.crmsprop_static_8bit_grad_16,\n ),\n \"lion\": (\n lib.clion_static_8bit_grad_32,\n lib.clion_static_8bit_grad_16,\n ),\n \"lamb\": (\n lib.cadam_static_8bit_grad_32,\n lib.cadam_static_8bit_grad_16,\n ),\n \"lars\": (\n lib.cmomentum_static_8bit_grad_32,\n lib.cmomentum_static_8bit_grad_16,\n ),\n}\n\n\nclass GlobalPageManager:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.paged_tensors = []\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def prefetch_all(self, to_cpu=False):\n # assume the first added, will be the\n # ones that are used first, so swap them in last\n # in the case they are evicted again\n for t in self.paged_tensors[::-1]:\n prefetch_tensor(t, to_cpu)\n\n\nclass CUBLAS_Context:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.context = {}\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def get_context(self, device):\n if device.index not in self.context:","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.CUBLAS_Context","uri":"program://bitsandbytes/class/bitsandbytes.functional.CUBLAS_Context#L75-L97","kind":"class","name":"CUBLAS_Context","path":"bitsandbytes/functional.py","language":"python","start_line":75,"end_line":97,"context_start_line":55,"context_end_line":117,"code":" raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.paged_tensors = []\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def prefetch_all(self, to_cpu=False):\n # assume the first added, will be the\n # ones that are used first, so swap them in last\n # in the case they are evicted again\n for t in self.paged_tensors[::-1]:\n prefetch_tensor(t, to_cpu)\n\n\nclass CUBLAS_Context:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.context = {}\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def get_context(self, device):\n if device.index not in self.context:\n prev_device = torch.cuda.current_device()\n torch.cuda.set_device(device)\n self.context[device.index] = ct.c_void_p(lib.get_context())\n torch.cuda.set_device(prev_device)\n return self.context[device.index]\n\n\nclass Cusparse_Context:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.context = 
ct.c_void_p(lib.get_cusparse())\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n\nFIRST_CUDA_DEVICE = torch.device(\"cuda\", index=0)","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.Cusparse_Context","uri":"program://bitsandbytes/class/bitsandbytes.functional.Cusparse_Context#L100-L114","kind":"class","name":"Cusparse_Context","path":"bitsandbytes/functional.py","language":"python","start_line":100,"end_line":114,"context_start_line":80,"context_end_line":134,"code":"\n def initialize(self):\n self.context = {}\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def get_context(self, device):\n if device.index not in self.context:\n prev_device = torch.cuda.current_device()\n torch.cuda.set_device(device)\n self.context[device.index] = ct.c_void_p(lib.get_context())\n torch.cuda.set_device(prev_device)\n return self.context[device.index]\n\n\nclass Cusparse_Context:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.context = ct.c_void_p(lib.get_cusparse())\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n\nFIRST_CUDA_DEVICE = torch.device(\"cuda\", index=0)\n\n# When multiple GPUs are present, we use a context manager to\n# switch to the correct device of a tensor before invoking our CUDA\n# kernels in the C++ library. However, when there's only one device\n# there is no need to incur the overhead of cudaGetDevice/cudaSetDevice.\nif torch.cuda.device_count() > 1:\n\n def _cuda_device_of(a: torch.Tensor):\n return torch.cuda.device_of(a)\nelse:\n import contextlib\n\n def _cuda_device_of(a: torch.Tensor):\n return contextlib.nullcontext()\n\n\ndef get_paged(*shape, dtype=torch.float32, device=FIRST_CUDA_DEVICE):","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.get_paged","uri":"program://bitsandbytes/function/bitsandbytes.functional.get_paged#L134-L142","kind":"function","name":"get_paged","path":"bitsandbytes/functional.py","language":"python","start_line":134,"end_line":142,"context_start_line":114,"context_end_line":162,"code":" return cls._instance\n\n\nFIRST_CUDA_DEVICE = torch.device(\"cuda\", index=0)\n\n# When multiple GPUs are present, we use a context manager to\n# switch to the correct device of a tensor before invoking our CUDA\n# kernels in the C++ library. 
However, when there's only one device\n# there is no need to incur the overhead of cudaGetDevice/cudaSetDevice.\nif torch.cuda.device_count() > 1:\n\n def _cuda_device_of(a: torch.Tensor):\n return torch.cuda.device_of(a)\nelse:\n import contextlib\n\n def _cuda_device_of(a: torch.Tensor):\n return contextlib.nullcontext()\n\n\ndef get_paged(*shape, dtype=torch.float32, device=FIRST_CUDA_DEVICE):\n num_bytes = dtype.itemsize * prod(shape)\n cuda_ptr = lib.cget_managed_ptr(ct.c_size_t(num_bytes))\n c_ptr = ct.cast(cuda_ptr, ct.POINTER(ct.c_int))\n new_array = np.ctypeslib.as_array(c_ptr, shape=shape)\n out = torch.frombuffer(new_array, dtype=dtype, count=prod(shape)).view(shape)\n out.is_paged = True\n out.page_deviceid = device.index\n return out\n\n\ndef prefetch_tensor(A: torch.Tensor, to_cpu=False):\n assert A.is_paged, \"Only paged tensors can be prefetched!\"\n if to_cpu:\n deviceid = -1\n else:\n deviceid = A.page_deviceid\n\n lib.cprefetch(get_ptr(A), ct.c_size_t(A.nbytes), ct.c_int32(deviceid))\n\n\ndef elementwise_func(func_name, A, B, value, prefetch=True):\n func = None\n if A.dtype == torch.float32:\n func = getattr(lib, f\"c{func_name}_fp32\", None)\n cvalue = ct.c_float(value)\n elif A.dtype == torch.uint8:\n func = getattr(lib, f\"c{func_name}_uint8\", None)\n cvalue = ct.c_uint8(value)","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.prefetch_tensor","uri":"program://bitsandbytes/function/bitsandbytes.functional.prefetch_tensor#L145-L152","kind":"function","name":"prefetch_tensor","path":"bitsandbytes/functional.py","language":"python","start_line":145,"end_line":152,"context_start_line":125,"context_end_line":172,"code":" def _cuda_device_of(a: torch.Tensor):\n return torch.cuda.device_of(a)\nelse:\n import contextlib\n\n def _cuda_device_of(a: torch.Tensor):\n return contextlib.nullcontext()\n\n\ndef get_paged(*shape, dtype=torch.float32, device=FIRST_CUDA_DEVICE):\n num_bytes = dtype.itemsize * prod(shape)\n cuda_ptr = lib.cget_managed_ptr(ct.c_size_t(num_bytes))\n c_ptr = ct.cast(cuda_ptr, ct.POINTER(ct.c_int))\n new_array = np.ctypeslib.as_array(c_ptr, shape=shape)\n out = torch.frombuffer(new_array, dtype=dtype, count=prod(shape)).view(shape)\n out.is_paged = True\n out.page_deviceid = device.index\n return out\n\n\ndef prefetch_tensor(A: torch.Tensor, to_cpu=False):\n assert A.is_paged, \"Only paged tensors can be prefetched!\"\n if to_cpu:\n deviceid = -1\n else:\n deviceid = A.page_deviceid\n\n lib.cprefetch(get_ptr(A), ct.c_size_t(A.nbytes), ct.c_int32(deviceid))\n\n\ndef elementwise_func(func_name, A, B, value, prefetch=True):\n func = None\n if A.dtype == torch.float32:\n func = getattr(lib, f\"c{func_name}_fp32\", None)\n cvalue = ct.c_float(value)\n elif A.dtype == torch.uint8:\n func = getattr(lib, f\"c{func_name}_uint8\", None)\n cvalue = ct.c_uint8(value)\n\n if func is None:\n raise NotImplementedError(f\"Function not implemented: {func_name}\")\n\n is_managed = getattr(A, \"is_managed\", False)\n if is_managed and prefetch:\n prefetch_tensor(A)\n if B is not None:\n prefetch_tensor(B)\n","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} 
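A minimal usage sketch for the paged-tensor helpers quoted in the surrounding records (get_paged, fill, prefetch_tensor). This is an illustrative sketch, not code from the repository records, and it assumes a CUDA build of bitsandbytes with at least one visible GPU:

    import torch
    import bitsandbytes.functional as F

    # get_paged allocates CUDA managed ("paged") memory via cget_managed_ptr
    # and tags the tensor with is_paged=True plus the owning device index.
    buf = F.get_paged(1024, 1024, dtype=torch.float32)

    # fill routes through elementwise_func to the fp32 C kernel; as the
    # comment in elementwise_func notes, the call synchronizes for paged
    # tensors, so the written values are visible once it returns.
    F.fill(buf, 0.5)

    # Migrate the pages back to the owning GPU ahead of use, or move them
    # to the CPU by passing to_cpu=True (device id -1 in cprefetch).
    F.prefetch_tensor(buf)
    F.prefetch_tensor(buf, to_cpu=True)

The _cuda_device_of shim quoted above exists for the reason its comment gives: with a single GPU there is no need to pay the cudaGetDevice/cudaSetDevice cost, so it degrades to a contextlib.nullcontext().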
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.elementwise_func","uri":"program://bitsandbytes/function/bitsandbytes.functional.elementwise_func#L155-L179","kind":"function","name":"elementwise_func","path":"bitsandbytes/functional.py","language":"python","start_line":155,"end_line":179,"context_start_line":135,"context_end_line":199,"code":" num_bytes = dtype.itemsize * prod(shape)\n cuda_ptr = lib.cget_managed_ptr(ct.c_size_t(num_bytes))\n c_ptr = ct.cast(cuda_ptr, ct.POINTER(ct.c_int))\n new_array = np.ctypeslib.as_array(c_ptr, shape=shape)\n out = torch.frombuffer(new_array, dtype=dtype, count=prod(shape)).view(shape)\n out.is_paged = True\n out.page_deviceid = device.index\n return out\n\n\ndef prefetch_tensor(A: torch.Tensor, to_cpu=False):\n assert A.is_paged, \"Only paged tensors can be prefetched!\"\n if to_cpu:\n deviceid = -1\n else:\n deviceid = A.page_deviceid\n\n lib.cprefetch(get_ptr(A), ct.c_size_t(A.nbytes), ct.c_int32(deviceid))\n\n\ndef elementwise_func(func_name, A, B, value, prefetch=True):\n func = None\n if A.dtype == torch.float32:\n func = getattr(lib, f\"c{func_name}_fp32\", None)\n cvalue = ct.c_float(value)\n elif A.dtype == torch.uint8:\n func = getattr(lib, f\"c{func_name}_uint8\", None)\n cvalue = ct.c_uint8(value)\n\n if func is None:\n raise NotImplementedError(f\"Function not implemented: {func_name}\")\n\n is_managed = getattr(A, \"is_managed\", False)\n if is_managed and prefetch:\n prefetch_tensor(A)\n if B is not None:\n prefetch_tensor(B)\n\n func(get_ptr(A), get_ptr(B), cvalue, ct.c_int64(A.numel()))\n if A.is_paged or B.is_paged:\n # paged function are fully asynchronous\n # if we return from this function, we want to the tensor\n # to be in the correct state, that is the final state after the\n # operation occurred. So we synchronize.\n torch.cuda.synchronize()\n\n\ndef fill(A, value, device=None, prefetch=True):\n elementwise_func(\"fill\", A, None, value)\n\n\ndef _mul(A, B, device=None):\n elementwise_func(\"_mul\", A, B, 0)\n\n\ndef create_linear_map(signed=True, total_bits=8, add_zero=True):\n sign = -1.0 if signed else 0.0\n total_values = 2**total_bits\n if add_zero or total_bits < 8:\n # add a zero\n # since we simulate less bits by having zeros in the data type, we\n # we need to center the quantization around zero and as such lose\n # a single value\n total_values = 2**total_bits if not signed else 2**total_bits - 1\n","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.fill","uri":"program://bitsandbytes/function/bitsandbytes.functional.fill#L182-L183","kind":"function","name":"fill","path":"bitsandbytes/functional.py","language":"python","start_line":182,"end_line":183,"context_start_line":162,"context_end_line":203,"code":" cvalue = ct.c_uint8(value)\n\n if func is None:\n raise NotImplementedError(f\"Function not implemented: {func_name}\")\n\n is_managed = getattr(A, \"is_managed\", False)\n if is_managed and prefetch:\n prefetch_tensor(A)\n if B is not None:\n prefetch_tensor(B)\n\n func(get_ptr(A), get_ptr(B), cvalue, ct.c_int64(A.numel()))\n if A.is_paged or B.is_paged:\n # paged function are fully asynchronous\n # if we return from this function, we want to the tensor\n # to be in the correct state, that is the final state after the\n # operation occurred. 
So we synchronize.\n torch.cuda.synchronize()\n\n\ndef fill(A, value, device=None, prefetch=True):\n elementwise_func(\"fill\", A, None, value)\n\n\ndef _mul(A, B, device=None):\n elementwise_func(\"_mul\", A, B, 0)\n\n\ndef create_linear_map(signed=True, total_bits=8, add_zero=True):\n sign = -1.0 if signed else 0.0\n total_values = 2**total_bits\n if add_zero or total_bits < 8:\n # add a zero\n # since we simulate less bits by having zeros in the data type, we\n # we need to center the quantization around zero and as such lose\n # a single value\n total_values = 2**total_bits if not signed else 2**total_bits - 1\n\n values = torch.linspace(sign, 1.0, total_values)\n gap = 256 - values.numel()\n if gap == 0:\n return values","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional._mul","uri":"program://bitsandbytes/function/bitsandbytes.functional._mul#L186-L187","kind":"function","name":"_mul","path":"bitsandbytes/functional.py","language":"python","start_line":186,"end_line":187,"context_start_line":166,"context_end_line":207,"code":"\n is_managed = getattr(A, \"is_managed\", False)\n if is_managed and prefetch:\n prefetch_tensor(A)\n if B is not None:\n prefetch_tensor(B)\n\n func(get_ptr(A), get_ptr(B), cvalue, ct.c_int64(A.numel()))\n if A.is_paged or B.is_paged:\n # paged function are fully asynchronous\n # if we return from this function, we want to the tensor\n # to be in the correct state, that is the final state after the\n # operation occurred. So we synchronize.\n torch.cuda.synchronize()\n\n\ndef fill(A, value, device=None, prefetch=True):\n elementwise_func(\"fill\", A, None, value)\n\n\ndef _mul(A, B, device=None):\n elementwise_func(\"_mul\", A, B, 0)\n\n\ndef create_linear_map(signed=True, total_bits=8, add_zero=True):\n sign = -1.0 if signed else 0.0\n total_values = 2**total_bits\n if add_zero or total_bits < 8:\n # add a zero\n # since we simulate less bits by having zeros in the data type, we\n # we need to center the quantization around zero and as such lose\n # a single value\n total_values = 2**total_bits if not signed else 2**total_bits - 1\n\n values = torch.linspace(sign, 1.0, total_values)\n gap = 256 - values.numel()\n if gap == 0:\n return values\n else:\n l = values.numel() // 2 # noqa: E741\n return torch.Tensor(values[:l].tolist() + [0] * gap + values[l:].tolist())\n","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.create_linear_map","uri":"program://bitsandbytes/function/bitsandbytes.functional.create_linear_map#L190-L206","kind":"function","name":"create_linear_map","path":"bitsandbytes/functional.py","language":"python","start_line":190,"end_line":206,"context_start_line":170,"context_end_line":226,"code":" if B is not None:\n prefetch_tensor(B)\n\n func(get_ptr(A), get_ptr(B), cvalue, ct.c_int64(A.numel()))\n if A.is_paged or B.is_paged:\n # paged function are fully asynchronous\n # if we return from this function, we want to the tensor\n # to be in the correct state, that is the final state after the\n # operation occurred. 
So we synchronize.\n torch.cuda.synchronize()\n\n\ndef fill(A, value, device=None, prefetch=True):\n elementwise_func(\"fill\", A, None, value)\n\n\ndef _mul(A, B, device=None):\n elementwise_func(\"_mul\", A, B, 0)\n\n\ndef create_linear_map(signed=True, total_bits=8, add_zero=True):\n sign = -1.0 if signed else 0.0\n total_values = 2**total_bits\n if add_zero or total_bits < 8:\n # add a zero\n # since we simulate less bits by having zeros in the data type, we\n # we need to center the quantization around zero and as such lose\n # a single value\n total_values = 2**total_bits if not signed else 2**total_bits - 1\n\n values = torch.linspace(sign, 1.0, total_values)\n gap = 256 - values.numel()\n if gap == 0:\n return values\n else:\n l = values.numel() // 2 # noqa: E741\n return torch.Tensor(values[:l].tolist() + [0] * gap + values[l:].tolist())\n\n\ndef create_normal_map(offset=0.9677083, use_extra_value=True):\n try:\n from scipy.stats import norm\n except ImportError as ie:\n raise ImportError(\n \"Scipy is required for `create_normal_map`. Install `bitsandbytes` with the `[test]` extra.\",\n ) from ie\n\n if use_extra_value:\n # one more positive value, this is an asymmetric type\n v1 = norm.ppf(torch.linspace(offset, 0.5, 9)[:-1]).tolist()\n v2 = [0] * (256 - 15) ## we have 15 non-zero values in this data type\n v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()\n else:\n v1 = norm.ppf(torch.linspace(offset, 0.5, 8)[:-1]).tolist()\n v2 = [0] * (256 - 14) ## we have 14 non-zero values in this data type\n v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()\n","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.create_normal_map","uri":"program://bitsandbytes/function/bitsandbytes.functional.create_normal_map#L209-L235","kind":"function","name":"create_normal_map","path":"bitsandbytes/functional.py","language":"python","start_line":209,"end_line":235,"context_start_line":189,"context_end_line":255,"code":"\ndef create_linear_map(signed=True, total_bits=8, add_zero=True):\n sign = -1.0 if signed else 0.0\n total_values = 2**total_bits\n if add_zero or total_bits < 8:\n # add a zero\n # since we simulate less bits by having zeros in the data type, we\n # we need to center the quantization around zero and as such lose\n # a single value\n total_values = 2**total_bits if not signed else 2**total_bits - 1\n\n values = torch.linspace(sign, 1.0, total_values)\n gap = 256 - values.numel()\n if gap == 0:\n return values\n else:\n l = values.numel() // 2 # noqa: E741\n return torch.Tensor(values[:l].tolist() + [0] * gap + values[l:].tolist())\n\n\ndef create_normal_map(offset=0.9677083, use_extra_value=True):\n try:\n from scipy.stats import norm\n except ImportError as ie:\n raise ImportError(\n \"Scipy is required for `create_normal_map`. 
Install `bitsandbytes` with the `[test]` extra.\",\n ) from ie\n\n if use_extra_value:\n # one more positive value, this is an asymmetric type\n v1 = norm.ppf(torch.linspace(offset, 0.5, 9)[:-1]).tolist()\n v2 = [0] * (256 - 15) ## we have 15 non-zero values in this data type\n v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()\n else:\n v1 = norm.ppf(torch.linspace(offset, 0.5, 8)[:-1]).tolist()\n v2 = [0] * (256 - 14) ## we have 14 non-zero values in this data type\n v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()\n\n v = v1 + v2 + v3\n\n values = torch.Tensor(v)\n values = values.sort().values\n values /= values.max()\n\n assert values.numel() == 256\n\n return values\n\n\ndef create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8):\n e = exponent_bits\n p = precision_bits\n has_sign = 1 if signed else 0\n assert e + p == total_bits - has_sign\n # the exponent is biased to 2^(e-1) -1 == 0\n evalues = []\n for i, val in enumerate(range(-(2 ** (exponent_bits - has_sign)), 2 ** (exponent_bits - has_sign), 1)):\n evalues.append(2**val)\n\n values = []\n lst = list(itertools.product([0, 1], repeat=precision_bits))\n # for ev in evalues:\n bias = 2 ** (exponent_bits - 1)\n for evalue in range(2 ** (exponent_bits)):\n for bit_pattern in lst:\n value = 1 if evalue != 0 else 0\n for i, pval in enumerate(list(bit_pattern)):","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.create_fp8_map","uri":"program://bitsandbytes/function/bitsandbytes.functional.create_fp8_map#L238-L277","kind":"function","name":"create_fp8_map","path":"bitsandbytes/functional.py","language":"python","start_line":238,"end_line":277,"context_start_line":218,"context_end_line":297,"code":" # one more positive value, this is an asymmetric type\n v1 = norm.ppf(torch.linspace(offset, 0.5, 9)[:-1]).tolist()\n v2 = [0] * (256 - 15) ## we have 15 non-zero values in this data type\n v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()\n else:\n v1 = norm.ppf(torch.linspace(offset, 0.5, 8)[:-1]).tolist()\n v2 = [0] * (256 - 14) ## we have 14 non-zero values in this data type\n v3 = (-norm.ppf(torch.linspace(offset, 0.5, 8)[:-1])).tolist()\n\n v = v1 + v2 + v3\n\n values = torch.Tensor(v)\n values = values.sort().values\n values /= values.max()\n\n assert values.numel() == 256\n\n return values\n\n\ndef create_fp8_map(signed=True, exponent_bits=5, precision_bits=2, total_bits=8):\n e = exponent_bits\n p = precision_bits\n has_sign = 1 if signed else 0\n assert e + p == total_bits - has_sign\n # the exponent is biased to 2^(e-1) -1 == 0\n evalues = []\n for i, val in enumerate(range(-(2 ** (exponent_bits - has_sign)), 2 ** (exponent_bits - has_sign), 1)):\n evalues.append(2**val)\n\n values = []\n lst = list(itertools.product([0, 1], repeat=precision_bits))\n # for ev in evalues:\n bias = 2 ** (exponent_bits - 1)\n for evalue in range(2 ** (exponent_bits)):\n for bit_pattern in lst:\n value = 1 if evalue != 0 else 0\n for i, pval in enumerate(list(bit_pattern)):\n value += pval * (2 ** -(i + 1))\n if evalue == 0:\n # subnormals\n value = value * 2**-(bias)\n else:\n # normals\n value = value * 2 ** -(evalue - bias - 1)\n values.append(value)\n if signed:\n values.append(-value)\n\n assert len(values) == 2**total_bits\n values.sort()\n if total_bits < 8:\n gap = 256 - len(values)\n for i in range(gap):\n values.append(0)\n values.sort()\n code = 
torch.tensor(values)\n code /= code.max()\n\n return code\n\n\ndef create_dynamic_map(signed=True, max_exponent_bits=7, total_bits=8):\n \"\"\"\n Creates the dynamic quantization map.\n\n The dynamic data type is made up of a dynamic exponent and\n fraction. As the exponent increases from 0 to -7 the number\n of bits available for the fraction shrinks.\n\n This is a generalization of the dynamic type where a certain\n number of the bits can be reserved for the linear quantization\n region (the fraction). n determines the maximum number of\n exponent bits.\n\n For more details see\n (8-Bit Approximations for Parallelism in Deep Learning)[https://arxiv.org/abs/1511.04561]\n \"\"\"\n\n data = []","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.create_dynamic_map","uri":"program://bitsandbytes/function/bitsandbytes.functional.create_dynamic_map#L280-L332","kind":"function","name":"create_dynamic_map","path":"bitsandbytes/functional.py","language":"python","start_line":280,"end_line":332,"context_start_line":260,"context_end_line":352,"code":" else:\n # normals\n value = value * 2 ** -(evalue - bias - 1)\n values.append(value)\n if signed:\n values.append(-value)\n\n assert len(values) == 2**total_bits\n values.sort()\n if total_bits < 8:\n gap = 256 - len(values)\n for i in range(gap):\n values.append(0)\n values.sort()\n code = torch.tensor(values)\n code /= code.max()\n\n return code\n\n\ndef create_dynamic_map(signed=True, max_exponent_bits=7, total_bits=8):\n \"\"\"\n Creates the dynamic quantization map.\n\n The dynamic data type is made up of a dynamic exponent and\n fraction. As the exponent increases from 0 to -7 the number\n of bits available for the fraction shrinks.\n\n This is a generalization of the dynamic type where a certain\n number of the bits can be reserved for the linear quantization\n region (the fraction). 
n determines the maximum number of\n exponent bits.\n\n For more details see\n (8-Bit Approximations for Parallelism in Deep Learning)[https://arxiv.org/abs/1511.04561]\n \"\"\"\n\n data = []\n # these are additional items that come from the case\n # where all the exponent bits are zero and no\n # indicator bit is present\n non_sign_bits = total_bits - 1\n additional_items = 2 ** (non_sign_bits - max_exponent_bits) - 1\n for i in range(max_exponent_bits):\n fraction_items = int(\n 2 ** (i + non_sign_bits - max_exponent_bits) + 1\n if signed\n else 2 ** (i + non_sign_bits - max_exponent_bits + 1) + 1,\n )\n boundaries = torch.linspace(0.1, 1, fraction_items, dtype=torch.float32)\n means = (boundaries[:-1] + boundaries[1:]) / 2.0\n data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()\n if signed:\n data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()\n\n if additional_items > 0:\n boundaries = torch.linspace(0.1, 1, additional_items + 1, dtype=torch.float32)\n means = (boundaries[:-1] + boundaries[1:]) / 2.0\n data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()\n if signed:\n data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()\n\n data.append(0)\n data.append(1.0)\n\n assert len(data) == 2**total_bits\n\n gap = 256 - len(data)\n for i in range(gap):\n data.append(0)\n\n data.sort()\n return torch.tensor(data, dtype=torch.float32)\n\n\ndef is_on_gpu(tensors: Iterable[Optional[torch.Tensor]]):\n \"\"\"Verifies that the input tensors are all on the same device.\n\n An input tensor may also be marked as `paged`, in which case the device placement is ignored.\n\n Args:\n tensors (`Iterable[Optional[torch.Tensor]]`): A list of tensors to verify.\n\n Raises:\n `RuntimeError`: Raised when the verification fails.\n\n Returns:\n `Literal[True]`\n \"\"\"\n\n on_gpu = True\n gpu_ids = set()\n","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.is_on_gpu","uri":"program://bitsandbytes/function/bitsandbytes.functional.is_on_gpu#L335-L368","kind":"function","name":"is_on_gpu","path":"bitsandbytes/functional.py","language":"python","start_line":335,"end_line":368,"context_start_line":315,"context_end_line":388,"code":" if additional_items > 0:\n boundaries = torch.linspace(0.1, 1, additional_items + 1, dtype=torch.float32)\n means = (boundaries[:-1] + boundaries[1:]) / 2.0\n data += ((10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()\n if signed:\n data += (-(10 ** (-(max_exponent_bits - 1) + i)) * means).tolist()\n\n data.append(0)\n data.append(1.0)\n\n assert len(data) == 2**total_bits\n\n gap = 256 - len(data)\n for i in range(gap):\n data.append(0)\n\n data.sort()\n return torch.tensor(data, dtype=torch.float32)\n\n\ndef is_on_gpu(tensors: Iterable[Optional[torch.Tensor]]):\n \"\"\"Verifies that the input tensors are all on the same device.\n\n An input tensor may also be marked as `paged`, in which case the device placement is ignored.\n\n Args:\n tensors (`Iterable[Optional[torch.Tensor]]`): A list of tensors to verify.\n\n Raises:\n `RuntimeError`: Raised when the verification fails.\n\n Returns:\n `Literal[True]`\n \"\"\"\n\n on_gpu = True\n gpu_ids = set()\n\n for t in tensors:\n # NULL pointers and paged tensors are OK.\n if t is not None and not getattr(t, \"is_paged\", False):\n on_gpu &= t.device.type != \"cpu\"\n gpu_ids.add((t.device.type, t.device.index))\n\n if not on_gpu:\n raise RuntimeError(\n f\"All input 
tensors need to be on the same GPU, but found some tensors to not be on a GPU:\\n {[(t.shape, t.device) for t in tensors]}\",\n )\n\n if len(gpu_ids) > 1:\n raise RuntimeError(\n f\"Input tensors need to be on the same GPU, but found the following tensor and device combinations:\\n {[(t.shape, t.device) for t in tensors]}\",\n )\n return on_gpu\n\n\ndef _get_tensor_stream(tensor: Tensor) -> ct.c_void_p:\n # We use the raw stream for performance reasons.\n if tensor.device.type == \"xpu\":\n return ct.c_void_p(torch._C._xpu_getCurrentRawStream(tensor.device.index))\n return ct.c_void_p(torch._C._cuda_getCurrentRawStream(tensor.device.index))\n\n\ndef get_ptr(A: Optional[Tensor]) -> Optional[ct.c_void_p]:\n \"\"\"Gets the memory address of the first element of a tenso\n\n Args:\n A (`Optional[Tensor]`): A PyTorch tensor.\n\n Returns:\n `Optional[ct.c_void_p]`: A pointer to the underlying tensor data.\n \"\"\"\n if A is None:\n return None","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional._get_tensor_stream","uri":"program://bitsandbytes/function/bitsandbytes.functional._get_tensor_stream#L371-L375","kind":"function","name":"_get_tensor_stream","path":"bitsandbytes/functional.py","language":"python","start_line":371,"end_line":375,"context_start_line":351,"context_end_line":395,"code":" gpu_ids = set()\n\n for t in tensors:\n # NULL pointers and paged tensors are OK.\n if t is not None and not getattr(t, \"is_paged\", False):\n on_gpu &= t.device.type != \"cpu\"\n gpu_ids.add((t.device.type, t.device.index))\n\n if not on_gpu:\n raise RuntimeError(\n f\"All input tensors need to be on the same GPU, but found some tensors to not be on a GPU:\\n {[(t.shape, t.device) for t in tensors]}\",\n )\n\n if len(gpu_ids) > 1:\n raise RuntimeError(\n f\"Input tensors need to be on the same GPU, but found the following tensor and device combinations:\\n {[(t.shape, t.device) for t in tensors]}\",\n )\n return on_gpu\n\n\ndef _get_tensor_stream(tensor: Tensor) -> ct.c_void_p:\n # We use the raw stream for performance reasons.\n if tensor.device.type == \"xpu\":\n return ct.c_void_p(torch._C._xpu_getCurrentRawStream(tensor.device.index))\n return ct.c_void_p(torch._C._cuda_getCurrentRawStream(tensor.device.index))\n\n\ndef get_ptr(A: Optional[Tensor]) -> Optional[ct.c_void_p]:\n \"\"\"Gets the memory address of the first element of a tenso\n\n Args:\n A (`Optional[Tensor]`): A PyTorch tensor.\n\n Returns:\n `Optional[ct.c_void_p]`: A pointer to the underlying tensor data.\n \"\"\"\n if A is None:\n return None\n\n return ct.c_void_p(A.data_ptr())\n\n\nclass QuantState:\n \"\"\"container for quantization state components to work with Params4bit and similar classes\"\"\"\n","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.get_ptr","uri":"program://bitsandbytes/function/bitsandbytes.functional.get_ptr#L378-L390","kind":"function","name":"get_ptr","path":"bitsandbytes/functional.py","language":"python","start_line":378,"end_line":390,"context_start_line":358,"context_end_line":410,"code":"\n if not on_gpu:\n raise RuntimeError(\n f\"All input tensors need to be on the same GPU, but found some tensors to not be on a GPU:\\n {[(t.shape, t.device) for t in tensors]}\",\n )\n\n if len(gpu_ids) > 1:\n raise RuntimeError(\n f\"Input tensors need to be on the same GPU, but found the 
following tensor and device combinations:\\n {[(t.shape, t.device) for t in tensors]}\",\n )\n return on_gpu\n\n\ndef _get_tensor_stream(tensor: Tensor) -> ct.c_void_p:\n # We use the raw stream for performance reasons.\n if tensor.device.type == \"xpu\":\n return ct.c_void_p(torch._C._xpu_getCurrentRawStream(tensor.device.index))\n return ct.c_void_p(torch._C._cuda_getCurrentRawStream(tensor.device.index))\n\n\ndef get_ptr(A: Optional[Tensor]) -> Optional[ct.c_void_p]:\n \"\"\"Gets the memory address of the first element of a tenso\n\n Args:\n A (`Optional[Tensor]`): A PyTorch tensor.\n\n Returns:\n `Optional[ct.c_void_p]`: A pointer to the underlying tensor data.\n \"\"\"\n if A is None:\n return None\n\n return ct.c_void_p(A.data_ptr())\n\n\nclass QuantState:\n \"\"\"container for quantization state components to work with Params4bit and similar classes\"\"\"\n\n valid_quant_types = (\"fp4\", \"nf4\")\n valid_qs_type_keys = [f\"bitsandbytes__{x}\" for x in valid_quant_types]\n valid_qs_keys = [\n \"absmax\",\n \"quant_map\",\n \"nested_absmax\",\n \"nested_quant_map\",\n \"quant_state\",\n \"quant_type\",\n \"blocksize\",\n \"dtype\",\n \"shape\",\n \"nested_blocksize\",\n \"nested_dtype\",\n \"nested_offset\",","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.QuantState","uri":"program://bitsandbytes/class/bitsandbytes.functional.QuantState#L393-L567","kind":"class","name":"QuantState","path":"bitsandbytes/functional.py","language":"python","start_line":393,"end_line":567,"context_start_line":373,"context_end_line":587,"code":" if tensor.device.type == \"xpu\":\n return ct.c_void_p(torch._C._xpu_getCurrentRawStream(tensor.device.index))\n return ct.c_void_p(torch._C._cuda_getCurrentRawStream(tensor.device.index))\n\n\ndef get_ptr(A: Optional[Tensor]) -> Optional[ct.c_void_p]:\n \"\"\"Gets the memory address of the first element of a tenso\n\n Args:\n A (`Optional[Tensor]`): A PyTorch tensor.\n\n Returns:\n `Optional[ct.c_void_p]`: A pointer to the underlying tensor data.\n \"\"\"\n if A is None:\n return None\n\n return ct.c_void_p(A.data_ptr())\n\n\nclass QuantState:\n \"\"\"container for quantization state components to work with Params4bit and similar classes\"\"\"\n\n valid_quant_types = (\"fp4\", \"nf4\")\n valid_qs_type_keys = [f\"bitsandbytes__{x}\" for x in valid_quant_types]\n valid_qs_keys = [\n \"absmax\",\n \"quant_map\",\n \"nested_absmax\",\n \"nested_quant_map\",\n \"quant_state\",\n \"quant_type\",\n \"blocksize\",\n \"dtype\",\n \"shape\",\n \"nested_blocksize\",\n \"nested_dtype\",\n \"nested_offset\",\n ]\n\n def __init__(\n self,\n absmax,\n shape=None,\n code=None,\n blocksize=None,\n quant_type=None,\n dtype=None,\n offset=None,\n state2=None,\n ):\n self.absmax = absmax\n self.shape = shape\n self.code = code\n self.dtype = dtype\n self.blocksize = blocksize\n self.quant_type = quant_type\n self.offset = offset\n self.state2 = state2\n self.nested = state2 is not None\n\n def __getitem__(self, idx):\n \"\"\"\n ensures compatibility with older quant state scheme with nested lists.\n assumes the following layout:\n state = [qabsmax, input_shape, A.dtype, blocksize, [offset, state2], quant_type]\n state2 = [absmax, input_shape, A.dtype, blocksize, None, quant_type]\n \"\"\"\n if self.nested:\n list_repr = [\n self.absmax,\n self.shape,\n self.dtype,\n self.blocksize,\n [self.offset, self.state2],\n self.quant_type,\n ]\n else:\n list_repr = 
[self.absmax, self.shape, self.dtype, self.blocksize, None, self.quant_type]\n return list_repr[idx]\n\n @classmethod\n def from_dict(cls, qs_dict: dict[str, Any], device: torch.device) -> \"QuantState\":\n \"\"\"\n unpacks components of state_dict into QuantState\n where necessary, convert into strings, torch.dtype, ints, etc.\n\n qs_dict: based on state_dict, with only relevant keys, striped of prefixes.\n\n item with key `quant_state.bitsandbytes__[nf4/fp4]` may contain minor and non-tensor quant state items.\n \"\"\"\n\n # unpacking tensor with non-tensor components\n qs_key = [k for k, v in qs_dict.items() if \"quant_state\" in k and isinstance(v, torch.Tensor)]\n if not len(qs_key) and \"quant_type\" not in qs_dict:\n raise ValueError(\"Expected packed or unpacked quant_state items, found neither\")\n elif len(qs_key) != 1 or qs_key[0].split(\".\")[-1] not in cls.valid_qs_type_keys:\n raise ValueError(\n f\"There should be exactly one `quant_state` item with ending from {cls.valid_qs_type_keys}.\\nDetected {qs_key}.\",\n )\n\n # unpacking minor and non-tensor quant state items if necessary\n if len(qs_key) == 1:\n first_qs_key = qs_key[0]\n qs_dict.update(unpack_tensor_to_dict(qs_dict.pop(first_qs_key)))\n\n qs_dict = {k.split(\".\")[-1]: v for k, v in qs_dict.items()} # strip prefixes\n assert set(qs_dict.keys()).issubset(cls.valid_qs_keys)\n\n if \"nested_absmax\" in qs_dict:\n offset = torch.tensor(float(qs_dict[\"nested_offset\"])).to(device)\n state2 = cls(\n absmax=qs_dict[\"nested_absmax\"].to(device),\n blocksize=qs_dict[\"nested_blocksize\"],\n code=qs_dict[\"nested_quant_map\"].to(device),\n dtype=getattr(torch, qs_dict[\"nested_dtype\"]),\n )\n else:\n offset, state2 = None, None\n\n quant_state = cls(\n quant_type=qs_dict[\"quant_type\"],\n absmax=qs_dict[\"absmax\"].to(device),\n blocksize=qs_dict[\"blocksize\"],\n code=qs_dict[\"quant_map\"].to(device),\n dtype=getattr(torch, qs_dict[\"dtype\"]),\n shape=torch.Size(qs_dict[\"shape\"]) if qs_dict[\"shape\"] is not None else None,\n offset=offset,\n state2=state2,\n )\n return quant_state\n\n def as_dict(self, packed=False):\n \"\"\"\n returns dict of tensors and strings to use in serialization via _save_to_state_dict()\n param: packed -- returns dict[str, torch.Tensor] for state_dict fit for safetensors saving\n \"\"\"\n qs_dict = {\n \"quant_type\": self.quant_type,\n \"absmax\": self.absmax,\n \"blocksize\": self.blocksize,\n \"quant_map\": self.code,\n \"dtype\": str(self.dtype).strip(\"torch.\"),\n \"shape\": tuple(self.shape),\n }\n if self.nested:\n qs_dict.update(\n {\n \"nested_absmax\": self.state2.absmax,\n \"nested_blocksize\": self.state2.blocksize,\n \"nested_quant_map\": self.state2.code.clone(), # un-shared to avoid restoring it after shared tensors are removed by safetensors\n \"nested_dtype\": str(self.state2.dtype).strip(\"torch.\"),\n \"nested_offset\": self.offset.item(),\n },\n )\n if not packed:\n return qs_dict\n\n # packed format allows serialization of non-tensor components, critical for saving in safetensors format\n qs_packed_dict = {k: v for k, v in qs_dict.items() if isinstance(v, torch.Tensor)}\n non_tensor_dict = {k: v for k, v in qs_dict.items() if not isinstance(v, torch.Tensor)}\n qs_packed_dict[\"quant_state.\" + \"bitsandbytes__\" + self.quant_type] = pack_dict_to_tensor(non_tensor_dict)\n return qs_packed_dict\n\n def to(self, device):\n # make sure the quantization state is on the right device\n self.code = self.code.to(device)\n self.absmax = self.absmax.to(device)\n if 
self.nested:\n self.offset = self.offset.to(device)\n self.state2.absmax = self.state2.absmax.to(device)\n self.state2.code = self.state2.code.to(device)\n\n def __eq__(self, other):\n if not isinstance(other, QuantState):\n return False\n\n return (\n torch.allclose(self.absmax, other.absmax, atol=1e-6)\n and self.shape == other.shape\n and torch.allclose(self.code, other.code, atol=1e-6)\n and self.dtype == other.dtype\n and self.blocksize == other.blocksize\n and self.quant_type == other.quant_type\n and (\n self.offset == other.offset\n if self.offset is not None and other.offset is not None\n else self.offset is other.offset\n )\n and (\n self.state2 == other.state2\n if self.state2 is not None and other.state2 is not None\n else self.state2 is other.state2\n )\n )\n\n\ndef quantize_blockwise(\n A: torch.Tensor,\n code: Optional[torch.Tensor] = None,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize=4096,\n nested=False,\n) -> tuple[torch.Tensor, QuantState]:\n \"\"\"Quantize a tensor in blocks of values.\n\n The input tensor is quantized by dividing it into blocks of `blocksize` values.\n The the absolute maximum value within these blocks is calculated for scaling\n the non-linear quantization.\n\n Args:\n A (`torch.Tensor`): The input tensor. Supports `float16`, `bfloat16`, or `float32` datatypes.\n code (`torch.Tensor`, *optional*):\n A mapping describing the low-bit data type. Defaults to a signed 8-bit dynamic type.","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.quantize_blockwise","uri":"program://bitsandbytes/function/bitsandbytes.functional.quantize_blockwise#L570-L638","kind":"function","name":"quantize_blockwise","path":"bitsandbytes/functional.py","language":"python","start_line":570,"end_line":638,"context_start_line":550,"context_end_line":658,"code":" return (\n torch.allclose(self.absmax, other.absmax, atol=1e-6)\n and self.shape == other.shape\n and torch.allclose(self.code, other.code, atol=1e-6)\n and self.dtype == other.dtype\n and self.blocksize == other.blocksize\n and self.quant_type == other.quant_type\n and (\n self.offset == other.offset\n if self.offset is not None and other.offset is not None\n else self.offset is other.offset\n )\n and (\n self.state2 == other.state2\n if self.state2 is not None and other.state2 is not None\n else self.state2 is other.state2\n )\n )\n\n\ndef quantize_blockwise(\n A: torch.Tensor,\n code: Optional[torch.Tensor] = None,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize=4096,\n nested=False,\n) -> tuple[torch.Tensor, QuantState]:\n \"\"\"Quantize a tensor in blocks of values.\n\n The input tensor is quantized by dividing it into blocks of `blocksize` values.\n The the absolute maximum value within these blocks is calculated for scaling\n the non-linear quantization.\n\n Args:\n A (`torch.Tensor`): The input tensor. Supports `float16`, `bfloat16`, or `float32` datatypes.\n code (`torch.Tensor`, *optional*):\n A mapping describing the low-bit data type. Defaults to a signed 8-bit dynamic type.\n For more details, see (8-Bit Approximations for Parallelism in Deep Learning)[https://arxiv.org/abs/1511.04561].\n absmax (`torch.Tensor`, *optional*): A tensor to use to store the absmax values.\n out (`torch.Tensor`, *optional*): A tensor to use to store the result.\n blocksize (`int`, *optional*):\n The size of the blocks. 
Defaults to 4096.\n Valid values are 64, 128, 256, 512, 1024, 2048, and 4096.\n nested (`bool`, *optional*): Whether to additionally quantize the absmax values. Defaults to False.\n\n Raises:\n ValueError: Raised when the input data type is not supported.\n\n Returns:\n `Tuple[torch.Tensor, QuantState]`: A tuple containing the quantization results.\n - `torch.Tensor`: The quantized tensor.\n - [`QuantState`]: The state object used to undo the quantization.\n \"\"\"\n\n if code is None:\n if \"dynamic\" not in name2qmap:\n name2qmap[\"dynamic\"] = create_dynamic_map().to(A.device)\n code = name2qmap[\"dynamic\"]\n\n _out, _absmax = torch.ops.bitsandbytes.quantize_blockwise.default(\n A,\n code.to(A.device),\n blocksize,\n )\n\n if nested:\n offset = _absmax.mean()\n _absmax -= offset\n qabsmax, state2 = quantize_blockwise(_absmax, blocksize=blocksize, nested=False)\n quant_state = QuantState(\n absmax=qabsmax,\n code=code.to(A.device, copy=True),\n blocksize=blocksize,\n dtype=A.dtype,\n offset=offset,\n state2=state2,\n )\n else:\n quant_state = QuantState(absmax=_absmax, code=code.to(A.device, copy=True), blocksize=blocksize, dtype=A.dtype)\n\n # TODO(matthewdouglas): Deprecate out kwarg\n out = out.copy_(_out) if out is not None else _out\n\n # TODO(matthewdouglas): Deprecate absmax kwarg\n if absmax is not None:\n quant_state.absmax = absmax.copy_(quant_state.absmax)\n\n return out, quant_state\n\n\ndef dequantize_blockwise(\n A: torch.Tensor,\n quant_state: Optional[QuantState] = None,\n absmax: Optional[torch.Tensor] = None,\n code: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize: int = 4096,\n nested=False,\n) -> torch.Tensor:\n \"\"\"Dequantize a tensor in blocks of values.\n\n The input tensor is dequantized by dividing it into blocks of `blocksize` values.\n The absolute maximum value within these blocks is used for scaling\n the non-linear dequantization.\n\n Args:\n A (`torch.Tensor`): The quantized input tensor.\n quant_state ([`QuantState`], *optional*):","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.dequantize_blockwise","uri":"program://bitsandbytes/function/bitsandbytes.functional.dequantize_blockwise#L641-L715","kind":"function","name":"dequantize_blockwise","path":"bitsandbytes/functional.py","language":"python","start_line":641,"end_line":715,"context_start_line":621,"context_end_line":735,"code":" absmax=qabsmax,\n code=code.to(A.device, copy=True),\n blocksize=blocksize,\n dtype=A.dtype,\n offset=offset,\n state2=state2,\n )\n else:\n quant_state = QuantState(absmax=_absmax, code=code.to(A.device, copy=True), blocksize=blocksize, dtype=A.dtype)\n\n # TODO(matthewdouglas): Deprecate out kwarg\n out = out.copy_(_out) if out is not None else _out\n\n # TODO(matthewdouglas): Deprecate absmax kwarg\n if absmax is not None:\n quant_state.absmax = absmax.copy_(quant_state.absmax)\n\n return out, quant_state\n\n\ndef dequantize_blockwise(\n A: torch.Tensor,\n quant_state: Optional[QuantState] = None,\n absmax: Optional[torch.Tensor] = None,\n code: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize: int = 4096,\n nested=False,\n) -> torch.Tensor:\n \"\"\"Dequantize a tensor in blocks of values.\n\n The input tensor is dequantized by dividing it into blocks of `blocksize` values.\n The absolute maximum value within these blocks is used for scaling\n the non-linear dequantization.\n\n 
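# --- Editor's example (illustrative, assumes a CUDA device): the blockwise
# round trip. Each block of 4096 values is scaled by its own absmax and mapped
# through the dynamic 8-bit codebook; nested=True additionally quantizes the
# absmax values themselves, keeping only their mean as a full-precision offset.
import torch
import bitsandbytes.functional as F

A = torch.randn(8192, device="cuda", dtype=torch.float32)
q, state = F.quantize_blockwise(A, blocksize=4096, nested=True)
A_hat = F.dequantize_blockwise(q, quant_state=state)
print((A - A_hat).abs().max())  # small but nonzero reconstruction error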
Args:\n A (`torch.Tensor`): The quantized input tensor.\n quant_state ([`QuantState`], *optional*):\n The quantization state as returned by [`quantize_blockwise`].\n Required if `absmax` is not provided.\n absmax (`torch.Tensor`, *optional*):\n A tensor containing the scaling values.\n Required if `quant_state` is not provided and ignored otherwise.\n code (`torch.Tensor`, *optional*):\n A mapping describing the low-bit data type. Defaults to a signed 8-bit dynamic type.\n For more details, see (8-Bit Approximations for Parallelism in Deep Learning)[https://arxiv.org/abs/1511.04561].\n Ignored when `quant_state` is provided.\n out (`torch.Tensor`, *optional*): A tensor to use to store the result.\n blocksize (`int`, *optional*):\n The size of the blocks. Defaults to 4096.\n Valid values are 64, 128, 256, 512, 1024, 2048, and 4096.\n Ignored when `quant_state` is provided.\n\n Raises:\n ValueError: Raised when the input data type is not supported.\n\n Returns:\n `torch.Tensor`:\n The dequantized tensor. The datatype is indicated by `quant_state.dtype` and defaults to `torch.float32`.\n \"\"\"\n\n assert quant_state is not None or absmax is not None\n if code is None and quant_state is None:\n if \"dynamic\" not in name2qmap:\n name2qmap[\"dynamic\"] = create_dynamic_map().to(A.device)\n code = name2qmap[\"dynamic\"]\n\n if quant_state is None:\n quant_state = QuantState(absmax=absmax, code=code, blocksize=blocksize, dtype=torch.float32)\n\n absmax = quant_state.absmax\n if quant_state.nested:\n absmax = dequantize_blockwise(quant_state.absmax, quant_state.state2)\n absmax += quant_state.offset\n if absmax.dtype != torch.float32:\n absmax = absmax.float()\n\n if out is not None:\n torch.ops.bitsandbytes.dequantize_blockwise.out(\n A,\n absmax,\n quant_state.code.to(A.device),\n quant_state.blocksize,\n quant_state.dtype,\n out=out,\n )\n return out\n\n return torch.ops.bitsandbytes.dequantize_blockwise.default(\n A,\n absmax,\n quant_state.code.to(A.device),\n quant_state.blocksize,\n quant_state.dtype,\n )\n\n\ndef get_4bit_type(typename, device=None, blocksize=64):\n if device is None:\n device = \"cuda\"\n data = None\n if typename == \"nf4\":\n \"\"\" Implements the NF4 data type.\n\n Constructs a quantization data type where each bin has equal area under a standard normal distribution N(0, 1) that\n is normalized into the range [-1, 1].\n\n For more information read the paper: QLoRA: Efficient Finetuning of Quantized LLMs (https://arxiv.org/abs/2305.14314)\n\n Implementation of the NF4 data type in bitsandbytes can be found in the `create_normal_map` function in\n the `functional.py` file: https://github.com/TimDettmers/bitsandbytes/blob/main/bitsandbytes/functional.py#L236.\n \"\"\"\n data = [\n -1.0,\n -0.6961928009986877,","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.get_4bit_type","uri":"program://bitsandbytes/function/bitsandbytes.functional.get_4bit_type#L718-L797","kind":"function","name":"get_4bit_type","path":"bitsandbytes/functional.py","language":"python","start_line":718,"end_line":797,"context_start_line":698,"context_end_line":817,"code":" if out is not None:\n torch.ops.bitsandbytes.dequantize_blockwise.out(\n A,\n absmax,\n quant_state.code.to(A.device),\n quant_state.blocksize,\n quant_state.dtype,\n out=out,\n )\n return out\n\n return torch.ops.bitsandbytes.dequantize_blockwise.default(\n A,\n absmax,\n quant_state.code.to(A.device),\n 
quant_state.blocksize,\n quant_state.dtype,\n )\n\n\ndef get_4bit_type(typename, device=None, blocksize=64):\n if device is None:\n device = \"cuda\"\n data = None\n if typename == \"nf4\":\n \"\"\" Implements the NF4 data type.\n\n Constructs a quantization data type where each bin has equal area under a standard normal distribution N(0, 1) that\n is normalized into the range [-1, 1].\n\n For more information read the paper: QLoRA: Efficient Finetuning of Quantized LLMs (https://arxiv.org/abs/2305.14314)\n\n Implementation of the NF4 data type in bitsandbytes can be found in the `create_normal_map` function in\n the `functional.py` file: https://github.com/TimDettmers/bitsandbytes/blob/main/bitsandbytes/functional.py#L236.\n \"\"\"\n data = [\n -1.0,\n -0.6961928009986877,\n -0.5250730514526367,\n -0.39491748809814453,\n -0.28444138169288635,\n -0.18477343022823334,\n -0.09105003625154495,\n 0.0,\n 0.07958029955625534,\n 0.16093020141124725,\n 0.24611230194568634,\n 0.33791524171829224,\n 0.44070982933044434,\n 0.5626170039176941,\n 0.7229568362236023,\n 1.0,\n ]\n elif typename == \"fp4\":\n # 0b000 = 0\n # 0b001 = 0.0625\n # 0b010 = 8\n # 0b011 = 12\n # 0b100 = 4\n # 0b101 = 6\n # 0b110 = 2\n # 0b111 = 3\n # can also be created with bnb.functional.create_fp8_map(signed=True, exponent_bits=2, precision_bits=1, total_bits=4)\n data = [0, 0.0625, 8.0, 12.0, 4.0, 6.0, 2.0, 3.0, -0, -0.0625, -8.0, -12.0, -4.0, -6.0, -2.0, -3.0]\n elif typename == \"int4\":\n data = [7, 6, 5, 4, 3, 2, 1, 0, -0, -1, -2, -3, -4, -5, -6, -7]\n elif typename == \"af4\":\n # Taken from: NF4 Isn't Information Theoretically Optimal (and that's Good)\n # https://arxiv.org/abs/2306.06965\n if blocksize == 64:\n data = [\n -1.0,\n -0.69441008,\n -0.51243739,\n -0.3736951,\n -0.25607552,\n -0.14982478,\n -0.04934812,\n 0.0,\n 0.04273164,\n 0.12934483,\n 0.21961274,\n 0.31675666,\n 0.42563882,\n 0.55496234,\n 0.72424863,\n 1.0,\n ][::-1]\n else:\n raise NotImplementedError(\"4-bit AbnormalFloats currently only support blocksize 64.\")\n\n if data is None:\n raise NotImplementedError(f\"Typename {typename} not supported\")\n\n data = torch.tensor(data, device=device)\n data.div_(data.abs().max())\n\n assert data.numel() == 16\n\n return data\n\n\ndef quantize_fp4(\n A: torch.Tensor,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize=None,\n compress_statistics=False,\n quant_storage=torch.uint8,\n):\n if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128\n return quantize_4bit(A, absmax, out, blocksize, compress_statistics, \"fp4\", quant_storage)\n\n\ndef quantize_nf4(\n A: torch.Tensor,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize=None,","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.quantize_fp4","uri":"program://bitsandbytes/function/bitsandbytes.functional.quantize_fp4#L800-L810","kind":"function","name":"quantize_fp4","path":"bitsandbytes/functional.py","language":"python","start_line":800,"end_line":810,"context_start_line":780,"context_end_line":830,"code":" 0.31675666,\n 0.42563882,\n 0.55496234,\n 0.72424863,\n 1.0,\n ][::-1]\n else:\n raise NotImplementedError(\"4-bit AbnormalFloats currently only support blocksize 64.\")\n\n if data is None:\n raise NotImplementedError(f\"Typename {typename} not supported\")\n\n data = torch.tensor(data, device=device)\n data.div_(data.abs().max())\n\n assert 
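# --- Editor's example (illustrative): every 4-bit type above is just a 16-entry
# codebook normalized into [-1, 1]; each packed nibble indexes into this table.
import bitsandbytes.functional as F

nf4 = F.get_4bit_type("nf4", device="cpu")
fp4 = F.get_4bit_type("fp4", device="cpu")  # fp4 entries are divided by 12, their max magnitude
assert nf4.numel() == 16 and nf4.abs().max() == 1.0
assert fp4.numel() == 16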
data.numel() == 16\n\n return data\n\n\ndef quantize_fp4(\n A: torch.Tensor,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize=None,\n compress_statistics=False,\n quant_storage=torch.uint8,\n):\n if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128\n return quantize_4bit(A, absmax, out, blocksize, compress_statistics, \"fp4\", quant_storage)\n\n\ndef quantize_nf4(\n A: torch.Tensor,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize=None,\n compress_statistics=False,\n quant_storage=torch.uint8,\n):\n if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128\n return quantize_4bit(A, absmax, out, blocksize, compress_statistics, \"nf4\", quant_storage)\n\n\ndef quantize_4bit(\n A: torch.Tensor,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize=None,","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.quantize_nf4","uri":"program://bitsandbytes/function/bitsandbytes.functional.quantize_nf4#L813-L823","kind":"function","name":"quantize_nf4","path":"bitsandbytes/functional.py","language":"python","start_line":813,"end_line":823,"context_start_line":793,"context_end_line":843,"code":" data.div_(data.abs().max())\n\n assert data.numel() == 16\n\n return data\n\n\ndef quantize_fp4(\n A: torch.Tensor,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize=None,\n compress_statistics=False,\n quant_storage=torch.uint8,\n):\n if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128\n return quantize_4bit(A, absmax, out, blocksize, compress_statistics, \"fp4\", quant_storage)\n\n\ndef quantize_nf4(\n A: torch.Tensor,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize=None,\n compress_statistics=False,\n quant_storage=torch.uint8,\n):\n if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128\n return quantize_4bit(A, absmax, out, blocksize, compress_statistics, \"nf4\", quant_storage)\n\n\ndef quantize_4bit(\n A: torch.Tensor,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize=None,\n compress_statistics=False,\n quant_type=\"fp4\",\n quant_storage=torch.uint8,\n) -> tuple[torch.Tensor, QuantState]:\n \"\"\"Quantize tensor A in blocks of 4-bit values.\n\n Quantizes tensor A by dividing it into blocks which are independently quantized.\n\n Args:\n A (`torch.Tensor`): The input tensor. 
Supports `float16`, `bfloat16`, or `float32` datatypes.\n absmax (`torch.Tensor`, *optional*): A tensor to use to store the absmax values.\n out (`torch.Tensor`, *optional*): A tensor to use to store the result.\n blocksize (`int`, *optional*):","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.quantize_4bit","uri":"program://bitsandbytes/function/bitsandbytes.functional.quantize_4bit#L826-L904","kind":"function","name":"quantize_4bit","path":"bitsandbytes/functional.py","language":"python","start_line":826,"end_line":904,"context_start_line":806,"context_end_line":924,"code":" quant_storage=torch.uint8,\n):\n if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128\n return quantize_4bit(A, absmax, out, blocksize, compress_statistics, \"fp4\", quant_storage)\n\n\ndef quantize_nf4(\n A: torch.Tensor,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize=None,\n compress_statistics=False,\n quant_storage=torch.uint8,\n):\n if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128\n return quantize_4bit(A, absmax, out, blocksize, compress_statistics, \"nf4\", quant_storage)\n\n\ndef quantize_4bit(\n A: torch.Tensor,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize=None,\n compress_statistics=False,\n quant_type=\"fp4\",\n quant_storage=torch.uint8,\n) -> tuple[torch.Tensor, QuantState]:\n \"\"\"Quantize tensor A in blocks of 4-bit values.\n\n Quantizes tensor A by dividing it into blocks which are independently quantized.\n\n Args:\n A (`torch.Tensor`): The input tensor. Supports `float16`, `bfloat16`, or `float32` datatypes.\n absmax (`torch.Tensor`, *optional*): A tensor to use to store the absmax values.\n out (`torch.Tensor`, *optional*): A tensor to use to store the result.\n blocksize (`int`, *optional*):\n The size of the blocks. Defaults to 128 on ROCm and 64 otherwise.\n Valid values are 64, 128, 256, 512, 1024, 2048, and 4096.\n compress_statistics (`bool`, *optional*): Whether to additionally quantize the absmax values. Defaults to False.\n quant_type (`str`, *optional*): The data type to use: `nf4` or `fp4`. Defaults to `fp4`.\n quant_storage (`torch.dtype`, *optional*): The dtype of the tensor used to store the result. 
Defaults to `torch.uint8`.\n\n Raises:\n ValueError: Raised when the input data type is not supported.\n\n Returns:\n Tuple[`torch.Tensor`, `QuantState`]: A tuple containing the quantization results.\n - `torch.Tensor`: The quantized tensor with packed 4-bit values.\n - [`QuantState`]: The state object used to undo the quantization.\n \"\"\"\n\n if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128\n\n input_shape = A.shape\n\n _out, _absmax = torch.ops.bitsandbytes.quantize_4bit.default(\n A,\n blocksize,\n quant_type,\n quant_storage,\n )\n\n code = get_4bit_type(quant_type, device=A.device)\n\n if compress_statistics:\n offset = _absmax.mean()\n qabsmax, state2 = quantize_blockwise(_absmax - offset, blocksize=256)\n del _absmax\n state = QuantState(\n absmax=qabsmax,\n shape=input_shape,\n dtype=A.dtype,\n blocksize=blocksize,\n code=code,\n quant_type=quant_type,\n offset=offset,\n state2=state2,\n )\n else:\n state = QuantState(\n absmax=_absmax,\n shape=input_shape,\n dtype=A.dtype,\n blocksize=blocksize,\n code=code,\n quant_type=quant_type,\n )\n\n # TODO(matthewdouglas): Deprecate out kwarg\n out = out.copy_(_out) if out is not None else _out\n\n # TODO(matthewdouglas): Deprecate absmax kwarg\n if absmax is not None:\n state.absmax = absmax.copy_(state.absmax)\n\n return out, state\n\n\ndef dequantize_fp4(\n A: torch.Tensor,\n quant_state: Optional[QuantState] = None,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize: Optional[int] = None,\n) -> torch.Tensor:\n if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128\n return dequantize_4bit(A, quant_state, absmax, out, blocksize, \"fp4\")\n\n\ndef dequantize_nf4(\n A: torch.Tensor,\n quant_state: Optional[QuantState] = None,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize: Optional[int] = None,","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.dequantize_fp4","uri":"program://bitsandbytes/function/bitsandbytes.functional.dequantize_fp4#L907-L916","kind":"function","name":"dequantize_fp4","path":"bitsandbytes/functional.py","language":"python","start_line":907,"end_line":916,"context_start_line":887,"context_end_line":936,"code":" else:\n state = QuantState(\n absmax=_absmax,\n shape=input_shape,\n dtype=A.dtype,\n blocksize=blocksize,\n code=code,\n quant_type=quant_type,\n )\n\n # TODO(matthewdouglas): Deprecate out kwarg\n out = out.copy_(_out) if out is not None else _out\n\n # TODO(matthewdouglas): Deprecate absmax kwarg\n if absmax is not None:\n state.absmax = absmax.copy_(state.absmax)\n\n return out, state\n\n\ndef dequantize_fp4(\n A: torch.Tensor,\n quant_state: Optional[QuantState] = None,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize: Optional[int] = None,\n) -> torch.Tensor:\n if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128\n return dequantize_4bit(A, quant_state, absmax, out, blocksize, \"fp4\")\n\n\ndef dequantize_nf4(\n A: torch.Tensor,\n quant_state: Optional[QuantState] = None,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize: Optional[int] = None,\n) -> torch.Tensor:\n if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128\n return dequantize_4bit(A, quant_state, absmax, out, blocksize, \"nf4\")\n\n\ndef dequantize_4bit(\n A: torch.Tensor,\n quant_state: 
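# --- Editor's example (illustrative, assumes a CUDA device): NF4 quantization
# with double quantization of the statistics, as in QLoRA. With the default
# uint8 storage, two 4-bit codes are packed into each byte.
import torch
import bitsandbytes.functional as F

W = torch.randn(1024, 1024, dtype=torch.bfloat16, device="cuda")
W4, state = F.quantize_4bit(W, quant_type="nf4", compress_statistics=True)
assert W4.dtype == torch.uint8 and W4.numel() == W.numel() // 2
W_hat = F.dequantize_4bit(W4, quant_state=state)  # shape/dtype restored from state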
Optional[QuantState] = None,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize: Optional[int] = None,","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.dequantize_nf4","uri":"program://bitsandbytes/function/bitsandbytes.functional.dequantize_nf4#L919-L928","kind":"function","name":"dequantize_nf4","path":"bitsandbytes/functional.py","language":"python","start_line":919,"end_line":928,"context_start_line":899,"context_end_line":948,"code":"\n # TODO(matthewdouglas): Deprecate absmax kwarg\n if absmax is not None:\n state.absmax = absmax.copy_(state.absmax)\n\n return out, state\n\n\ndef dequantize_fp4(\n A: torch.Tensor,\n quant_state: Optional[QuantState] = None,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize: Optional[int] = None,\n) -> torch.Tensor:\n if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128\n return dequantize_4bit(A, quant_state, absmax, out, blocksize, \"fp4\")\n\n\ndef dequantize_nf4(\n A: torch.Tensor,\n quant_state: Optional[QuantState] = None,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize: Optional[int] = None,\n) -> torch.Tensor:\n if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128\n return dequantize_4bit(A, quant_state, absmax, out, blocksize, \"nf4\")\n\n\ndef dequantize_4bit(\n A: torch.Tensor,\n quant_state: Optional[QuantState] = None,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize: Optional[int] = None,\n quant_type=\"fp4\",\n) -> torch.Tensor:\n \"\"\"Dequantizes a packed 4-bit quantized tensor.\n\n The input tensor is dequantized by dividing it into blocks of `blocksize` values.\n The the absolute maximum value within these blocks is used for scaling\n the non-linear dequantization.\n\n Args:\n A (`torch.Tensor`): The quantized input tensor.\n quant_state ([`QuantState`], *optional*):\n The quantization state as returned by [`quantize_4bit`].","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.dequantize_4bit","uri":"program://bitsandbytes/function/bitsandbytes.functional.dequantize_4bit#L931-L1005","kind":"function","name":"dequantize_4bit","path":"bitsandbytes/functional.py","language":"python","start_line":931,"end_line":1005,"context_start_line":911,"context_end_line":1025,"code":" out: Optional[torch.Tensor] = None,\n blocksize: Optional[int] = None,\n) -> torch.Tensor:\n if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128\n return dequantize_4bit(A, quant_state, absmax, out, blocksize, \"fp4\")\n\n\ndef dequantize_nf4(\n A: torch.Tensor,\n quant_state: Optional[QuantState] = None,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize: Optional[int] = None,\n) -> torch.Tensor:\n if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128\n return dequantize_4bit(A, quant_state, absmax, out, blocksize, \"nf4\")\n\n\ndef dequantize_4bit(\n A: torch.Tensor,\n quant_state: Optional[QuantState] = None,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize: Optional[int] = None,\n quant_type=\"fp4\",\n) -> torch.Tensor:\n \"\"\"Dequantizes a packed 4-bit quantized tensor.\n\n The input tensor is dequantized by dividing it 
into blocks of `blocksize` values.\n The absolute maximum value within these blocks is used for scaling\n the non-linear dequantization.\n\n Args:\n A (`torch.Tensor`): The quantized input tensor.\n quant_state ([`QuantState`], *optional*):\n The quantization state as returned by [`quantize_4bit`].\n Required if `absmax` is not provided.\n absmax (`torch.Tensor`, *optional*):\n A tensor containing the scaling values.\n Required if `quant_state` is not provided and ignored otherwise.\n out (`torch.Tensor`, *optional*): A tensor to use to store the result.\n blocksize (`int`, *optional*):\n The size of the blocks. Defaults to 128 on ROCm and 64 otherwise.\n Valid values are 64, 128, 256, 512, 1024, 2048, and 4096.\n quant_type (`str`, *optional*): The data type to use: `nf4` or `fp4`. Defaults to `fp4`.\n\n Raises:\n ValueError: Raised when the input data type or blocksize is not supported.\n\n Returns:\n `torch.Tensor`: The dequantized tensor.\n \"\"\"\n\n if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128\n\n if quant_state is None:\n assert absmax is not None and out is not None\n\n quant_state = QuantState(\n absmax=absmax,\n shape=out.shape,\n dtype=out.dtype,\n blocksize=blocksize,\n quant_type=quant_type,\n )\n\n else:\n absmax = quant_state.absmax\n\n if quant_state.nested:\n absmax = dequantize_blockwise(quant_state.absmax, quant_state.state2)\n absmax += quant_state.offset\n if absmax.dtype != torch.float32:\n absmax = absmax.float()\n\n if out is not None:\n torch.ops.bitsandbytes.dequantize_4bit.out(\n A, absmax, quant_state.blocksize, quant_state.quant_type, quant_state.shape, quant_state.dtype, out=out\n )\n else:\n out = torch.ops.bitsandbytes.dequantize_4bit.default(\n A,\n absmax,\n quant_state.blocksize,\n quant_state.quant_type,\n quant_state.shape,\n quant_state.dtype,\n )\n\n if A.shape[0] == 1: # is transposed, transpose back\n return out.t()\n return out\n\n\n@deprecated(\"This function is deprecated and will be removed in a future release.\", category=FutureWarning)\ndef quantize(\n A: Tensor,\n code: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n) -> tuple[Tensor, tuple[Tensor, Tensor]]:\n if code is None:\n if \"dynamic\" not in name2qmap:\n name2qmap[\"dynamic\"] = create_dynamic_map().to(A.device)\n code = name2qmap[\"dynamic\"]\n code = code.to(A.device)\n\n absmax = torch.abs(A).max()\n if absmax.dtype != torch.float32:\n absmax = absmax.float()\n inp = A / absmax\n out = quantize_no_absmax(inp, code, out)\n return out, (absmax, code)","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.quantize","uri":"program://bitsandbytes/function/bitsandbytes.functional.quantize#L1009-L1025","kind":"function","name":"quantize","path":"bitsandbytes/functional.py","language":"python","start_line":1009,"end_line":1025,"context_start_line":989,"context_end_line":1045,"code":" if out is not None:\n torch.ops.bitsandbytes.dequantize_4bit.out(\n A, absmax, quant_state.blocksize, quant_state.quant_type, quant_state.shape, quant_state.dtype, out=out\n )\n else:\n out = torch.ops.bitsandbytes.dequantize_4bit.default(\n A,\n absmax,\n quant_state.blocksize,\n quant_state.quant_type,\n quant_state.shape,\n quant_state.dtype,\n )\n\n if A.shape[0] == 1: # is transposed, transpose back\n return out.t()\n return out\n\n\n@deprecated(\"This function is deprecated and will be removed in a future release.\", 
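# --- Editor's example (illustrative): the stateless branch of dequantize_4bit
# shown above. Without a QuantState, both `absmax` and `out` must be supplied so
# the output shape and dtype are known. This only makes sense for states created
# without compress_statistics, since a nested absmax would itself be quantized.
import torch
import bitsandbytes.functional as F

W = torch.randn(256, 256, dtype=torch.float16, device="cuda")
W4, state = F.quantize_4bit(W, quant_type="nf4", compress_statistics=False)
out = torch.empty(state.shape, dtype=state.dtype, device=W4.device)
W_hat = F.dequantize_4bit(W4, absmax=state.absmax, out=out,
                          blocksize=state.blocksize, quant_type=state.quant_type)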
category=FutureWarning)\ndef quantize(\n A: Tensor,\n code: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n) -> tuple[Tensor, tuple[Tensor, Tensor]]:\n if code is None:\n if \"dynamic\" not in name2qmap:\n name2qmap[\"dynamic\"] = create_dynamic_map().to(A.device)\n code = name2qmap[\"dynamic\"]\n code = code.to(A.device)\n\n absmax = torch.abs(A).max()\n if absmax.dtype != torch.float32:\n absmax = absmax.float()\n inp = A / absmax\n out = quantize_no_absmax(inp, code, out)\n return out, (absmax, code)\n\n\n@deprecated(\"This function is deprecated and will be removed in a future release.\", category=FutureWarning)\ndef dequantize(\n A: Tensor,\n state: Optional[tuple[Tensor, Tensor]] = None,\n absmax: Optional[torch.Tensor] = None,\n code: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n) -> Tensor:\n assert state is not None or absmax is not None\n if code is None and state is None:\n if \"dynamic\" not in name2qmap:\n name2qmap[\"dynamic\"] = create_dynamic_map().to(A.device)\n code = name2qmap[\"dynamic\"]\n code = code.to(A.device)\n\n if state is None:\n state = (absmax, code)\n out = dequantize_no_absmax(A, state[1], out)","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.dequantize","uri":"program://bitsandbytes/function/bitsandbytes.functional.dequantize#L1029-L1046","kind":"function","name":"dequantize","path":"bitsandbytes/functional.py","language":"python","start_line":1029,"end_line":1046,"context_start_line":1009,"context_end_line":1066,"code":"def quantize(\n A: Tensor,\n code: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n) -> tuple[Tensor, tuple[Tensor, Tensor]]:\n if code is None:\n if \"dynamic\" not in name2qmap:\n name2qmap[\"dynamic\"] = create_dynamic_map().to(A.device)\n code = name2qmap[\"dynamic\"]\n code = code.to(A.device)\n\n absmax = torch.abs(A).max()\n if absmax.dtype != torch.float32:\n absmax = absmax.float()\n inp = A / absmax\n out = quantize_no_absmax(inp, code, out)\n return out, (absmax, code)\n\n\n@deprecated(\"This function is deprecated and will be removed in a future release.\", category=FutureWarning)\ndef dequantize(\n A: Tensor,\n state: Optional[tuple[Tensor, Tensor]] = None,\n absmax: Optional[torch.Tensor] = None,\n code: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n) -> Tensor:\n assert state is not None or absmax is not None\n if code is None and state is None:\n if \"dynamic\" not in name2qmap:\n name2qmap[\"dynamic\"] = create_dynamic_map().to(A.device)\n code = name2qmap[\"dynamic\"]\n code = code.to(A.device)\n\n if state is None:\n state = (absmax, code)\n out = dequantize_no_absmax(A, state[1], out)\n return out * state[0]\n\n\n@deprecated(\"This function is deprecated and will be removed in a future release.\", category=FutureWarning)\ndef quantize_no_absmax(A: Tensor, code: Tensor, out: Optional[torch.Tensor] = None) -> Tensor:\n \"\"\"\n Quantizes input tensor to 8-bit.\n\n Quantizes the 32-bit input tensor `A` to the 8-bit output tensor\n `out` using the quantization map `code`.\n\n Parameters\n ----------\n A : torch.Tensor\n The input tensor.\n code : torch.Tensor\n The quantization map.\n out : torch.Tensor, optional\n The output tensor. 
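# --- Editor's example (illustrative, CUDA assumed): what the deprecated
# tensor-wise pair above computes. A single absmax scales the whole tensor
# (rather than one scale per block), followed by an 8-bit codebook lookup;
# both calls emit a FutureWarning.
import torch
import bitsandbytes.functional as F

A = torch.randn(256, device="cuda")
q8, (absmax, code) = F.quantize(A)
A_hat = F.dequantize(q8, state=(absmax, code))  # dequantize_no_absmax(q8, code) * absmax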
Needs to be of type byte.\n\n Returns","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.quantize_no_absmax","uri":"program://bitsandbytes/function/bitsandbytes.functional.quantize_no_absmax#L1050-L1077","kind":"function","name":"quantize_no_absmax","path":"bitsandbytes/functional.py","language":"python","start_line":1050,"end_line":1077,"context_start_line":1030,"context_end_line":1097,"code":" A: Tensor,\n state: Optional[tuple[Tensor, Tensor]] = None,\n absmax: Optional[torch.Tensor] = None,\n code: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n) -> Tensor:\n assert state is not None or absmax is not None\n if code is None and state is None:\n if \"dynamic\" not in name2qmap:\n name2qmap[\"dynamic\"] = create_dynamic_map().to(A.device)\n code = name2qmap[\"dynamic\"]\n code = code.to(A.device)\n\n if state is None:\n state = (absmax, code)\n out = dequantize_no_absmax(A, state[1], out)\n return out * state[0]\n\n\n@deprecated(\"This function is deprecated and will be removed in a future release.\", category=FutureWarning)\ndef quantize_no_absmax(A: Tensor, code: Tensor, out: Optional[torch.Tensor] = None) -> Tensor:\n \"\"\"\n Quantizes input tensor to 8-bit.\n\n Quantizes the 32-bit input tensor `A` to the 8-bit output tensor\n `out` using the quantization map `code`.\n\n Parameters\n ----------\n A : torch.Tensor\n The input tensor.\n code : torch.Tensor\n The quantization map.\n out : torch.Tensor, optional\n The output tensor. Needs to be of type byte.\n\n Returns\n -------\n torch.Tensor:\n Quantized 8-bit tensor.\n \"\"\"\n with _cuda_device_of(A):\n if out is None:\n out = torch.zeros_like(A, dtype=torch.uint8)\n is_on_gpu([A, out])\n lib.cquantize(get_ptr(code), get_ptr(A), get_ptr(out), ct.c_int(A.numel()))\n\n return out\n\n\n@deprecated(\"This function is deprecated and will be removed in a future release.\", category=FutureWarning)\ndef dequantize_no_absmax(A: Tensor, code: Tensor, out: Optional[torch.Tensor] = None) -> Tensor:\n \"\"\"\n Dequantizes the 8-bit tensor to 32-bit.\n\n Dequantizes the 8-bit tensor `A` to the 32-bit tensor `out` via\n the quantization map `code`.\n\n Parameters\n ----------\n A : torch.Tensor\n The 8-bit input tensor.\n code : torch.Tensor\n The quantization map.\n out : torch.Tensor\n The 32-bit output tensor.\n\n Returns","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.dequantize_no_absmax","uri":"program://bitsandbytes/function/bitsandbytes.functional.dequantize_no_absmax#L1081-L1109","kind":"function","name":"dequantize_no_absmax","path":"bitsandbytes/functional.py","language":"python","start_line":1081,"end_line":1109,"context_start_line":1061,"context_end_line":1129,"code":" code : torch.Tensor\n The quantization map.\n out : torch.Tensor, optional\n The output tensor. 
Needs to be of type byte.\n\n Returns\n -------\n torch.Tensor:\n Quantized 8-bit tensor.\n \"\"\"\n with _cuda_device_of(A):\n if out is None:\n out = torch.zeros_like(A, dtype=torch.uint8)\n is_on_gpu([A, out])\n lib.cquantize(get_ptr(code), get_ptr(A), get_ptr(out), ct.c_int(A.numel()))\n\n return out\n\n\n@deprecated(\"This function is deprecated and will be removed in a future release.\", category=FutureWarning)\ndef dequantize_no_absmax(A: Tensor, code: Tensor, out: Optional[torch.Tensor] = None) -> Tensor:\n \"\"\"\n Dequantizes the 8-bit tensor to 32-bit.\n\n Dequantizes the 8-bit tensor `A` to the 32-bit tensor `out` via\n the quantization map `code`.\n\n Parameters\n ----------\n A : torch.Tensor\n The 8-bit input tensor.\n code : torch.Tensor\n The quantization map.\n out : torch.Tensor\n The 32-bit output tensor.\n\n Returns\n -------\n torch.Tensor:\n 32-bit output tensor.\n \"\"\"\n with _cuda_device_of(A):\n if out is None:\n out = torch.zeros_like(A, dtype=torch.float32)\n is_on_gpu([code, A, out])\n stream = _get_tensor_stream(A)\n lib.cdequantize(get_ptr(code), get_ptr(A), get_ptr(out), ct.c_int(A.numel()), stream)\n\n return out\n\n\ndef optimizer_update_32bit(\n optimizer_name: str,\n g: Tensor,\n p: Tensor,\n state1: Tensor,\n beta1: float,\n eps: float,\n step: int,\n lr: float,\n state2: Optional[torch.Tensor] = None,\n beta2: float = 0.0,\n beta3: float = 0.0,\n alpha: float = 0.0,\n weight_decay: float = 0.0,\n gnorm_scale: float = 1.0,\n unorm_vec: Optional[torch.Tensor] = None,\n max_unorm: float = 0.0,\n skip_zeros=False,","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.optimizer_update_32bit","uri":"program://bitsandbytes/function/bitsandbytes.functional.optimizer_update_32bit#L1112-L1198","kind":"function","name":"optimizer_update_32bit","path":"bitsandbytes/functional.py","language":"python","start_line":1112,"end_line":1198,"context_start_line":1092,"context_end_line":1218,"code":" code : torch.Tensor\n The quantization map.\n out : torch.Tensor\n The 32-bit output tensor.\n\n Returns\n -------\n torch.Tensor:\n 32-bit output tensor.\n \"\"\"\n with _cuda_device_of(A):\n if out is None:\n out = torch.zeros_like(A, dtype=torch.float32)\n is_on_gpu([code, A, out])\n stream = _get_tensor_stream(A)\n lib.cdequantize(get_ptr(code), get_ptr(A), get_ptr(out), ct.c_int(A.numel()), stream)\n\n return out\n\n\ndef optimizer_update_32bit(\n optimizer_name: str,\n g: Tensor,\n p: Tensor,\n state1: Tensor,\n beta1: float,\n eps: float,\n step: int,\n lr: float,\n state2: Optional[torch.Tensor] = None,\n beta2: float = 0.0,\n beta3: float = 0.0,\n alpha: float = 0.0,\n weight_decay: float = 0.0,\n gnorm_scale: float = 1.0,\n unorm_vec: Optional[torch.Tensor] = None,\n max_unorm: float = 0.0,\n skip_zeros=False,\n) -> None:\n \"\"\"\n Performs an inplace optimizer update with one or two optimizer states.\n\n Universal optimizer update for 32-bit state and 32/16-bit gradients/weights.\n\n Parameters\n ----------\n optimizer_name : str\n The name of the optimizer: {adam}.\n g : torch.Tensor\n Gradient tensor.\n p : torch.Tensor\n Parameter tensor.\n state1 : torch.Tensor\n Optimizer state 1.\n beta1 : float\n Optimizer beta1.\n eps : float\n Optimizer epsilon.\n weight_decay : float\n Weight decay.\n step : int\n Current optimizer step.\n lr : float\n The learning rate.\n state2 : torch.Tensor\n Optimizer state 2.\n beta2 : float\n Optimizer beta2.\n beta3 : 
float\n Optimizer beta3.\n alpha : float\n Optimizer alpha.\n gnorm_scale : float\n The factor to rescale the gradient to the max clip value.\n unorm_vec : torch.Tensor\n The tensor for the update norm.\n max_unorm : float\n The maximum update norm relative to the weight norm.\n skip_zeros : bool\n Whether to skip zero-valued gradients or not (default: False).\n \"\"\"\n\n param_norm = 0.0\n if max_unorm > 0.0:\n param_norm = torch.norm(p.data.float())\n\n is_on_gpu([g, p, state1, state2, unorm_vec])\n torch.ops.bitsandbytes.optimizer_update_32bit(\n optimizer_name,\n g,\n p,\n state1,\n state2,\n unorm_vec,\n max_unorm,\n param_norm,\n beta1,\n beta2,\n beta3,\n alpha,\n eps,\n weight_decay,\n step,\n lr,\n gnorm_scale,\n skip_zeros,\n )\n\n\n@deprecated(\n \"This function is deprecated and will be removed in a future release. \"\n \"Please use optimizer_update_8bit_blockwise instead. \",\n category=FutureWarning,\n)\ndef optimizer_update_8bit(\n optimizer_name: str,\n g: Tensor,\n p: Tensor,\n state1: Tensor,\n state2: Optional[torch.Tensor],\n beta1: float,\n beta2: float,\n eps: float,\n step: int,\n lr: float,\n qmap1: Tensor,\n qmap2: Optional[torch.Tensor],","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.optimizer_update_8bit","uri":"program://bitsandbytes/function/bitsandbytes.functional.optimizer_update_8bit#L1206-L1335","kind":"function","name":"optimizer_update_8bit","path":"bitsandbytes/functional.py","language":"python","start_line":1206,"end_line":1335,"context_start_line":1186,"context_end_line":1355,"code":" max_unorm,\n param_norm,\n beta1,\n beta2,\n beta3,\n alpha,\n eps,\n weight_decay,\n step,\n lr,\n gnorm_scale,\n skip_zeros,\n )\n\n\n@deprecated(\n \"This function is deprecated and will be removed in a future release. \"\n \"Please use optimizer_update_8bit_blockwise instead. \",\n category=FutureWarning,\n)\ndef optimizer_update_8bit(\n optimizer_name: str,\n g: Tensor,\n p: Tensor,\n state1: Tensor,\n state2: Optional[torch.Tensor],\n beta1: float,\n beta2: float,\n eps: float,\n step: int,\n lr: float,\n qmap1: Tensor,\n qmap2: Optional[torch.Tensor],\n max1: Tensor,\n max2: Optional[torch.Tensor],\n new_max1: Tensor,\n new_max2: Optional[torch.Tensor],\n weight_decay: float = 0.0,\n gnorm_scale: float = 1.0,\n unorm_vec: Optional[torch.Tensor] = None,\n max_unorm: float = 0.0,\n) -> None:\n \"\"\"\n Performs an inplace Adam update.\n\n Universal Adam update for 32/8-bit state and 32/16-bit gradients/weights.\n Uses AdamW formulation if weight decay > 0.0.\n\n Parameters\n ----------\n optimizer_name : str\n The name of the optimizer. 
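# --- Editor's example (illustrative, CUDA assumed): one fused 32-bit Adam step
# with optimizer_update_32bit. state1/state2 hold the first and second moments;
# the parameter update happens in place on `p` in a single kernel launch.
import torch
import bitsandbytes.functional as F

p = torch.randn(1024, device="cuda")
g = torch.randn_like(p)   # gradient
m = torch.zeros_like(p)   # state1: first moment
v = torch.zeros_like(p)   # state2: second moment
F.optimizer_update_32bit("adam", g, p, m, beta1=0.9, eps=1e-8, step=1,
                         lr=1e-3, state2=v, beta2=0.999, weight_decay=0.01)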
Choices {adam, momentum}\n g : torch.Tensor\n Gradient tensor.\n p : torch.Tensor\n Parameter tensor.\n state1 : torch.Tensor\n Adam state 1.\n state2 : torch.Tensor\n Adam state 2.\n beta1 : float\n Adam beta1.\n beta2 : float\n Adam beta2.\n eps : float\n Adam epsilon.\n weight_decay : float\n Weight decay.\n step : int\n Current optimizer step.\n lr : float\n The learning rate.\n qmap1 : torch.Tensor\n Quantization map for first Adam state.\n qmap2 : torch.Tensor\n Quantization map for second Adam state.\n max1 : torch.Tensor\n Max value for first Adam state update.\n max2 : torch.Tensor\n Max value for second Adam state update.\n new_max1 : torch.Tensor\n Max value for the next Adam update of the first state.\n new_max2 : torch.Tensor\n Max value for the next Adam update of the second state.\n gnorm_scale : float\n The factor to rescale the gradient to the max clip value.\n unorm_vec : torch.Tensor\n The tensor for the update norm.\n max_unorm : float\n The maximum update norm relative to the weight norm.\n \"\"\"\n\n param_norm = 0.0\n if max_unorm > 0.0:\n param_norm = torch.norm(p.data.float())\n\n with _cuda_device_of(g):\n is_on_gpu([g, p, state1, state2, unorm_vec, qmap1, qmap2, max1, max2, new_max1, new_max2])\n if g.dtype == torch.float32 and state1.dtype == torch.uint8:\n str2optimizer8bit[optimizer_name][0](\n get_ptr(p),\n get_ptr(g),\n get_ptr(state1),\n get_ptr(state2),\n get_ptr(unorm_vec),\n ct.c_float(max_unorm),\n ct.c_float(param_norm),\n ct.c_float(beta1),\n ct.c_float(beta2),\n ct.c_float(eps),\n ct.c_int32(step),\n ct.c_float(lr),\n get_ptr(qmap1),\n get_ptr(qmap2),\n get_ptr(max1),\n get_ptr(max2),\n get_ptr(new_max1),\n get_ptr(new_max2),\n ct.c_float(weight_decay),\n ct.c_float(gnorm_scale),\n ct.c_int32(g.numel()),\n )\n elif g.dtype == torch.float16 and state1.dtype == torch.uint8:\n str2optimizer8bit[optimizer_name][1](\n get_ptr(p),\n get_ptr(g),\n get_ptr(state1),\n get_ptr(state2),\n get_ptr(unorm_vec),\n ct.c_float(max_unorm),\n ct.c_float(param_norm),\n ct.c_float(beta1),\n ct.c_float(beta2),\n ct.c_float(eps),\n ct.c_int32(step),\n ct.c_float(lr),\n get_ptr(qmap1),\n get_ptr(qmap2),\n get_ptr(max1),\n get_ptr(max2),\n get_ptr(new_max1),\n get_ptr(new_max2),\n ct.c_float(weight_decay),\n ct.c_float(gnorm_scale),\n ct.c_int32(g.numel()),\n )\n else:\n raise ValueError(\n f\"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}\",\n )\n\n\ndef optimizer_update_8bit_blockwise(\n optimizer_name: str,\n g: Tensor,\n p: Tensor,\n state1: Tensor,\n state2: Optional[torch.Tensor],\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n step: int,\n lr: float,\n qmap1: Tensor,\n qmap2: Optional[torch.Tensor],\n absmax1: Tensor,\n absmax2: Optional[torch.Tensor],\n weight_decay: float = 0.0,","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.optimizer_update_8bit_blockwise","uri":"program://bitsandbytes/function/bitsandbytes.functional.optimizer_update_8bit_blockwise#L1338-L1381","kind":"function","name":"optimizer_update_8bit_blockwise","path":"bitsandbytes/functional.py","language":"python","start_line":1338,"end_line":1381,"context_start_line":1318,"context_end_line":1401,"code":" ct.c_float(beta2),\n ct.c_float(eps),\n ct.c_int32(step),\n ct.c_float(lr),\n get_ptr(qmap1),\n get_ptr(qmap2),\n get_ptr(max1),\n get_ptr(max2),\n get_ptr(new_max1),\n get_ptr(new_max2),\n 
ct.c_float(weight_decay),\n ct.c_float(gnorm_scale),\n ct.c_int32(g.numel()),\n )\n else:\n raise ValueError(\n f\"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}\",\n )\n\n\ndef optimizer_update_8bit_blockwise(\n optimizer_name: str,\n g: Tensor,\n p: Tensor,\n state1: Tensor,\n state2: Optional[torch.Tensor],\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n step: int,\n lr: float,\n qmap1: Tensor,\n qmap2: Optional[torch.Tensor],\n absmax1: Tensor,\n absmax2: Optional[torch.Tensor],\n weight_decay: float = 0.0,\n gnorm_scale: float = 1.0,\n skip_zeros=False,\n) -> None:\n is_on_gpu([p, g, state1, state2, qmap1, qmap2, absmax1, absmax2])\n\n torch.ops.bitsandbytes.optimizer_update_8bit_blockwise(\n optimizer_name,\n g,\n p,\n state1,\n state2,\n beta1,\n beta2,\n beta3,\n alpha,\n eps,\n step,\n lr,\n qmap1,\n qmap2,\n absmax1,\n absmax2,\n weight_decay,\n gnorm_scale,\n skip_zeros,\n )\n\n\n@deprecated(\"This function is deprecated and will be removed in a future release.\", category=FutureWarning)\ndef percentile_clipping(grad: Tensor, gnorm_vec: Tensor, step: int, percentile: int = 5):\n \"\"\"Applies percentile clipping\n\n grad: torch.Tensor\n The gradient tensor.\n gnorm_vec: torch.Tensor\n Vector of gradient norms. 100 elements expected.\n step: int\n The current optimization steps (number of past gradient norms).\n\n \"\"\"\n with _cuda_device_of(grad):\n is_on_gpu([grad, gnorm_vec])\n if grad.dtype == torch.float32:\n lib.cpercentile_clipping_g32(\n get_ptr(grad),\n get_ptr(gnorm_vec),","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.percentile_clipping","uri":"program://bitsandbytes/function/bitsandbytes.functional.percentile_clipping#L1385-L1423","kind":"function","name":"percentile_clipping","path":"bitsandbytes/functional.py","language":"python","start_line":1385,"end_line":1423,"context_start_line":1365,"context_end_line":1443,"code":" state1,\n state2,\n beta1,\n beta2,\n beta3,\n alpha,\n eps,\n step,\n lr,\n qmap1,\n qmap2,\n absmax1,\n absmax2,\n weight_decay,\n gnorm_scale,\n skip_zeros,\n )\n\n\n@deprecated(\"This function is deprecated and will be removed in a future release.\", category=FutureWarning)\ndef percentile_clipping(grad: Tensor, gnorm_vec: Tensor, step: int, percentile: int = 5):\n \"\"\"Applies percentile clipping\n\n grad: torch.Tensor\n The gradient tensor.\n gnorm_vec: torch.Tensor\n Vector of gradient norms. 
100 elements expected.\n step: int\n The current optimization steps (number of past gradient norms).\n\n \"\"\"\n with _cuda_device_of(grad):\n is_on_gpu([grad, gnorm_vec])\n if grad.dtype == torch.float32:\n lib.cpercentile_clipping_g32(\n get_ptr(grad),\n get_ptr(gnorm_vec),\n ct.c_int32(step),\n ct.c_int32(grad.numel()),\n )\n elif grad.dtype == torch.float16:\n lib.cpercentile_clipping_g16(\n get_ptr(grad),\n get_ptr(gnorm_vec),\n ct.c_int32(step),\n ct.c_int32(grad.numel()),\n )\n else:\n raise ValueError(f\"Gradient type {grad.dtype} not supported!\")\n\n current_gnorm = torch.sqrt(gnorm_vec[step % 100])\n vals, idx = torch.sort(gnorm_vec)\n clip_value = torch.sqrt(vals[percentile])\n gnorm_scale = 1.0\n\n if current_gnorm > clip_value:\n gnorm_scale = clip_value / current_gnorm\n\n return current_gnorm, clip_value, gnorm_scale\n\n\ndef check_matmul(A, B, out, transposed_A, transposed_B, expected_type=torch.int8):\n if not torch.cuda.is_initialized():\n torch.cuda.init()\n if A.dtype != expected_type or B.dtype != expected_type:\n raise TypeError(f\"Expected torch.int8 input tensors A and B, but got {A.dtype} and {B.dtype}\")\n\n sA = A.shape\n sB = B.shape\n tA = transposed_A\n tB = transposed_B\n\n correct = True\n\n if len(sA) == 2 and len(sB) == 2:\n if not tA and not tB and A.shape[1] != B.shape[0]:\n correct = False\n elif tA and not tB and A.shape[0] != B.shape[0]:\n correct = False","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.check_matmul","uri":"program://bitsandbytes/function/bitsandbytes.functional.check_matmul#L1426-L1507","kind":"function","name":"check_matmul","path":"bitsandbytes/functional.py","language":"python","start_line":1426,"end_line":1507,"context_start_line":1406,"context_end_line":1527,"code":" lib.cpercentile_clipping_g16(\n get_ptr(grad),\n get_ptr(gnorm_vec),\n ct.c_int32(step),\n ct.c_int32(grad.numel()),\n )\n else:\n raise ValueError(f\"Gradient type {grad.dtype} not supported!\")\n\n current_gnorm = torch.sqrt(gnorm_vec[step % 100])\n vals, idx = torch.sort(gnorm_vec)\n clip_value = torch.sqrt(vals[percentile])\n gnorm_scale = 1.0\n\n if current_gnorm > clip_value:\n gnorm_scale = clip_value / current_gnorm\n\n return current_gnorm, clip_value, gnorm_scale\n\n\ndef check_matmul(A, B, out, transposed_A, transposed_B, expected_type=torch.int8):\n if not torch.cuda.is_initialized():\n torch.cuda.init()\n if A.dtype != expected_type or B.dtype != expected_type:\n raise TypeError(f\"Expected torch.int8 input tensors A and B, but got {A.dtype} and {B.dtype}\")\n\n sA = A.shape\n sB = B.shape\n tA = transposed_A\n tB = transposed_B\n\n correct = True\n\n if len(sA) == 2 and len(sB) == 2:\n if not tA and not tB and A.shape[1] != B.shape[0]:\n correct = False\n elif tA and not tB and A.shape[0] != B.shape[0]:\n correct = False\n elif tA and tB and A.shape[0] != B.shape[1]:\n correct = False\n elif not tA and tB and A.shape[1] != B.shape[1]:\n correct = False\n elif len(sA) == 3 and len(sB) == 2:\n if not tA and not tB and A.shape[2] != B.shape[0]:\n correct = False\n elif tA and not tB and A.shape[1] != B.shape[0]:\n correct = False\n elif tA and tB and A.shape[1] != B.shape[1]:\n correct = False\n elif not tA and tB and A.shape[2] != B.shape[1]:\n correct = False\n elif len(sA) == 3 and len(sB) == 3:\n if not tA and not tB and A.shape[2] != B.shape[1]:\n correct = False\n elif tA and not tB and A.shape[1] != B.shape[1]:\n correct = False\n 
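# --- Editor's example (illustrative, CUDA assumed): the deprecated percentile
# clipping above keeps a rolling window of 100 gradient norms in gnorm_vec; the
# returned gnorm_scale stays 1.0 unless the current norm exceeds the requested
# percentile of that history, in which case it becomes clip_value/current_gnorm.
import torch
import bitsandbytes.functional as F

grad = torch.randn(4096, device="cuda", dtype=torch.float32)
gnorm_vec = torch.zeros(100, device="cuda")
for step in range(1, 10):
    current_gnorm, clip_value, gnorm_scale = F.percentile_clipping(grad, gnorm_vec, step)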
elif tA and tB and A.shape[1] != B.shape[2]:\n correct = False\n elif not tA and tB and A.shape[2] != B.shape[2]:\n correct = False\n\n if out is not None:\n sout = out.shape\n # special case common in backprop\n if not correct and len(sA) == 3 and len(sB) == 3:\n if sout[0] == sA[2] and sout[1] == sB[2] and sA[0] == sB[0] and sA[1] == sB[1]:\n correct = True\n else:\n if len(sA) == 2 and len(sB) == 2:\n if not tA and not tB:\n sout = (sA[0], sB[1])\n elif tA and tB:\n sout = (sA[1], sB[0])\n elif tA and not tB:\n sout = (sA[1], sB[1])\n elif not tA and tB:\n sout = (sA[0], sB[0])\n elif len(sA) == 3 and len(sB) == 2:\n if not tA and not tB:\n sout = (sA[0], sA[1], sB[1])\n elif tA and tB:\n sout = (sA[0], sA[2], sB[0])\n elif tA and not tB:\n sout = (sA[0], sA[2], sB[1])\n elif not tA and tB:\n sout = (sA[0], sA[1], sB[0])\n elif len(sA) == 3 and len(sB) == 3:\n if not tA and not tB:\n sout = (sA[0], sA[1], sB[2])\n elif tA and tB:\n sout = (sA[0], sA[2], sB[1])\n elif tA and not tB:\n sout = (sA[0], sA[2], sB[2])\n elif not tA and tB:\n sout = (sA[0], sA[1], sB[1])\n\n if not correct:\n raise ValueError(\n f\"Tensor dimensions incorrect for matrix multiplication: A x B: {sA} x {sB} with transpose for A x B: {tA} x {tB}.\",\n )\n\n return sout\n\n\ndef gemv_4bit(\n A: Tensor,\n B: Tensor,\n out: Optional[torch.Tensor] = None,\n transposed_A=False,\n transposed_B=False,\n state=None,\n):\n if state is None:\n raise ValueError(\"state cannot be None. gemv_4bit() requires the state from quantize_4bit()\")\n\n absmax = state.absmax\n if state.nested:\n absmax = dequantize_blockwise(absmax, state.state2) + state.offset\n\n if out is not None:\n torch.ops.bitsandbytes.gemv_4bit.out(\n A,","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.gemv_4bit","uri":"program://bitsandbytes/function/bitsandbytes.functional.gemv_4bit#L1510-L1544","kind":"function","name":"gemv_4bit","path":"bitsandbytes/functional.py","language":"python","start_line":1510,"end_line":1544,"context_start_line":1490,"context_end_line":1564,"code":" elif not tA and tB:\n sout = (sA[0], sA[1], sB[0])\n elif len(sA) == 3 and len(sB) == 3:\n if not tA and not tB:\n sout = (sA[0], sA[1], sB[2])\n elif tA and tB:\n sout = (sA[0], sA[2], sB[1])\n elif tA and not tB:\n sout = (sA[0], sA[2], sB[2])\n elif not tA and tB:\n sout = (sA[0], sA[1], sB[1])\n\n if not correct:\n raise ValueError(\n f\"Tensor dimensions incorrect for matrix multiplication: A x B: {sA} x {sB} with transpose for A x B: {tA} x {tB}.\",\n )\n\n return sout\n\n\ndef gemv_4bit(\n A: Tensor,\n B: Tensor,\n out: Optional[torch.Tensor] = None,\n transposed_A=False,\n transposed_B=False,\n state=None,\n):\n if state is None:\n raise ValueError(\"state cannot be None. 
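# --- Editor's example (illustrative): the shape rules implemented above. For a
# 3-D x 2-D int8 product with no transposes, the inner dimensions must agree
# and the inferred result shape is (A0, A1, B1). check_matmul initializes CUDA,
# so this assumes a CUDA-capable environment.
import torch
import bitsandbytes.functional as F

A = torch.empty(8, 16, 32, dtype=torch.int8, device="cuda")
B = torch.empty(32, 64, dtype=torch.int8, device="cuda")
sout = F.check_matmul(A, B, out=None, transposed_A=False, transposed_B=False)
assert sout == (8, 16, 64)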
gemv_4bit() requires the state from quantize_4bit()\")\n\n absmax = state.absmax\n if state.nested:\n absmax = dequantize_blockwise(absmax, state.state2) + state.offset\n\n if out is not None:\n torch.ops.bitsandbytes.gemv_4bit.out(\n A,\n B,\n state.shape,\n absmax,\n state.code,\n state.blocksize,\n out=out,\n )\n return out\n\n return torch.ops.bitsandbytes.gemv_4bit.default(\n A,\n B,\n state.shape,\n absmax,\n state.code,\n state.blocksize,\n )\n\n\ndef igemm(\n A: Tensor,\n B: Tensor,\n out: Optional[torch.Tensor] = None,\n transposed_A=False,\n transposed_B=False,\n):\n sout = check_matmul(A, B, out, transposed_A, transposed_B)\n if out is None:\n out = torch.zeros(size=sout, dtype=torch.int32, device=A.device)\n if len(A.shape) == 3 and len(B.shape) == 3:\n if A.shape[0] == B.shape[0] and A.shape[2] == B.shape[1]:\n return batched_igemm(A, B, out)\n\n sA = A.shape\n sB = B.shape\n if transposed_A and len(sA) == 2:\n sA = (sA[1], sA[0])","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.igemm","uri":"program://bitsandbytes/function/bitsandbytes.functional.igemm#L1547-L1645","kind":"function","name":"igemm","path":"bitsandbytes/functional.py","language":"python","start_line":1547,"end_line":1645,"context_start_line":1527,"context_end_line":1665,"code":" A,\n B,\n state.shape,\n absmax,\n state.code,\n state.blocksize,\n out=out,\n )\n return out\n\n return torch.ops.bitsandbytes.gemv_4bit.default(\n A,\n B,\n state.shape,\n absmax,\n state.code,\n state.blocksize,\n )\n\n\ndef igemm(\n A: Tensor,\n B: Tensor,\n out: Optional[torch.Tensor] = None,\n transposed_A=False,\n transposed_B=False,\n):\n sout = check_matmul(A, B, out, transposed_A, transposed_B)\n if out is None:\n out = torch.zeros(size=sout, dtype=torch.int32, device=A.device)\n if len(A.shape) == 3 and len(B.shape) == 3:\n if A.shape[0] == B.shape[0] and A.shape[2] == B.shape[1]:\n return batched_igemm(A, B, out)\n\n sA = A.shape\n sB = B.shape\n if transposed_A and len(sA) == 2:\n sA = (sA[1], sA[0])\n elif transposed_A and len(sA) == 3:\n sA = (sA[0], sA[2], sA[0])\n if transposed_B and len(sB) == 2:\n sB = (sB[1], sB[0])\n elif transposed_B and len(sB) == 3:\n sB = (sB[0], sB[2], sB[0])\n # this is a mess: cuBLAS expect column major, but PyTorch is row major.\n # So to perform the matrix multiplication, we have to treat A, B, and C matrices\n # (transpose of row major is column major)\n # This means we compute B^T A^T = C^T and we explicitly switch the dimensions of each of these\n\n # matrices in the input arguments for cuBLAS\n # column major: A @ B = C: [m, k] @ [k, n] = [m, n]\n # row major: B^T @ A^T = C^T: [m, k] @ [k, n] = [m, n]\n # column major with row major layout: B^T @ A^T = C^T: [k, m] @ [n, k] = [n, m]\n if len(sB) == 2:\n if B.stride()[0] == B.shape[1]:\n transposed_B = False\n elif B.stride()[1] == B.shape[0]:\n transposed_B = True\n if len(A.shape) == 2:\n if A.stride()[0] == A.shape[1]:\n transposed_A = False\n elif A.stride()[1] == A.shape[0]:\n transposed_A = True\n else:\n if A.stride()[1] == A.shape[2]:\n transposed_A = False\n elif A.stride()[2] == A.shape[1]:\n transposed_A = True\n\n if len(sA) == 2:\n n = sA[0]\n ldb = A.stride()[1 if transposed_A else 0]\n elif len(sA) == 3 and len(sB) == 2:\n n = sA[0] * sA[1]\n ldb = sA[2]\n\n m = sB[1]\n k = sB[0]\n lda = B.stride()[(1 if transposed_B else 0)]\n ldc = sB[1]\n elif len(sB) == 3:\n # special case\n assert len(sA) == 3\n if not 
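# --- Editor's sketch (assumptions: CUDA device; the layout mirrors how
# bnb.matmul_4bit dispatches single-token inference, i.e. the packed weight is
# passed transposed together with its QuantState): a matrix-vector product
# computed directly against NF4-packed weights, skipping full dequantization.
import torch
import bitsandbytes.functional as F

W = torch.randn(4096, 4096, dtype=torch.float16, device="cuda")
W4, state = F.quantize_4bit(W, quant_type="nf4")
x = torch.randn(1, 1, 4096, dtype=torch.float16, device="cuda")
y = F.gemv_4bit(x, W4.t(), state=state)  # shape (1, 1, 4096)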
(sA[0] == sB[0] and sA[1] == sB[1]):\n raise ValueError(\n f\"Only bsi,bso->io supported for tensor contractions, but dims for A x B were: {sA} x {sB}\",\n )\n\n transposed_A = True\n transposed_B = False\n\n m = sB[2]\n n = sA[2]\n k = sB[0] * sB[1]\n\n lda = m\n ldb = sA[2]\n ldc = m\n\n ptr = CUBLAS_Context.get_instance().get_context(A.device)\n\n # B^T @ A^T = C^T\n # [km, nk -> mn]\n is_on_gpu([B, A, out])\n lib.cigemm(\n ptr,\n ct.c_bool(transposed_B),\n ct.c_bool(transposed_A),\n ct.c_int32(m),\n ct.c_int32(n),\n ct.c_int32(k),\n get_ptr(B),\n get_ptr(A),\n get_ptr(out),\n ct.c_int32(lda),\n ct.c_int32(ldb),\n ct.c_int32(ldc),\n )\n return out\n\n\ndef batched_igemm(\n A: Tensor,\n B: Tensor,\n out: Optional[torch.Tensor] = None,\n transposed_A=False,\n transposed_B=False,\n):\n if not len(A.shape) == 3 or not len(B.shape) == 3:\n raise ValueError(f\"Expected 3-dimensional tensors for bmm, but got shapes A and B: {A.shape} and {B.shape}\")\n sout = check_matmul(A, B, out, transposed_A, transposed_B)\n if out is None:\n out = torch.zeros(size=sout, dtype=torch.int32, device=A.device)\n\n if B.is_contiguous():\n lda = B.stride()[1]\n transposed_A = False\n else:\n s = B.stride()","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.batched_igemm","uri":"program://bitsandbytes/function/bitsandbytes.functional.batched_igemm#L1648-L1741","kind":"function","name":"batched_igemm","path":"bitsandbytes/functional.py","language":"python","start_line":1648,"end_line":1741,"context_start_line":1628,"context_end_line":1761,"code":" # B^T @ A^T = C^T\n # [km, nk -> mn]\n is_on_gpu([B, A, out])\n lib.cigemm(\n ptr,\n ct.c_bool(transposed_B),\n ct.c_bool(transposed_A),\n ct.c_int32(m),\n ct.c_int32(n),\n ct.c_int32(k),\n get_ptr(B),\n get_ptr(A),\n get_ptr(out),\n ct.c_int32(lda),\n ct.c_int32(ldb),\n ct.c_int32(ldc),\n )\n return out\n\n\ndef batched_igemm(\n A: Tensor,\n B: Tensor,\n out: Optional[torch.Tensor] = None,\n transposed_A=False,\n transposed_B=False,\n):\n if not len(A.shape) == 3 or not len(B.shape) == 3:\n raise ValueError(f\"Expected 3-dimensional tensors for bmm, but got shapes A and B: {A.shape} and {B.shape}\")\n sout = check_matmul(A, B, out, transposed_A, transposed_B)\n if out is None:\n out = torch.zeros(size=sout, dtype=torch.int32, device=A.device)\n\n if B.is_contiguous():\n lda = B.stride()[1]\n transposed_A = False\n else:\n s = B.stride()\n if s[0] != B.shape[0]:\n B = B.contiguous()\n lda = B.stride()[1]\n elif s[2] == B.shape[1]:\n transposed_A = True\n lda = B.stride()[2]\n else:\n if s[2] == 1:\n B = B.contiguous()\n lda = B.stride()[1]\n elif s[1] == 1:\n B = B.contiguous()\n lda = B.stride()[1]\n else:\n B = B.contiguous()\n lda = B.stride()[1]\n\n if A.is_contiguous():\n ldb = A.stride()[1]\n transposed_B = False\n else:\n s = A.stride()\n if s[0] != A.shape[0]:\n A = A.contiguous()\n ldb = A.stride()[1]\n transposed_B = False\n elif s[2] == A.shape[1]:\n ldb = A.stride()[2]\n transposed_B = True\n else:\n A = A.contiguous()\n ldb = A.stride()[1]\n transposed_B = False\n\n # this is a mess: cuBLAS expect column major, but PyTorch is row major.\n # So to perform the matrix multiplication, we have to treat A, B, and C matrices\n # (transpose of row major is column major)\n # This means we compute B^T A^T = C^T and we explicitly switch the dimensions of each of these\n # matrices in the input arguments for cuBLAS\n\n # column major: A @ B = C: [batch, m, 
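# --- Editor's example (illustrative, CUDA assumed): int8 batched GEMM. Both
# operands must be 3-D int8 tensors with matching batch and inner dimensions;
# results accumulate in int32. igemm() dispatches here for 3-D x 3-D inputs.
import torch
import bitsandbytes.functional as F

A = torch.randint(-128, 128, (4, 8, 16), dtype=torch.int8, device="cuda")
B = torch.randint(-128, 128, (4, 16, 32), dtype=torch.int8, device="cuda")
C = F.batched_igemm(A, B)  # shape (4, 8, 32), dtype torch.int32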
k] @ [batch, k, n] = [batch, m, n]\n # row major: B^T @ A^T = C^T: [batch, m, k] @ [batch, k, n] = [batch, m, n]\n # column major with row major layout: B^T @ A^T = C^T: [batch, k, m] @ [batch, n, k] = [batch, n, m]\n num_batch = A.shape[0]\n n = A.shape[1]\n m = B.shape[2]\n k = B.shape[1]\n\n ldc = m\n\n strideA = B.shape[1] * B.shape[2]\n strideB = A.shape[1] * A.shape[2]\n strideC = A.shape[1] * B.shape[2]\n\n ptr = CUBLAS_Context.get_instance().get_context(A.device)\n\n is_on_gpu([B, A, out])\n lib.cbatched_igemm(\n ptr,\n ct.c_bool(transposed_B),\n ct.c_bool(transposed_A),\n ct.c_int32(m),\n ct.c_int32(n),\n ct.c_int32(k),\n get_ptr(B),\n get_ptr(A),\n get_ptr(out),\n ct.c_int32(lda),\n ct.c_int32(ldb),\n ct.c_int32(ldc),\n ct.c_long(strideA),\n ct.c_long(strideB),\n ct.c_long(strideC),\n ct.c_uint32(num_batch),\n )\n return out\n\n\ndef int8_linear_matmul(A: torch.Tensor, B: torch.Tensor, out: Optional[torch.Tensor] = None, dtype=torch.int32):\n \"\"\"Performs an 8-bit integer matrix multiplication.\n\n A linear transformation is applied such that `out = A @ B.T`. When possible, integer tensor core hardware is\n utilized to accelerate the operation.\n\n Args:\n A (`torch.Tensor`): The first matrix operand with the data type `torch.int8`.\n B (`torch.Tensor`): The second matrix operand with the data type `torch.int8`.\n out (`torch.Tensor`, *optional*): A pre-allocated tensor used to store the result.\n dtype (`torch.dtype`, *optional*): The expected data type of the output. Defaults to `torch.int32`.\n\n Raises:\n `NotImplementedError`: The operation is not supported in the current environment.\n `RuntimeError`: Raised when the cannot be completed for any other reason.\n\n Returns:\n `torch.Tensor`: The result of the operation.","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.int8_linear_matmul","uri":"program://bitsandbytes/function/bitsandbytes.functional.int8_linear_matmul#L1744-L1767","kind":"function","name":"int8_linear_matmul","path":"bitsandbytes/functional.py","language":"python","start_line":1744,"end_line":1767,"context_start_line":1724,"context_end_line":1787,"code":" ptr,\n ct.c_bool(transposed_B),\n ct.c_bool(transposed_A),\n ct.c_int32(m),\n ct.c_int32(n),\n ct.c_int32(k),\n get_ptr(B),\n get_ptr(A),\n get_ptr(out),\n ct.c_int32(lda),\n ct.c_int32(ldb),\n ct.c_int32(ldc),\n ct.c_long(strideA),\n ct.c_long(strideB),\n ct.c_long(strideC),\n ct.c_uint32(num_batch),\n )\n return out\n\n\ndef int8_linear_matmul(A: torch.Tensor, B: torch.Tensor, out: Optional[torch.Tensor] = None, dtype=torch.int32):\n \"\"\"Performs an 8-bit integer matrix multiplication.\n\n A linear transformation is applied such that `out = A @ B.T`. When possible, integer tensor core hardware is\n utilized to accelerate the operation.\n\n Args:\n A (`torch.Tensor`): The first matrix operand with the data type `torch.int8`.\n B (`torch.Tensor`): The second matrix operand with the data type `torch.int8`.\n out (`torch.Tensor`, *optional*): A pre-allocated tensor used to store the result.\n dtype (`torch.dtype`, *optional*): The expected data type of the output. 
Defaults to `torch.int32`.\n\n Raises:\n `NotImplementedError`: The operation is not supported in the current environment.\n `RuntimeError`: Raised when the operation cannot be completed for any other reason.\n\n Returns:\n `torch.Tensor`: The result of the operation.","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.int8_linear_matmul","uri":"program://bitsandbytes/function/bitsandbytes.functional.int8_linear_matmul#L1744-L1767","kind":"function","name":"int8_linear_matmul","path":"bitsandbytes/functional.py","language":"python","start_line":1744,"end_line":1767,"context_start_line":1724,"context_end_line":1787,"code":" ptr,\n ct.c_bool(transposed_B),\n ct.c_bool(transposed_A),\n ct.c_int32(m),\n ct.c_int32(n),\n ct.c_int32(k),\n get_ptr(B),\n get_ptr(A),\n get_ptr(out),\n ct.c_int32(lda),\n ct.c_int32(ldb),\n ct.c_int32(ldc),\n ct.c_long(strideA),\n ct.c_long(strideB),\n ct.c_long(strideC),\n ct.c_uint32(num_batch),\n )\n return out\n\n\ndef int8_linear_matmul(A: torch.Tensor, B: torch.Tensor, out: Optional[torch.Tensor] = None, dtype=torch.int32):\n \"\"\"Performs an 8-bit integer matrix multiplication.\n\n A linear transformation is applied such that `out = A @ B.T`. When possible, integer tensor core hardware is\n utilized to accelerate the operation.\n\n Args:\n A (`torch.Tensor`): The first matrix operand with the data type `torch.int8`.\n B (`torch.Tensor`): The second matrix operand with the data type `torch.int8`.\n out (`torch.Tensor`, *optional*): A pre-allocated tensor used to store the result.\n dtype (`torch.dtype`, *optional*): The expected data type of the output. 
Defaults to `torch.int32`.\n\n Raises:\n `NotImplementedError`: The operation is not supported in the current environment.\n `RuntimeError`: Raised when the operation cannot be completed for any other reason.\n\n Returns:\n `torch.Tensor`: The result of the operation.\n \"\"\"\n if out is not None:\n torch.ops.bitsandbytes.int8_linear_matmul.out(A, B, out)\n return out\n\n return torch.ops.bitsandbytes.int8_linear_matmul.default(A, B)\n\n\ndef int8_mm_dequant(\n A: torch.Tensor,\n row_stats: torch.Tensor,\n col_stats: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n bias: Optional[torch.Tensor] = None,\n):\n \"\"\"Performs dequantization on the result of a quantized int8 matrix multiplication.\n\n Args:\n A (`torch.Tensor` with dtype `torch.int32`): The result of a quantized int8 matrix multiplication.\n row_stats (`torch.Tensor`): The row-wise quantization statistics for the lhs operand of the matrix multiplication.\n col_stats (`torch.Tensor`): The column-wise quantization statistics for the rhs operand of the matrix multiplication.\n out (`torch.Tensor`, *optional*): A pre-allocated tensor to store the output of the operation.\n bias (`torch.Tensor`, *optional*): An optional bias vector to add to the result.\n\n Returns:\n `torch.Tensor`: The dequantized result with an optional bias, with dtype `torch.float16`.\n \"\"\"\n result = torch.ops.bitsandbytes.int8_mm_dequant.default(A, row_stats, col_stats, dtype=torch.float16, bias=bias)\n\n # TODO(matthewdouglas): Deprecate out kwarg\n if out is not None:\n return out.copy_(result)\n\n return result\n\n\n@deprecated(\"This function is deprecated and will be removed in a future release.\", category=FutureWarning)\ndef get_colrow_absmax(\n A: torch.Tensor,\n row_stats: Optional[torch.Tensor] = None,\n col_stats: Optional[torch.Tensor] = None,\n nnz_block_ptr: Optional[torch.Tensor] = None,\n threshold=0.0,\n) -> tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:\n \"\"\"Determine the quantization statistics for input matrix `A` in accordance with the `LLM.int8()` algorithm.\n\n The row-wise and column-wise absmax values are determined.\n\n For more information, see the [LLM.int8() paper](https://arxiv.org/abs/2208.07339).\n\n \n This function is useful for training, but for inference it is advised to use [`get_row_absmax`] instead.\n The column-wise quantization scales are not typically needed in inference scenarios.\n ","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.get_colrow_absmax","uri":"program://bitsandbytes/function/bitsandbytes.functional.get_colrow_absmax#L1799-L1853","kind":"function","name":"get_colrow_absmax","path":"bitsandbytes/functional.py","language":"python","start_line":1799,"end_line":1853,"context_start_line":1779,"context_end_line":1873,"code":" Args:\n A (`torch.Tensor` with dtype `torch.int32`): The result of a quantized int8 matrix multiplication.\n row_stats (`torch.Tensor`): The row-wise quantization statistics for the lhs operand of the matrix multiplication.\n col_stats (`torch.Tensor`): The column-wise quantization statistics for the rhs operand of the matrix multiplication.\n out (`torch.Tensor`, *optional*): A pre-allocated tensor to store the output of the operation.\n bias (`torch.Tensor`, *optional*): An optional bias vector to add to the result.\n\n Returns:\n `torch.Tensor`: The dequantized result with an optional bias, with dtype `torch.float16`.\n \"\"\"\n result = 
torch.ops.bitsandbytes.int8_mm_dequant.default(A, row_stats, col_stats, dtype=torch.float16, bias=bias)\n\n # TODO(matthewdouglas): Deprecate out kwarg\n if out is not None:\n return out.copy_(result)\n\n return result\n\n\n@deprecated(\"This function is deprecated and will be removed in a future release.\", category=FutureWarning)\ndef get_colrow_absmax(\n A: torch.Tensor,\n row_stats: Optional[torch.Tensor] = None,\n col_stats: Optional[torch.Tensor] = None,\n nnz_block_ptr: Optional[torch.Tensor] = None,\n threshold=0.0,\n) -> tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:\n \"\"\"Determine the quantization statistics for input matrix `A` in accordance with the `LLM.int8()` algorithm.\n\n The row-wise and column-wise absmax values are determined.\n\n For more information, see the [LLM.int8() paper](https://arxiv.org/abs/2208.07339).\n\n \n This function is useful for training, but for inference it is advised to use [`get_row_absmax`] instead.\n The column-wise quantization scales are not typically needed in inference scenarios.\n \n\n Args:\n A (`torch.Tensor` with dtype `torch.float16`): Input tensor.\n row_stats (`torch.Tensor`, *optional*): If provided, calculation of row statistics is skipped.\n col_stats (`torch.Tensor`, *optional*): If provided, calculation of column statistics is skipped.\n nnz_block_ptr (`torch.Tensor`, *optional*): Not used.\n threshold (`float`, *optional*):\n An optional threshold for sparse decomposition of outlier features.\n No outliers are held back when 0.0. Defaults to 0.0.\n\n Returns:\n `Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]`: A tuple containing quantization statistics.\n - `torch.Tensor` with dtype `torch.float32`: The row-wise quantization statistics.\n - `torch.Tensor` with dtype `torch.float32`: The column-wise quantization statistics.\n - `torch.Tensor` with dtype `torch.bool`, *optional*: A mask indicating the locations of outliers in the input tensor.\n \"\"\"\n assert A.is_floating_point()\n\n outlier_mask = None\n\n if row_stats is None or col_stats is None:\n absA = A.abs().view(-1, A.shape[-1])\n\n if threshold > 0.0:\n # Filter outliers from stats when enabled\n outlier_mask = absA >= threshold\n absA.masked_fill_(outlier_mask, 0.0)\n\n if row_stats is None:\n # shape [rows]; unsqueeze(-1) gives [rows,1]\n # We have a CUDA kernel for row max, but not yet for cols.\n row_stats = get_row_absmax(A, threshold)\n\n if col_stats is None:\n # shape [cols]; unsqueeze(0) gives [1,cols]\n col_stats = absA.amax(dim=0, keepdim=False).float()\n\n return row_stats, col_stats, outlier_mask\n\n\n@deprecated(\"This function is deprecated and will be removed in a future release.\", category=FutureWarning)\ndef get_row_absmax(A: torch.Tensor, threshold=0.0):\n \"\"\"Determine the quantization statistics for input matrix `A` in accordance with the `LLM.int8()` algorithm.\n\n For more information, see the [LLM.int8() paper](https://arxiv.org/abs/2208.07339).\n\n Args:\n A (`torch.Tensor` with dtype `torch.float16`): The input matrix.\n threshold (`float`, *optional*):\n An optional threshold for sparse decomposition of outlier features.\n No outliers are held back when 0.0. 
Defaults to 0.0.\n\n Returns:\n `torch.Tensor` with dtype `torch.float32`: The absolute maximum value for each row, with outliers ignored.\n \"\"\"\n\n assert A.dtype == torch.float16\n","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.get_row_absmax","uri":"program://bitsandbytes/function/bitsandbytes.functional.get_row_absmax#L1857-L1891","kind":"function","name":"get_row_absmax","path":"bitsandbytes/functional.py","language":"python","start_line":1857,"end_line":1891,"context_start_line":1837,"context_end_line":1911,"code":" absA = A.abs().view(-1, A.shape[-1])\n\n if threshold > 0.0:\n # Filter outliers from stats when enabled\n outlier_mask = absA >= threshold\n absA.masked_fill_(outlier_mask, 0.0)\n\n if row_stats is None:\n # shape [rows]; unsqueeze(-1) gives [rows,1]\n # We have a CUDA kernel for row max, but not yet for cols.\n row_stats = get_row_absmax(A, threshold)\n\n if col_stats is None:\n # shape [cols]; unsqueeze(0) gives [1,cols]\n col_stats = absA.amax(dim=0, keepdim=False).float()\n\n return row_stats, col_stats, outlier_mask\n\n\n@deprecated(\"This function is deprecated and will be removed in a future release.\", category=FutureWarning)\ndef get_row_absmax(A: torch.Tensor, threshold=0.0):\n \"\"\"Determine the quantization statistics for input matrix `A` in accordance to the `LLM.int8()` algorithm.\n\n For more information, see the [LLM.int8() paper](https://arxiv.org/abs/2208.07339).\n\n Args:\n A (`torch.Tensor` with dtype `torch.float16`): The input matrix.\n threshold (`float`, *optional*):\n An optional threshold for sparse decomposition of outlier features.\n No outliers are held back when 0.0. Defaults to 0.0.\n\n Returns:\n `torch.Tensor` with dtype `torch.float32`: The absolute maximum value for each row, with outliers ignored.\n \"\"\"\n\n assert A.dtype == torch.float16\n\n rows = prod(A.shape[:-1])\n cols = A.shape[-1]\n\n row_stats = torch.empty((rows,), dtype=torch.float32, device=A.device)\n\n is_on_gpu([A])\n\n with _cuda_device_of(A):\n lib.cget_row_stats(\n get_ptr(A),\n get_ptr(row_stats),\n ct.c_float(threshold),\n ct.c_int32(rows),\n ct.c_int32(cols),\n _get_tensor_stream(A),\n )\n\n return row_stats\n\n\nclass COOSparseTensor:\n def __init__(\n self, rows: int, cols: int, nnz: int, rowidx: torch.Tensor, colidx: torch.Tensor, values: torch.Tensor\n ):\n assert rowidx.dtype == torch.int32\n assert colidx.dtype == torch.int32\n assert values.dtype == torch.float16\n assert values.numel() == nnz\n assert rowidx.numel() == nnz\n assert colidx.numel() == nnz\n\n self.rows = rows\n self.cols = cols\n self.nnz = nnz\n self.rowidx = rowidx\n self.colidx = colidx\n self.values = values\n","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.COOSparseTensor","uri":"program://bitsandbytes/class/bitsandbytes.functional.COOSparseTensor#L1894-L1910","kind":"class","name":"COOSparseTensor","path":"bitsandbytes/functional.py","language":"python","start_line":1894,"end_line":1910,"context_start_line":1874,"context_end_line":1930,"code":" rows = prod(A.shape[:-1])\n cols = A.shape[-1]\n\n row_stats = torch.empty((rows,), dtype=torch.float32, device=A.device)\n\n is_on_gpu([A])\n\n with _cuda_device_of(A):\n lib.cget_row_stats(\n get_ptr(A),\n get_ptr(row_stats),\n ct.c_float(threshold),\n ct.c_int32(rows),\n ct.c_int32(cols),\n 
_get_tensor_stream(A),\n )\n\n return row_stats\n\n\nclass COOSparseTensor:\n def __init__(\n self, rows: int, cols: int, nnz: int, rowidx: torch.Tensor, colidx: torch.Tensor, values: torch.Tensor\n ):\n assert rowidx.dtype == torch.int32\n assert colidx.dtype == torch.int32\n assert values.dtype == torch.float16\n assert values.numel() == nnz\n assert rowidx.numel() == nnz\n assert colidx.numel() == nnz\n\n self.rows = rows\n self.cols = cols\n self.nnz = nnz\n self.rowidx = rowidx\n self.colidx = colidx\n self.values = values\n\n\nclass CSRSparseTensor:\n def __init__(self, rows, cols, nnz, rowptr, colidx, values):\n assert rowptr.dtype == torch.int32\n assert colidx.dtype == torch.int32\n assert values.dtype == torch.float16\n assert values.numel() == nnz\n assert colidx.numel() == nnz\n assert rowptr.numel() == rows + 1\n\n self.rows = rows\n self.cols = cols\n self.nnz = nnz\n self.rowptr = rowptr\n self.colidx = colidx\n self.values = values\n\n\nclass CSCSparseTensor:","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.CSRSparseTensor","uri":"program://bitsandbytes/class/bitsandbytes.functional.CSRSparseTensor#L1913-L1927","kind":"class","name":"CSRSparseTensor","path":"bitsandbytes/functional.py","language":"python","start_line":1913,"end_line":1927,"context_start_line":1893,"context_end_line":1947,"code":"\nclass COOSparseTensor:\n def __init__(\n self, rows: int, cols: int, nnz: int, rowidx: torch.Tensor, colidx: torch.Tensor, values: torch.Tensor\n ):\n assert rowidx.dtype == torch.int32\n assert colidx.dtype == torch.int32\n assert values.dtype == torch.float16\n assert values.numel() == nnz\n assert rowidx.numel() == nnz\n assert colidx.numel() == nnz\n\n self.rows = rows\n self.cols = cols\n self.nnz = nnz\n self.rowidx = rowidx\n self.colidx = colidx\n self.values = values\n\n\nclass CSRSparseTensor:\n def __init__(self, rows, cols, nnz, rowptr, colidx, values):\n assert rowptr.dtype == torch.int32\n assert colidx.dtype == torch.int32\n assert values.dtype == torch.float16\n assert values.numel() == nnz\n assert colidx.numel() == nnz\n assert rowptr.numel() == rows + 1\n\n self.rows = rows\n self.cols = cols\n self.nnz = nnz\n self.rowptr = rowptr\n self.colidx = colidx\n self.values = values\n\n\nclass CSCSparseTensor:\n def __init__(self, rows, cols, nnz, colptr, rowidx, values):\n assert colptr.dtype == torch.int32\n assert rowidx.dtype == torch.int32\n assert values.dtype == torch.float16\n assert values.numel() == nnz\n assert rowidx.numel() == nnz\n assert colptr.numel() == cols + 1\n\n self.rows = rows\n self.cols = cols\n self.nnz = nnz\n self.colptr = colptr\n self.rowidx = rowidx\n self.values = values\n\n\ndef coo2csr(cooA):","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.CSCSparseTensor","uri":"program://bitsandbytes/class/bitsandbytes.functional.CSCSparseTensor#L1930-L1944","kind":"class","name":"CSCSparseTensor","path":"bitsandbytes/functional.py","language":"python","start_line":1930,"end_line":1944,"context_start_line":1910,"context_end_line":1964,"code":" self.values = values\n\n\nclass CSRSparseTensor:\n def __init__(self, rows, cols, nnz, rowptr, colidx, values):\n assert rowptr.dtype == torch.int32\n assert colidx.dtype == torch.int32\n assert values.dtype == torch.float16\n assert values.numel() == nnz\n assert 
colidx.numel() == nnz\n assert rowptr.numel() == rows + 1\n\n self.rows = rows\n self.cols = cols\n self.nnz = nnz\n self.rowptr = rowptr\n self.colidx = colidx\n self.values = values\n\n\nclass CSCSparseTensor:\n def __init__(self, rows, cols, nnz, colptr, rowidx, values):\n assert colptr.dtype == torch.int32\n assert rowidx.dtype == torch.int32\n assert values.dtype == torch.float16\n assert values.numel() == nnz\n assert rowidx.numel() == nnz\n assert colptr.numel() == cols + 1\n\n self.rows = rows\n self.cols = cols\n self.nnz = nnz\n self.colptr = colptr\n self.rowidx = rowidx\n self.values = values\n\n\ndef coo2csr(cooA):\n values, counts = torch.unique(cooA.rowidx, return_counts=True)\n values.add_(1)\n rowptr = torch.zeros((cooA.rows + 1,), dtype=torch.int32, device=cooA.rowidx.device)\n rowptr.scatter_(index=values.long(), src=counts.int(), dim=0)\n rowptr.cumsum_(0)\n return CSRSparseTensor(cooA.rows, cooA.cols, cooA.nnz, rowptr, cooA.colidx, cooA.values)\n\n\ndef coo2csc(cooA):\n val, col2rowidx = torch.sort(cooA.colidx)\n rowidx = cooA.rowidx[col2rowidx]\n values = cooA.values[col2rowidx]\n colvalues, counts = torch.unique(val, return_counts=True)\n colvalues.add_(1)\n colptr = torch.zeros((cooA.cols + 1,), dtype=torch.int32, device=cooA.colidx.device)\n colptr.scatter_(index=colvalues.long(), src=counts.int(), dim=0)\n colptr.cumsum_(0)","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.coo2csr","uri":"program://bitsandbytes/function/bitsandbytes.functional.coo2csr#L1947-L1953","kind":"function","name":"coo2csr","path":"bitsandbytes/functional.py","language":"python","start_line":1947,"end_line":1953,"context_start_line":1927,"context_end_line":1973,"code":" self.values = values\n\n\nclass CSCSparseTensor:\n def __init__(self, rows, cols, nnz, colptr, rowidx, values):\n assert colptr.dtype == torch.int32\n assert rowidx.dtype == torch.int32\n assert values.dtype == torch.float16\n assert values.numel() == nnz\n assert rowidx.numel() == nnz\n assert colptr.numel() == cols + 1\n\n self.rows = rows\n self.cols = cols\n self.nnz = nnz\n self.colptr = colptr\n self.rowidx = rowidx\n self.values = values\n\n\ndef coo2csr(cooA):\n values, counts = torch.unique(cooA.rowidx, return_counts=True)\n values.add_(1)\n rowptr = torch.zeros((cooA.rows + 1,), dtype=torch.int32, device=cooA.rowidx.device)\n rowptr.scatter_(index=values.long(), src=counts.int(), dim=0)\n rowptr.cumsum_(0)\n return CSRSparseTensor(cooA.rows, cooA.cols, cooA.nnz, rowptr, cooA.colidx, cooA.values)\n\n\ndef coo2csc(cooA):\n val, col2rowidx = torch.sort(cooA.colidx)\n rowidx = cooA.rowidx[col2rowidx]\n values = cooA.values[col2rowidx]\n colvalues, counts = torch.unique(val, return_counts=True)\n colvalues.add_(1)\n colptr = torch.zeros((cooA.cols + 1,), dtype=torch.int32, device=cooA.colidx.device)\n colptr.scatter_(index=colvalues.long(), src=counts.int(), dim=0)\n colptr.cumsum_(0)\n return CSCSparseTensor(cooA.rows, cooA.cols, cooA.nnz, colptr, rowidx, values)\n\n\ndef coo_zeros(rows, cols, nnz, device, dtype=torch.half):\n rowidx = torch.zeros((nnz,), dtype=torch.int32, device=device)\n colidx = torch.zeros((nnz,), dtype=torch.int32, device=device)\n values = torch.zeros((nnz,), dtype=dtype, device=device)\n return COOSparseTensor(rows, cols, nnz, rowidx, colidx, values)\n","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} 
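The coo2csr/coo2csc converters in the records above build the compressed pointer array from COO indices with torch.unique, an in-place scatter_, and a cumulative sum. A minimal round-trip sketch (illustrative usage only, not library code; it assumes `bitsandbytes.functional` is importable and uses the int32 index and float16 value dtypes the constructors assert):

```python
# Hypothetical usage sketch: a 3x4 matrix with three nonzeros,
# two in row 0 and one in row 2.
import torch

from bitsandbytes.functional import COOSparseTensor, coo2csr

rowidx = torch.tensor([0, 0, 2], dtype=torch.int32)
colidx = torch.tensor([1, 3, 0], dtype=torch.int32)
values = torch.tensor([1.0, 2.0, 3.0], dtype=torch.float16)

cooA = COOSparseTensor(3, 4, 3, rowidx, colidx, values)
csrA = coo2csr(cooA)

# rowptr[i + 1] - rowptr[i] is the number of nonzeros in row i:
# row 0 has 2, row 1 has 0, row 2 has 1.
print(csrA.rowptr)  # tensor([0, 2, 2, 3], dtype=torch.int32)
```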
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.coo2csc","uri":"program://bitsandbytes/function/bitsandbytes.functional.coo2csc#L1956-L1965","kind":"function","name":"coo2csc","path":"bitsandbytes/functional.py","language":"python","start_line":1956,"end_line":1965,"context_start_line":1936,"context_end_line":1985,"code":" assert rowidx.numel() == nnz\n assert colptr.numel() == cols + 1\n\n self.rows = rows\n self.cols = cols\n self.nnz = nnz\n self.colptr = colptr\n self.rowidx = rowidx\n self.values = values\n\n\ndef coo2csr(cooA):\n values, counts = torch.unique(cooA.rowidx, return_counts=True)\n values.add_(1)\n rowptr = torch.zeros((cooA.rows + 1,), dtype=torch.int32, device=cooA.rowidx.device)\n rowptr.scatter_(index=values.long(), src=counts.int(), dim=0)\n rowptr.cumsum_(0)\n return CSRSparseTensor(cooA.rows, cooA.cols, cooA.nnz, rowptr, cooA.colidx, cooA.values)\n\n\ndef coo2csc(cooA):\n val, col2rowidx = torch.sort(cooA.colidx)\n rowidx = cooA.rowidx[col2rowidx]\n values = cooA.values[col2rowidx]\n colvalues, counts = torch.unique(val, return_counts=True)\n colvalues.add_(1)\n colptr = torch.zeros((cooA.cols + 1,), dtype=torch.int32, device=cooA.colidx.device)\n colptr.scatter_(index=colvalues.long(), src=counts.int(), dim=0)\n colptr.cumsum_(0)\n return CSCSparseTensor(cooA.rows, cooA.cols, cooA.nnz, colptr, rowidx, values)\n\n\ndef coo_zeros(rows, cols, nnz, device, dtype=torch.half):\n rowidx = torch.zeros((nnz,), dtype=torch.int32, device=device)\n colidx = torch.zeros((nnz,), dtype=torch.int32, device=device)\n values = torch.zeros((nnz,), dtype=dtype, device=device)\n return COOSparseTensor(rows, cols, nnz, rowidx, colidx, values)\n\n\ndef int8_double_quant(\n A: torch.Tensor,\n col_stats: Optional[torch.Tensor] = None,\n row_stats: Optional[torch.Tensor] = None,\n out_col: Optional[torch.Tensor] = None,\n out_row: Optional[torch.Tensor] = None,\n threshold=0.0,\n):\n \"\"\"Determine the quantization statistics for input matrix `A` in accordance to the `LLM.int8()` algorithm.\n\n The statistics are determined both row-wise and column-wise (transposed).","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.coo_zeros","uri":"program://bitsandbytes/function/bitsandbytes.functional.coo_zeros#L1968-L1972","kind":"function","name":"coo_zeros","path":"bitsandbytes/functional.py","language":"python","start_line":1968,"end_line":1972,"context_start_line":1948,"context_end_line":1992,"code":" values, counts = torch.unique(cooA.rowidx, return_counts=True)\n values.add_(1)\n rowptr = torch.zeros((cooA.rows + 1,), dtype=torch.int32, device=cooA.rowidx.device)\n rowptr.scatter_(index=values.long(), src=counts.int(), dim=0)\n rowptr.cumsum_(0)\n return CSRSparseTensor(cooA.rows, cooA.cols, cooA.nnz, rowptr, cooA.colidx, cooA.values)\n\n\ndef coo2csc(cooA):\n val, col2rowidx = torch.sort(cooA.colidx)\n rowidx = cooA.rowidx[col2rowidx]\n values = cooA.values[col2rowidx]\n colvalues, counts = torch.unique(val, return_counts=True)\n colvalues.add_(1)\n colptr = torch.zeros((cooA.cols + 1,), dtype=torch.int32, device=cooA.colidx.device)\n colptr.scatter_(index=colvalues.long(), src=counts.int(), dim=0)\n colptr.cumsum_(0)\n return CSCSparseTensor(cooA.rows, cooA.cols, cooA.nnz, colptr, rowidx, values)\n\n\ndef coo_zeros(rows, cols, nnz, device, dtype=torch.half):\n rowidx = torch.zeros((nnz,), dtype=torch.int32, device=device)\n colidx = torch.zeros((nnz,), 
dtype=torch.int32, device=device)\n values = torch.zeros((nnz,), dtype=dtype, device=device)\n return COOSparseTensor(rows, cols, nnz, rowidx, colidx, values)\n\n\ndef int8_double_quant(\n A: torch.Tensor,\n col_stats: Optional[torch.Tensor] = None,\n row_stats: Optional[torch.Tensor] = None,\n out_col: Optional[torch.Tensor] = None,\n out_row: Optional[torch.Tensor] = None,\n threshold=0.0,\n):\n \"\"\"Determine the quantization statistics for input matrix `A` in accordance with the `LLM.int8()` algorithm.\n\n The statistics are determined both row-wise and column-wise (transposed).\n\n For more information, see the [LLM.int8() paper](https://arxiv.org/abs/2208.07339).\n\n \n This function is useful for training, but for inference it is advised to use [`int8_vectorwise_quant`] instead.\n This implementation performs additional column-wise transposed calculations which are not optimized.\n ","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.int8_double_quant","uri":"program://bitsandbytes/function/bitsandbytes.functional.int8_double_quant#L1975-L2023","kind":"function","name":"int8_double_quant","path":"bitsandbytes/functional.py","language":"python","start_line":1975,"end_line":2023,"context_start_line":1955,"context_end_line":2043,"code":"\ndef coo2csc(cooA):\n val, col2rowidx = torch.sort(cooA.colidx)\n rowidx = cooA.rowidx[col2rowidx]\n values = cooA.values[col2rowidx]\n colvalues, counts = torch.unique(val, return_counts=True)\n colvalues.add_(1)\n colptr = torch.zeros((cooA.cols + 1,), dtype=torch.int32, device=cooA.colidx.device)\n colptr.scatter_(index=colvalues.long(), src=counts.int(), dim=0)\n colptr.cumsum_(0)\n return CSCSparseTensor(cooA.rows, cooA.cols, cooA.nnz, colptr, rowidx, values)\n\n\ndef coo_zeros(rows, cols, nnz, device, dtype=torch.half):\n rowidx = torch.zeros((nnz,), dtype=torch.int32, device=device)\n colidx = torch.zeros((nnz,), dtype=torch.int32, device=device)\n values = torch.zeros((nnz,), dtype=dtype, device=device)\n return COOSparseTensor(rows, cols, nnz, rowidx, colidx, values)\n\n\ndef int8_double_quant(\n A: torch.Tensor,\n col_stats: Optional[torch.Tensor] = None,\n row_stats: Optional[torch.Tensor] = None,\n out_col: Optional[torch.Tensor] = None,\n out_row: Optional[torch.Tensor] = None,\n threshold=0.0,\n):\n \"\"\"Determine the quantization statistics for input matrix `A` in accordance with the `LLM.int8()` algorithm.\n\n The statistics are determined both row-wise and column-wise (transposed).\n\n For more information, see the [LLM.int8() paper](https://arxiv.org/abs/2208.07339).\n\n \n This function is useful for training, but for inference it is advised to use [`int8_vectorwise_quant`] instead.\n This implementation performs additional column-wise transposed calculations which are not optimized.\n \n\n Args:\n A (`torch.Tensor` with dtype `torch.float16`): The input matrix.\n col_stats (`torch.Tensor`, *optional*): A pre-allocated tensor to hold the column-wise quantization scales.\n row_stats (`torch.Tensor`, *optional*): A pre-allocated tensor to hold the row-wise quantization scales.\n out_col (`torch.Tensor`, *optional*): A pre-allocated tensor to hold the column-wise quantized data.\n out_row (`torch.Tensor`, *optional*): A pre-allocated tensor to hold the row-wise quantized data.\n threshold (`float`, *optional*):\n An optional threshold for sparse decomposition of outlier features.\n\n No outliers are held back when 0.0. 
Defaults to 0.0.\n\n Returns:\n `Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Optional[torch.Tensor]]`: A tuple containing the quantized tensor and relevant statistics.\n - `torch.Tensor` with dtype `torch.int8`: The row-wise quantized data.\n - `torch.Tensor` with dtype `torch.int8`: The column-wise quantized data.\n - `torch.Tensor` with dtype `torch.float32`: The row-wise quantization scales.\n - `torch.Tensor` with dtype `torch.float32`: The column-wise quantization scales.\n - `torch.Tensor` with dtype `torch.int32`, *optional*: A list of column indices which contain outlier features.\n \"\"\"\n\n if row_stats is not None:\n raise ValueError(\"row_stats must be None. int8_double_quant() does not support pre-allocated row_stats.\")\n if col_stats is not None:\n raise ValueError(\"col_stats must be None. int8_double_quant() does not support pre-allocated col_stats.\")\n if out_col is not None:\n raise ValueError(\"out_col must be None. int8_double_quant() does not support pre-allocated out_col.\")\n if out_row is not None:\n raise ValueError(\"out_row must be None. int8_double_quant() does not support pre-allocated out_row.\")\n\n return torch.ops.bitsandbytes.int8_double_quant.default(A, threshold=threshold)\n\n\ndef int8_vectorwise_dequant(A: torch.Tensor, stats: torch.Tensor):\n \"\"\"Dequantizes a tensor with dtype `torch.int8` to `torch.float32`.\n\n Args:\n A (`torch.Tensor` with dtype `torch.int8`): The quantized int8 tensor.\n stats (`torch.Tensor` with dtype `torch.float32`): The row-wise quantization statistics.\n\n Returns:\n `torch.Tensor` with dtype `torch.float32`: The dequantized tensor.\n \"\"\"\n # To dequantize we divide by 127, or multiply by the reciprocal.\n return torch.ops.bitsandbytes.int8_vectorwise_dequant.default(A, stats)\n\n\ndef int8_vectorwise_quant(A: torch.Tensor, threshold=0.0):\n \"\"\"Quantizes a tensor with dtype `torch.float16` to `torch.int8` in accordance to the `LLM.int8()` algorithm.\n\n For more information, see the [LLM.int8() paper](https://arxiv.org/abs/2208.07339).","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.int8_vectorwise_dequant","uri":"program://bitsandbytes/function/bitsandbytes.functional.int8_vectorwise_dequant#L2026-L2037","kind":"function","name":"int8_vectorwise_dequant","path":"bitsandbytes/functional.py","language":"python","start_line":2026,"end_line":2037,"context_start_line":2006,"context_end_line":2057,"code":" `Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Optional[torch.Tensor]]`: A tuple containing the quantized tensor and relevant statistics.\n - `torch.Tensor` with dtype `torch.int8`: The row-wise quantized data.\n - `torch.Tensor` with dtype `torch.int8`: The column-wise quantized data.\n - `torch.Tensor` with dtype `torch.float32`: The row-wise quantization scales.\n - `torch.Tensor` with dtype `torch.float32`: The column-wise quantization scales.\n - `torch.Tensor` with dtype `torch.int32`, *optional*: A list of column indices which contain outlier features.\n \"\"\"\n\n if row_stats is not None:\n raise ValueError(\"row_stats must be None. int8_double_quant() does not support pre-allocated row_stats.\")\n if col_stats is not None:\n raise ValueError(\"col_stats must be None. int8_double_quant() does not support pre-allocated col_stats.\")\n if out_col is not None:\n raise ValueError(\"out_col must be None. 
int8_double_quant() does not support pre-allocated out_col.\")\n if out_row is not None:\n raise ValueError(\"out_row must be None. int8_double_quant() does not support pre-allocated out_row.\")\n\n return torch.ops.bitsandbytes.int8_double_quant.default(A, threshold=threshold)\n\n\ndef int8_vectorwise_dequant(A: torch.Tensor, stats: torch.Tensor):\n \"\"\"Dequantizes a tensor with dtype `torch.int8` to `torch.float32`.\n\n Args:\n A (`torch.Tensor` with dtype `torch.int8`): The quantized int8 tensor.\n stats (`torch.Tensor` with dtype `torch.float32`): The row-wise quantization statistics.\n\n Returns:\n `torch.Tensor` with dtype `torch.float32`: The dequantized tensor.\n \"\"\"\n # To dequantize we divide by 127, or multiply by the reciprocal.\n return torch.ops.bitsandbytes.int8_vectorwise_dequant.default(A, stats)\n\n\ndef int8_vectorwise_quant(A: torch.Tensor, threshold=0.0):\n \"\"\"Quantizes a tensor with dtype `torch.float16` to `torch.int8` in accordance to the `LLM.int8()` algorithm.\n\n For more information, see the [LLM.int8() paper](https://arxiv.org/abs/2208.07339).\n\n Args:\n A (`torch.Tensor` with dtype `torch.float16`): The input tensor.\n threshold (`float`, *optional*):\n An optional threshold for sparse decomposition of outlier features.\n\n No outliers are held back when 0.0. Defaults to 0.0.\n\n Returns:\n `Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]`: A tuple containing the quantized tensor and relevant statistics.\n - `torch.Tensor` with dtype `torch.int8`: The quantized data.\n - `torch.Tensor` with dtype `torch.float32`: The quantization scales.\n - `torch.Tensor` with dtype `torch.int32`, *optional*: A list of column indices which contain outlier features.\n \"\"\"","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.int8_vectorwise_quant","uri":"program://bitsandbytes/function/bitsandbytes.functional.int8_vectorwise_quant#L2040-L2058","kind":"function","name":"int8_vectorwise_quant","path":"bitsandbytes/functional.py","language":"python","start_line":2040,"end_line":2058,"context_start_line":2020,"context_end_line":2078,"code":" if out_row is not None:\n raise ValueError(\"out_row must be None. int8_double_quant() does not support pre-allocated out_row.\")\n\n return torch.ops.bitsandbytes.int8_double_quant.default(A, threshold=threshold)\n\n\ndef int8_vectorwise_dequant(A: torch.Tensor, stats: torch.Tensor):\n \"\"\"Dequantizes a tensor with dtype `torch.int8` to `torch.float32`.\n\n Args:\n A (`torch.Tensor` with dtype `torch.int8`): The quantized int8 tensor.\n stats (`torch.Tensor` with dtype `torch.float32`): The row-wise quantization statistics.\n\n Returns:\n `torch.Tensor` with dtype `torch.float32`: The dequantized tensor.\n \"\"\"\n # To dequantize we divide by 127, or multiply by the reciprocal.\n return torch.ops.bitsandbytes.int8_vectorwise_dequant.default(A, stats)\n\n\ndef int8_vectorwise_quant(A: torch.Tensor, threshold=0.0):\n \"\"\"Quantizes a tensor with dtype `torch.float16` to `torch.int8` in accordance to the `LLM.int8()` algorithm.\n\n For more information, see the [LLM.int8() paper](https://arxiv.org/abs/2208.07339).\n\n Args:\n A (`torch.Tensor` with dtype `torch.float16`): The input tensor.\n threshold (`float`, *optional*):\n An optional threshold for sparse decomposition of outlier features.\n\n No outliers are held back when 0.0. 
Defaults to 0.0.\n\n Returns:\n `Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]`: A tuple containing the quantized tensor and relevant statistics.\n - `torch.Tensor` with dtype `torch.int8`: The quantized data.\n - `torch.Tensor` with dtype `torch.float32`: The quantization scales.\n - `torch.Tensor` with dtype `torch.int32`, *optional*: A list of column indices which contain outlier features.\n \"\"\"\n return torch.ops.bitsandbytes.int8_vectorwise_quant.default(A, threshold)\n\n\ndef spmm_coo(\n cooA: Union[COOSparseTensor, torch.Tensor],\n B: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n):\n if not isinstance(cooA, COOSparseTensor):\n assert cooA.is_sparse and cooA.layout == torch.sparse_coo, (\n \"Tensor must be `COOSparseTensor or a PyTorch COO tensor.\"\n )\n\n # Convert to custom COOSparseTensor\n cooA = COOSparseTensor(\n rows=cooA.shape[0],\n cols=cooA.shape[1],\n nnz=cooA._nnz(),\n rowidx=cooA.indices()[0].int(),\n colidx=cooA.indices()[1].int(),\n values=cooA.values(),","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.spmm_coo","uri":"program://bitsandbytes/function/bitsandbytes.functional.spmm_coo#L2061-L2125","kind":"function","name":"spmm_coo","path":"bitsandbytes/functional.py","language":"python","start_line":2061,"end_line":2125,"context_start_line":2041,"context_end_line":2145,"code":" \"\"\"Quantizes a tensor with dtype `torch.float16` to `torch.int8` in accordance to the `LLM.int8()` algorithm.\n\n For more information, see the [LLM.int8() paper](https://arxiv.org/abs/2208.07339).\n\n Args:\n A (`torch.Tensor` with dtype `torch.float16`): The input tensor.\n threshold (`float`, *optional*):\n An optional threshold for sparse decomposition of outlier features.\n\n No outliers are held back when 0.0. 
Defaults to 0.0.\n\n Returns:\n `Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]`: A tuple containing the quantized tensor and relevant statistics.\n - `torch.Tensor` with dtype `torch.int8`: The quantized data.\n - `torch.Tensor` with dtype `torch.float32`: The quantization scales.\n - `torch.Tensor` with dtype `torch.int32`, *optional*: A list of column indices which contain outlier features.\n \"\"\"\n return torch.ops.bitsandbytes.int8_vectorwise_quant.default(A, threshold)\n\n\ndef spmm_coo(\n cooA: Union[COOSparseTensor, torch.Tensor],\n B: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n):\n if not isinstance(cooA, COOSparseTensor):\n assert cooA.is_sparse and cooA.layout == torch.sparse_coo, (\n \"Tensor must be `COOSparseTensor or a PyTorch COO tensor.\"\n )\n\n # Convert to custom COOSparseTensor\n cooA = COOSparseTensor(\n rows=cooA.shape[0],\n cols=cooA.shape[1],\n nnz=cooA._nnz(),\n rowidx=cooA.indices()[0].int(),\n colidx=cooA.indices()[1].int(),\n values=cooA.values(),\n )\n\n if out is None:\n out = torch.empty((cooA.rows, B.shape[1]), device=B.device, dtype=B.dtype)\n nnz = cooA.nnz\n assert cooA.rowidx.numel() == nnz\n assert cooA.colidx.numel() == nnz\n assert cooA.values.numel() == nnz\n assert cooA.cols == B.shape[0]\n\n transposed_B = not B.is_contiguous()\n\n ldb = B.stride()[(1 if transposed_B else 0)]\n ldc = B.shape[1]\n\n ptr = Cusparse_Context.get_instance().context\n\n ptrRowidx = get_ptr(cooA.rowidx)\n ptrColidx = get_ptr(cooA.colidx)\n ptrValues = get_ptr(cooA.values)\n ptrB = get_ptr(B)\n ptrC = get_ptr(out)\n cnnz = ct.c_int32(cooA.nnz)\n crowsA = ct.c_int32(cooA.rows)\n ccolsA = ct.c_int32(cooA.cols)\n ccolsB = ct.c_int32(B.shape[1])\n cldb = ct.c_int32(ldb)\n cldc = ct.c_int32(ldc)\n\n is_on_gpu([cooA.rowidx, cooA.colidx, cooA.values, B, out])\n lib.cspmm_coo(\n ptr,\n ptrRowidx,\n ptrColidx,\n ptrValues,\n cnnz,\n crowsA,\n ccolsA,\n ccolsB,\n cldb,\n ptrB,\n cldc,\n ptrC,\n ct.c_bool(transposed_B),\n )\n\n return out\n\n\ndef spmm_coo_very_sparse(cooA, B, dequant_stats=None, out=None):\n if out is None:\n out = torch.zeros((cooA.rows, B.shape[1]), device=B.device, dtype=cooA.values.dtype)\n nnz = cooA.nnz\n\n assert cooA.rowidx.numel() == nnz\n assert cooA.colidx.numel() == nnz\n assert cooA.values.numel() == nnz\n assert cooA.cols == B.shape[0], f\"{cooA.cols} vs {B.shape}\"\n\n _, counts = torch.unique(cooA.rowidx, return_counts=True)\n offset = counts.cumsum(0).int()\n max_count, max_idx = torch.sort(counts, descending=True)\n max_idx = max_idx.int()\n max_count = max_count.int()\n assert max_count[0] <= 32, f\"Current max count per row is 8 but found {max_count[0]}.\"\n assert B.dtype in [torch.float16, torch.int8]\n ptrOffset = get_ptr(offset)","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.spmm_coo_very_sparse","uri":"program://bitsandbytes/function/bitsandbytes.functional.spmm_coo_very_sparse#L2128-L2199","kind":"function","name":"spmm_coo_very_sparse","path":"bitsandbytes/functional.py","language":"python","start_line":2128,"end_line":2199,"context_start_line":2108,"context_end_line":2202,"code":" is_on_gpu([cooA.rowidx, cooA.colidx, cooA.values, B, out])\n lib.cspmm_coo(\n ptr,\n ptrRowidx,\n ptrColidx,\n ptrValues,\n cnnz,\n crowsA,\n ccolsA,\n ccolsB,\n cldb,\n ptrB,\n cldc,\n ptrC,\n ct.c_bool(transposed_B),\n )\n\n return out\n\n\ndef spmm_coo_very_sparse(cooA, B, dequant_stats=None, out=None):\n if out is 
None:\n out = torch.zeros((cooA.rows, B.shape[1]), device=B.device, dtype=cooA.values.dtype)\n nnz = cooA.nnz\n\n assert cooA.rowidx.numel() == nnz\n assert cooA.colidx.numel() == nnz\n assert cooA.values.numel() == nnz\n assert cooA.cols == B.shape[0], f\"{cooA.cols} vs {B.shape}\"\n\n _, counts = torch.unique(cooA.rowidx, return_counts=True)\n offset = counts.cumsum(0).int()\n max_count, max_idx = torch.sort(counts, descending=True)\n max_idx = max_idx.int()\n max_count = max_count.int()\n assert max_count[0] <= 32, f\"Current max count per row is 32 but found {max_count[0]}.\"\n assert B.dtype in [torch.float16, torch.int8]\n ptrOffset = get_ptr(offset)\n ptrMaxCount = get_ptr(max_count)\n ptrMaxIdx = get_ptr(max_idx)\n\n ptrRowidx = get_ptr(cooA.rowidx)\n ptrColidx = get_ptr(cooA.colidx)\n ptrValues = get_ptr(cooA.values)\n ptrB = get_ptr(B)\n ptrC = get_ptr(out)\n ptrDequantStats = get_ptr(dequant_stats)\n cnnz_rows = ct.c_int32(counts.numel())\n cnnz = ct.c_int32(cooA.nnz)\n crowsA = ct.c_int32(cooA.rows)\n crowsB = ct.c_int32(B.shape[1])\n ccolsB = ct.c_int32(B.shape[1])\n\n with _cuda_device_of(B):\n is_on_gpu([cooA.rowidx, cooA.colidx, cooA.values, B, out, dequant_stats])\n if B.dtype == torch.float16:\n lib.cspmm_coo_very_sparse_naive_fp16(\n ptrMaxCount,\n ptrMaxIdx,\n ptrOffset,\n ptrRowidx,\n ptrColidx,\n ptrValues,\n ptrB,\n ptrC,\n ptrDequantStats,\n cnnz_rows,\n cnnz,\n crowsA,\n crowsB,\n ccolsB,\n )\n elif B.dtype == torch.int8:\n lib.cspmm_coo_very_sparse_naive_int8(\n ptrMaxCount,\n ptrMaxIdx,\n ptrOffset,\n ptrRowidx,\n ptrColidx,\n ptrValues,\n ptrB,\n ptrC,\n ptrDequantStats,\n cnnz_rows,\n cnnz,\n crowsA,\n crowsB,\n ccolsB,\n )\n # else: assertion error\n\n return out\n\n\nC = 127.0","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.__init__","uri":"program://bitsandbytes/function/bitsandbytes.functional.__init__#L1931-L1944","kind":"function","name":"__init__","path":"bitsandbytes/functional.py","language":"python","start_line":1931,"end_line":1944,"context_start_line":1911,"context_end_line":1964,"code":"\n\nclass CSRSparseTensor:\n def __init__(self, rows, cols, nnz, rowptr, colidx, values):\n assert rowptr.dtype == torch.int32\n assert colidx.dtype == torch.int32\n assert values.dtype == torch.float16\n assert values.numel() == nnz\n assert colidx.numel() == nnz\n assert rowptr.numel() == rows + 1\n\n self.rows = rows\n self.cols = cols\n self.nnz = nnz\n self.rowptr = rowptr\n self.colidx = colidx\n self.values = values\n\n\nclass CSCSparseTensor:\n def __init__(self, rows, cols, nnz, colptr, rowidx, values):\n assert colptr.dtype == torch.int32\n assert rowidx.dtype == torch.int32\n assert values.dtype == torch.float16\n assert values.numel() == nnz\n assert rowidx.numel() == nnz\n assert colptr.numel() == cols + 1\n\n self.rows = rows\n self.cols = cols\n self.nnz = nnz\n self.colptr = colptr\n self.rowidx = rowidx\n self.values = values\n\n\ndef coo2csr(cooA):\n values, counts = torch.unique(cooA.rowidx, return_counts=True)\n values.add_(1)\n rowptr = torch.zeros((cooA.rows + 1,), dtype=torch.int32, device=cooA.rowidx.device)\n rowptr.scatter_(index=values.long(), src=counts.int(), dim=0)\n rowptr.cumsum_(0)\n return CSRSparseTensor(cooA.rows, cooA.cols, cooA.nnz, rowptr, cooA.colidx, cooA.values)\n\n\ndef coo2csc(cooA):\n val, col2rowidx = torch.sort(cooA.colidx)\n rowidx = cooA.rowidx[col2rowidx]\n values = cooA.values[col2rowidx]\n 
colvalues, counts = torch.unique(val, return_counts=True)\n colvalues.add_(1)\n colptr = torch.zeros((cooA.cols + 1,), dtype=torch.int32, device=cooA.colidx.device)\n colptr.scatter_(index=colvalues.long(), src=counts.int(), dim=0)\n colptr.cumsum_(0)","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.initialize","uri":"program://bitsandbytes/function/bitsandbytes.functional.initialize#L106-L107","kind":"function","name":"initialize","path":"bitsandbytes/functional.py","language":"python","start_line":106,"end_line":107,"context_start_line":86,"context_end_line":127,"code":" if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def get_context(self, device):\n if device.index not in self.context:\n prev_device = torch.cuda.current_device()\n torch.cuda.set_device(device)\n self.context[device.index] = ct.c_void_p(lib.get_context())\n torch.cuda.set_device(prev_device)\n return self.context[device.index]\n\n\nclass Cusparse_Context:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.context = ct.c_void_p(lib.get_cusparse())\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n\nFIRST_CUDA_DEVICE = torch.device(\"cuda\", index=0)\n\n# When multiple GPUs are present, we use a context manager to\n# switch to the correct device of a tensor before invoking our CUDA\n# kernels in the C++ library. However, when there's only one device\n# there is no need to incur the overhead of cudaGetDevice/cudaSetDevice.\nif torch.cuda.device_count() > 1:\n\n def _cuda_device_of(a: torch.Tensor):\n return torch.cuda.device_of(a)\nelse:","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.get_instance","uri":"program://bitsandbytes/function/bitsandbytes.functional.get_instance#L110-L114","kind":"function","name":"get_instance","path":"bitsandbytes/functional.py","language":"python","start_line":110,"end_line":114,"context_start_line":90,"context_end_line":134,"code":"\n def get_context(self, device):\n if device.index not in self.context:\n prev_device = torch.cuda.current_device()\n torch.cuda.set_device(device)\n self.context[device.index] = ct.c_void_p(lib.get_context())\n torch.cuda.set_device(prev_device)\n return self.context[device.index]\n\n\nclass Cusparse_Context:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.context = ct.c_void_p(lib.get_cusparse())\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n\nFIRST_CUDA_DEVICE = torch.device(\"cuda\", index=0)\n\n# When multiple GPUs are present, we use a context manager to\n# switch to the correct device of a tensor before invoking our CUDA\n# kernels in the C++ library. 
However, when there's only one device\n# there is no need to incur the overhead of cudaGetDevice/cudaSetDevice.\nif torch.cuda.device_count() > 1:\n\n def _cuda_device_of(a: torch.Tensor):\n return torch.cuda.device_of(a)\nelse:\n import contextlib\n\n def _cuda_device_of(a: torch.Tensor):\n return contextlib.nullcontext()\n\n\ndef get_paged(*shape, dtype=torch.float32, device=FIRST_CUDA_DEVICE):","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.prefetch_all","uri":"program://bitsandbytes/function/bitsandbytes.functional.prefetch_all#L67-L72","kind":"function","name":"prefetch_all","path":"bitsandbytes/functional.py","language":"python","start_line":67,"end_line":72,"context_start_line":47,"context_end_line":92,"code":" ),\n}\n\n\nclass GlobalPageManager:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.paged_tensors = []\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def prefetch_all(self, to_cpu=False):\n # assume the first added, will be the\n # ones that are used first, so swap them in last\n # in the case they are evicted again\n for t in self.paged_tensors[::-1]:\n prefetch_tensor(t, to_cpu)\n\n\nclass CUBLAS_Context:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.context = {}\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def get_context(self, device):\n if device.index not in self.context:","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.get_context","uri":"program://bitsandbytes/function/bitsandbytes.functional.get_context#L91-L97","kind":"function","name":"get_context","path":"bitsandbytes/functional.py","language":"python","start_line":91,"end_line":97,"context_start_line":71,"context_end_line":117,"code":" for t in self.paged_tensors[::-1]:\n prefetch_tensor(t, to_cpu)\n\n\nclass CUBLAS_Context:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.context = {}\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def get_context(self, device):\n if device.index not in self.context:\n prev_device = torch.cuda.current_device()\n torch.cuda.set_device(device)\n self.context[device.index] = ct.c_void_p(lib.get_context())\n torch.cuda.set_device(prev_device)\n return self.context[device.index]\n\n\nclass Cusparse_Context:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.context = ct.c_void_p(lib.get_cusparse())\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n\nFIRST_CUDA_DEVICE = torch.device(\"cuda\", index=0)","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} 
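The conditional definition in the record above binds `_cuda_device_of` once at import time, so the single-GPU case never pays for cudaGetDevice/cudaSetDevice. A standalone sketch of the same guard pattern (the helper name `cuda_device_of` is hypothetical; unlike the library it branches on every call, purely for illustration):

```python
# Sketch of the device-guard pattern: with more than one visible GPU, a raw
# kernel must be launched with the tensor's own device current; with a single
# GPU the guard degenerates to a free nullcontext.
import contextlib

import torch

def cuda_device_of(t: torch.Tensor):
    if torch.cuda.device_count() > 1:
        return torch.cuda.device_of(t)  # makes t's device the current device
    return contextlib.nullcontext()  # single GPU: nothing to switch

if torch.cuda.is_available():
    x = torch.ones(4, device="cuda")
    with cuda_device_of(x):
        # anything launched here targets x's device
        assert torch.cuda.current_device() == x.device.index
```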
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional._cuda_device_of","uri":"program://bitsandbytes/function/bitsandbytes.functional._cuda_device_of#L130-L131","kind":"function","name":"_cuda_device_of","path":"bitsandbytes/functional.py","language":"python","start_line":130,"end_line":131,"context_start_line":110,"context_end_line":151,"code":" def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n\nFIRST_CUDA_DEVICE = torch.device(\"cuda\", index=0)\n\n# When multiple GPUs are present, we use a context manager to\n# switch to the correct device of a tensor before invoking our CUDA\n# kernels in the C++ library. However, when there's only one device\n# there is no need to incur the overhead of cudaGetDevice/cudaSetDevice.\nif torch.cuda.device_count() > 1:\n\n def _cuda_device_of(a: torch.Tensor):\n return torch.cuda.device_of(a)\nelse:\n import contextlib\n\n def _cuda_device_of(a: torch.Tensor):\n return contextlib.nullcontext()\n\n\ndef get_paged(*shape, dtype=torch.float32, device=FIRST_CUDA_DEVICE):\n num_bytes = dtype.itemsize * prod(shape)\n cuda_ptr = lib.cget_managed_ptr(ct.c_size_t(num_bytes))\n c_ptr = ct.cast(cuda_ptr, ct.POINTER(ct.c_int))\n new_array = np.ctypeslib.as_array(c_ptr, shape=shape)\n out = torch.frombuffer(new_array, dtype=dtype, count=prod(shape)).view(shape)\n out.is_paged = True\n out.page_deviceid = device.index\n return out\n\n\ndef prefetch_tensor(A: torch.Tensor, to_cpu=False):\n assert A.is_paged, \"Only paged tensors can be prefetched!\"\n if to_cpu:\n deviceid = -1\n else:\n deviceid = A.page_deviceid\n","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.__getitem__","uri":"program://bitsandbytes/function/bitsandbytes.functional.__getitem__#L434-L452","kind":"function","name":"__getitem__","path":"bitsandbytes/functional.py","language":"python","start_line":434,"end_line":452,"context_start_line":414,"context_end_line":472,"code":" self,\n absmax,\n shape=None,\n code=None,\n blocksize=None,\n quant_type=None,\n dtype=None,\n offset=None,\n state2=None,\n ):\n self.absmax = absmax\n self.shape = shape\n self.code = code\n self.dtype = dtype\n self.blocksize = blocksize\n self.quant_type = quant_type\n self.offset = offset\n self.state2 = state2\n self.nested = state2 is not None\n\n def __getitem__(self, idx):\n \"\"\"\n ensures compatibility with older quant state scheme with nested lists.\n assumes the following layout:\n state = [qabsmax, input_shape, A.dtype, blocksize, [offset, state2], quant_type]\n state2 = [absmax, input_shape, A.dtype, blocksize, None, quant_type]\n \"\"\"\n if self.nested:\n list_repr = [\n self.absmax,\n self.shape,\n self.dtype,\n self.blocksize,\n [self.offset, self.state2],\n self.quant_type,\n ]\n else:\n list_repr = [self.absmax, self.shape, self.dtype, self.blocksize, None, self.quant_type]\n return list_repr[idx]\n\n @classmethod\n def from_dict(cls, qs_dict: dict[str, Any], device: torch.device) -> \"QuantState\":\n \"\"\"\n unpacks components of state_dict into QuantState\n where necessary, convert into strings, torch.dtype, ints, etc.\n\n qs_dict: based on state_dict, with only relevant keys, striped of prefixes.\n\n item with key `quant_state.bitsandbytes__[nf4/fp4]` may contain minor and non-tensor quant state items.\n \"\"\"\n\n # unpacking tensor with non-tensor components\n qs_key = [k for k, v in 
qs_dict.items() if \"quant_state\" in k and isinstance(v, torch.Tensor)]\n if not len(qs_key) and \"quant_type\" not in qs_dict:\n raise ValueError(\"Expected packed or unpacked quant_state items, found neither\")\n elif len(qs_key) != 1 or qs_key[0].split(\".\")[-1] not in cls.valid_qs_type_keys:\n raise ValueError(\n f\"There should be exactly one `quant_state` item with ending from {cls.valid_qs_type_keys}.\\nDetected {qs_key}.\",\n )","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.from_dict","uri":"program://bitsandbytes/function/bitsandbytes.functional.from_dict#L455-L503","kind":"function","name":"from_dict","path":"bitsandbytes/functional.py","language":"python","start_line":455,"end_line":503,"context_start_line":435,"context_end_line":523,"code":" \"\"\"\n ensures compatibility with older quant state scheme with nested lists.\n assumes the following layout:\n state = [qabsmax, input_shape, A.dtype, blocksize, [offset, state2], quant_type]\n state2 = [absmax, input_shape, A.dtype, blocksize, None, quant_type]\n \"\"\"\n if self.nested:\n list_repr = [\n self.absmax,\n self.shape,\n self.dtype,\n self.blocksize,\n [self.offset, self.state2],\n self.quant_type,\n ]\n else:\n list_repr = [self.absmax, self.shape, self.dtype, self.blocksize, None, self.quant_type]\n return list_repr[idx]\n\n @classmethod\n def from_dict(cls, qs_dict: dict[str, Any], device: torch.device) -> \"QuantState\":\n \"\"\"\n unpacks components of state_dict into QuantState\n where necessary, convert into strings, torch.dtype, ints, etc.\n\n qs_dict: based on state_dict, with only relevant keys, striped of prefixes.\n\n item with key `quant_state.bitsandbytes__[nf4/fp4]` may contain minor and non-tensor quant state items.\n \"\"\"\n\n # unpacking tensor with non-tensor components\n qs_key = [k for k, v in qs_dict.items() if \"quant_state\" in k and isinstance(v, torch.Tensor)]\n if not len(qs_key) and \"quant_type\" not in qs_dict:\n raise ValueError(\"Expected packed or unpacked quant_state items, found neither\")\n elif len(qs_key) != 1 or qs_key[0].split(\".\")[-1] not in cls.valid_qs_type_keys:\n raise ValueError(\n f\"There should be exactly one `quant_state` item with ending from {cls.valid_qs_type_keys}.\\nDetected {qs_key}.\",\n )\n\n # unpacking minor and non-tensor quant state items if necessary\n if len(qs_key) == 1:\n first_qs_key = qs_key[0]\n qs_dict.update(unpack_tensor_to_dict(qs_dict.pop(first_qs_key)))\n\n qs_dict = {k.split(\".\")[-1]: v for k, v in qs_dict.items()} # strip prefixes\n assert set(qs_dict.keys()).issubset(cls.valid_qs_keys)\n\n if \"nested_absmax\" in qs_dict:\n offset = torch.tensor(float(qs_dict[\"nested_offset\"])).to(device)\n state2 = cls(\n absmax=qs_dict[\"nested_absmax\"].to(device),\n blocksize=qs_dict[\"nested_blocksize\"],\n code=qs_dict[\"nested_quant_map\"].to(device),\n dtype=getattr(torch, qs_dict[\"nested_dtype\"]),\n )\n else:\n offset, state2 = None, None\n\n quant_state = cls(\n quant_type=qs_dict[\"quant_type\"],\n absmax=qs_dict[\"absmax\"].to(device),\n blocksize=qs_dict[\"blocksize\"],\n code=qs_dict[\"quant_map\"].to(device),\n dtype=getattr(torch, qs_dict[\"dtype\"]),\n shape=torch.Size(qs_dict[\"shape\"]) if qs_dict[\"shape\"] is not None else None,\n offset=offset,\n state2=state2,\n )\n return quant_state\n\n def as_dict(self, packed=False):\n \"\"\"\n returns dict of tensors and strings to use in serialization via 
_save_to_state_dict()\n param: packed -- returns dict[str, torch.Tensor] for state_dict fit for safetensors saving\n \"\"\"\n qs_dict = {\n \"quant_type\": self.quant_type,\n \"absmax\": self.absmax,\n \"blocksize\": self.blocksize,\n \"quant_map\": self.code,\n \"dtype\": str(self.dtype).strip(\"torch.\"),\n \"shape\": tuple(self.shape),\n }\n if self.nested:\n qs_dict.update(\n {\n \"nested_absmax\": self.state2.absmax,\n \"nested_blocksize\": self.state2.blocksize,\n \"nested_quant_map\": self.state2.code.clone(), # un-shared to avoid restoring it after shared tensors are removed by safetensors","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.as_dict","uri":"program://bitsandbytes/function/bitsandbytes.functional.as_dict#L505-L535","kind":"function","name":"as_dict","path":"bitsandbytes/functional.py","language":"python","start_line":505,"end_line":535,"context_start_line":485,"context_end_line":555,"code":" absmax=qs_dict[\"nested_absmax\"].to(device),\n blocksize=qs_dict[\"nested_blocksize\"],\n code=qs_dict[\"nested_quant_map\"].to(device),\n dtype=getattr(torch, qs_dict[\"nested_dtype\"]),\n )\n else:\n offset, state2 = None, None\n\n quant_state = cls(\n quant_type=qs_dict[\"quant_type\"],\n absmax=qs_dict[\"absmax\"].to(device),\n blocksize=qs_dict[\"blocksize\"],\n code=qs_dict[\"quant_map\"].to(device),\n dtype=getattr(torch, qs_dict[\"dtype\"]),\n shape=torch.Size(qs_dict[\"shape\"]) if qs_dict[\"shape\"] is not None else None,\n offset=offset,\n state2=state2,\n )\n return quant_state\n\n def as_dict(self, packed=False):\n \"\"\"\n returns dict of tensors and strings to use in serialization via _save_to_state_dict()\n param: packed -- returns dict[str, torch.Tensor] for state_dict fit for safetensors saving\n \"\"\"\n qs_dict = {\n \"quant_type\": self.quant_type,\n \"absmax\": self.absmax,\n \"blocksize\": self.blocksize,\n \"quant_map\": self.code,\n \"dtype\": str(self.dtype).strip(\"torch.\"),\n \"shape\": tuple(self.shape),\n }\n if self.nested:\n qs_dict.update(\n {\n \"nested_absmax\": self.state2.absmax,\n \"nested_blocksize\": self.state2.blocksize,\n \"nested_quant_map\": self.state2.code.clone(), # un-shared to avoid restoring it after shared tensors are removed by safetensors\n \"nested_dtype\": str(self.state2.dtype).strip(\"torch.\"),\n \"nested_offset\": self.offset.item(),\n },\n )\n if not packed:\n return qs_dict\n\n # packed format allows serialization of non-tensor components, critical for saving in safetensors format\n qs_packed_dict = {k: v for k, v in qs_dict.items() if isinstance(v, torch.Tensor)}\n non_tensor_dict = {k: v for k, v in qs_dict.items() if not isinstance(v, torch.Tensor)}\n qs_packed_dict[\"quant_state.\" + \"bitsandbytes__\" + self.quant_type] = pack_dict_to_tensor(non_tensor_dict)\n return qs_packed_dict\n\n def to(self, device):\n # make sure the quantization state is on the right device\n self.code = self.code.to(device)\n self.absmax = self.absmax.to(device)\n if self.nested:\n self.offset = self.offset.to(device)\n self.state2.absmax = self.state2.absmax.to(device)\n self.state2.code = self.state2.code.to(device)\n\n def __eq__(self, other):\n if not isinstance(other, QuantState):\n return False\n\n return (\n torch.allclose(self.absmax, other.absmax, atol=1e-6)\n and self.shape == other.shape\n and torch.allclose(self.code, other.code, atol=1e-6)\n and self.dtype == other.dtype\n and self.blocksize == 
other.blocksize","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.to","uri":"program://bitsandbytes/function/bitsandbytes.functional.to#L537-L544","kind":"function","name":"to","path":"bitsandbytes/functional.py","language":"python","start_line":537,"end_line":544,"context_start_line":517,"context_end_line":564,"code":" }\n if self.nested:\n qs_dict.update(\n {\n \"nested_absmax\": self.state2.absmax,\n \"nested_blocksize\": self.state2.blocksize,\n \"nested_quant_map\": self.state2.code.clone(), # un-shared to avoid restoring it after shared tensors are removed by safetensors\n \"nested_dtype\": str(self.state2.dtype).strip(\"torch.\"),\n \"nested_offset\": self.offset.item(),\n },\n )\n if not packed:\n return qs_dict\n\n # packed format allows serialization of non-tensor components, critical for saving in safetensors format\n qs_packed_dict = {k: v for k, v in qs_dict.items() if isinstance(v, torch.Tensor)}\n non_tensor_dict = {k: v for k, v in qs_dict.items() if not isinstance(v, torch.Tensor)}\n qs_packed_dict[\"quant_state.\" + \"bitsandbytes__\" + self.quant_type] = pack_dict_to_tensor(non_tensor_dict)\n return qs_packed_dict\n\n def to(self, device):\n # make sure the quantization state is on the right device\n self.code = self.code.to(device)\n self.absmax = self.absmax.to(device)\n if self.nested:\n self.offset = self.offset.to(device)\n self.state2.absmax = self.state2.absmax.to(device)\n self.state2.code = self.state2.code.to(device)\n\n def __eq__(self, other):\n if not isinstance(other, QuantState):\n return False\n\n return (\n torch.allclose(self.absmax, other.absmax, atol=1e-6)\n and self.shape == other.shape\n and torch.allclose(self.code, other.code, atol=1e-6)\n and self.dtype == other.dtype\n and self.blocksize == other.blocksize\n and self.quant_type == other.quant_type\n and (\n self.offset == other.offset\n if self.offset is not None and other.offset is not None\n else self.offset is other.offset\n )\n and (\n self.state2 == other.state2\n if self.state2 is not None and other.state2 is not None","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.functional.__eq__","uri":"program://bitsandbytes/function/bitsandbytes.functional.__eq__#L546-L567","kind":"function","name":"__eq__","path":"bitsandbytes/functional.py","language":"python","start_line":546,"end_line":567,"context_start_line":526,"context_end_line":587,"code":" },\n )\n if not packed:\n return qs_dict\n\n # packed format allows serialization of non-tensor components, critical for saving in safetensors format\n qs_packed_dict = {k: v for k, v in qs_dict.items() if isinstance(v, torch.Tensor)}\n non_tensor_dict = {k: v for k, v in qs_dict.items() if not isinstance(v, torch.Tensor)}\n qs_packed_dict[\"quant_state.\" + \"bitsandbytes__\" + self.quant_type] = pack_dict_to_tensor(non_tensor_dict)\n return qs_packed_dict\n\n def to(self, device):\n # make sure the quantization state is on the right device\n self.code = self.code.to(device)\n self.absmax = self.absmax.to(device)\n if self.nested:\n self.offset = self.offset.to(device)\n self.state2.absmax = self.state2.absmax.to(device)\n self.state2.code = self.state2.code.to(device)\n\n def __eq__(self, other):\n if not isinstance(other, QuantState):\n return False\n\n return (\n torch.allclose(self.absmax, other.absmax, atol=1e-6)\n and 
self.shape == other.shape\n and torch.allclose(self.code, other.code, atol=1e-6)\n and self.dtype == other.dtype\n and self.blocksize == other.blocksize\n and self.quant_type == other.quant_type\n and (\n self.offset == other.offset\n if self.offset is not None and other.offset is not None\n else self.offset is other.offset\n )\n and (\n self.state2 == other.state2\n if self.state2 is not None and other.state2 is not None\n else self.state2 is other.state2\n )\n )\n\n\ndef quantize_blockwise(\n A: torch.Tensor,\n code: Optional[torch.Tensor] = None,\n absmax: Optional[torch.Tensor] = None,\n out: Optional[torch.Tensor] = None,\n blocksize=4096,\n nested=False,\n) -> tuple[torch.Tensor, QuantState]:\n \"\"\"Quantize a tensor in blocks of values.\n\n The input tensor is quantized by dividing it into blocks of `blocksize` values.\n The the absolute maximum value within these blocks is calculated for scaling\n the non-linear quantization.\n\n Args:\n A (`torch.Tensor`): The input tensor. Supports `float16`, `bfloat16`, or `float32` datatypes.\n code (`torch.Tensor`, *optional*):\n A mapping describing the low-bit data type. Defaults to a signed 8-bit dynamic type.","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.__main__","uri":"program://bitsandbytes/module/bitsandbytes.__main__#L1-L4","kind":"module","name":"bitsandbytes.__main__","path":"bitsandbytes/__main__.py","language":"python","start_line":1,"end_line":4,"context_start_line":1,"context_end_line":4,"code":"if __name__ == \"__main__\":\n from bitsandbytes.diagnostics.main import main\n\n main()","source_hash":"037880f501fcc9f9feeb4b8deb0ffb5948ae44833befbad9fe97ff1ec66db1ee","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.utils","uri":"program://bitsandbytes/module/bitsandbytes.utils#L1-L205","kind":"module","name":"bitsandbytes.utils","path":"bitsandbytes/utils.py","language":"python","start_line":1,"end_line":205,"context_start_line":1,"context_end_line":205,"code":"import json\nimport shlex\nimport subprocess\n\nimport torch\n\n\ndef outlier_hook(module, input):\n assert isinstance(module, torch.nn.Linear)\n tracer = OutlierTracer.get_instance()\n hvalue = tracer.get_hvalue(module.weight)\n if hvalue not in tracer.hvalue2outlier_idx:\n outlier_idx = find_outlier_dims(module.weight)\n tracer.outliers.append(outlier_idx)\n tracer.hvalues.append(hvalue)\n if len(tracer.outliers) > 1:\n # assign the current layer the outlier idx found from the weight\n # of the previous linear layer\n if tracer.outliers[-1].numel() > 0:\n assert tracer.outliers[-1].max() < module.weight.shape[1]\n tracer.hvalue2outlier_idx[hvalue] = tracer.outliers[-1]\n\n else:\n # first layer, we cannot use the weight for outlier detection\n # we follow a mixed approach:\n # (1) zscore test of std of hidden dimension\n # (2) magnitude > 6 test\n merged = input[0].view(-1, input[0].shape[-1])\n # (1) zscore test of std of hidden dimension\n outlier_idx = find_outlier_dims(merged, reduction_dim=1, zscore=3)\n # (2) magnitude > 6 test\n dims = (torch.abs(input[0]) > 6).sum(dim=list(range(len(input[0].shape) - 1)))\n outlier_idx2 = torch.where(dims > 0)[0]\n outlier_idx = torch.cat([outlier_idx, outlier_idx2]).unique()\n tracer.hvalue2outlier_idx[hvalue] = outlier_idx\n else:\n for hook in tracer.hooks:\n hook.remove()\n\n\nclass OutlierTracer:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() 
instead\")\n\n def initialize(self, model):\n self.last_w = None\n self.current_outlier_dims = None\n self.hvalues = []\n self.outliers = []\n self.hvalue2outlier_idx = {}\n self.initialized = True\n self.hooks = []\n\n for n, m in model.named_modules():\n if isinstance(m, torch.nn.Linear):\n self.hooks.append(m.register_forward_pre_hook(outlier_hook))\n\n def is_initialized(self):\n return getattr(self, \"initialized\", False)\n\n def get_hvalue(self, weight):\n return weight.data.storage().data_ptr()\n\n def get_outliers(self, weight):\n if not self.is_initialized():\n print(\"Outlier tracer is not initialized...\")\n return None\n hvalue = self.get_hvalue(weight)\n if hvalue in self.hvalue2outlier_idx:\n return self.hvalue2outlier_idx[hvalue]\n else:\n return None\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n return cls._instance\n\n\ndef find_outlier_dims(weight, reduction_dim=0, zscore=4.0, topk=None, rdm=False):\n if rdm:\n return torch.randint(0, weight.shape[1], size=(topk,), device=weight.device).long()\n\n std = weight.std(reduction_dim)\n stdm = std.mean()\n stdstd = std.std()\n\n zstd = (std - stdm) / stdstd\n\n if topk is not None:\n val, idx = torch.topk(std.abs(), k=topk, dim=0)\n else:\n idx = torch.where(zstd > zscore)[0]\n\n return idx\n\n\ndef execute_and_return(command_string: str) -> tuple[str, str]:\n def _decode(subprocess_err_out_tuple):\n return tuple(to_decode.decode(\"UTF-8\").strip() for to_decode in subprocess_err_out_tuple)\n\n def execute_and_return_decoded_std_streams(command_string):\n return _decode(\n subprocess.Popen(\n shlex.split(command_string),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n ).communicate(),\n )\n\n std_out, std_err = execute_and_return_decoded_std_streams(command_string)\n return std_out, std_err\n\n\ndef replace_linear(\n model,\n linear_replacement,\n skip_modules=(\"lm_head\",),\n copy_weights=False,\n post_processing_function=None,\n):\n \"\"\"\n Replace linear modules with a new Linear module.\n Parameters:\n model (`torch.nn.Module`):\n Input model or `torch.nn.Module` as the function is run recursively.\n linear_replacement (`torch.nn.Module`):\n The linear module that replaces the old one. Only expects standard arguments.\n If other arguments need to be passed, use a lambda.\n skip_modules (`List[str]`, *optional*, defaults to `lm_head`):\n List of modules names not to convert. 
Defaults to `lm_head`.\n copy_weights (`bool`):\n Copy the weights from the old linear module to the new one\n post_processing_function (`str`):\n A function name of the replacement linear class that is called\n after processing.\n \"\"\"\n for name, module in model.named_children():\n if len(list(module.children())) > 0:\n replace_linear(module, linear_replacement, skip_modules, copy_weights, post_processing_function)\n\n if isinstance(module, torch.nn.Linear) and name not in skip_modules:\n old_module = model._modules[name]\n model._modules[name] = linear_replacement(\n module.in_features,\n module.out_features,\n module.bias is not None,\n )\n if copy_weights:\n model._modules[name].weight = old_module.weight\n model._modules[name].bias = old_module.bias\n\n if post_processing_function is not None:\n func = getattr(module, post_processing_function, None)\n if func is not None:\n func(module)\n return model\n\n\ndef pack_dict_to_tensor(source_dict):\n \"\"\"\n Pack a dictionary into a torch tensor for storing quant_state items in state_dict.\n\n Parameters:\n - source_dict: The dictionary to be packed.\n\n Returns:\n A torch tensor containing the packed data.\n \"\"\"\n json_str = json.dumps(source_dict)\n json_bytes = json_str.encode(\"utf-8\")\n tensor_data = torch.tensor(list(json_bytes), dtype=torch.uint8)\n\n return tensor_data\n\n\ndef unpack_tensor_to_dict(tensor_data):\n \"\"\"\n Unpack a torch tensor into a Python dictionary.\n\n Parameters:\n - tensor_data: The torch tensor containing the packed data.\n\n Returns:\n A Python dictionary containing the unpacked data.\n \"\"\"\n json_bytes = bytes(tensor_data.cpu().numpy())\n json_str = json_bytes.decode(\"utf-8\")\n unpacked_dict = json.loads(json_str)\n\n return unpacked_dict\n\n\nLINEAR_8BIT_WEIGHTS_FORMAT_MAPPING = {\"row\": 0, \"col32\": 1, \"col_turing\": 2, \"col_ampere\": 3}\nINVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING = {val: name for (name, val) in LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING.items()}\n\n\ndef sync_gpu(t: torch.Tensor):\n if t.device.type == \"cuda\":\n torch.cuda.synchronize()\n elif t.device.type == \"xpu\":\n torch.xpu.synchronize()","source_hash":"32f5b1b67b5863eff7753df611fdd08575cbafcf70a7c241fd1c8fee2bd2fdb8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.utils.outlier_hook","uri":"program://bitsandbytes/function/bitsandbytes.utils.outlier_hook#L8-L38","kind":"function","name":"outlier_hook","path":"bitsandbytes/utils.py","language":"python","start_line":8,"end_line":38,"context_start_line":1,"context_end_line":58,"code":"import json\nimport shlex\nimport subprocess\n\nimport torch\n\n\ndef outlier_hook(module, input):\n assert isinstance(module, torch.nn.Linear)\n tracer = OutlierTracer.get_instance()\n hvalue = tracer.get_hvalue(module.weight)\n if hvalue not in tracer.hvalue2outlier_idx:\n outlier_idx = find_outlier_dims(module.weight)\n tracer.outliers.append(outlier_idx)\n tracer.hvalues.append(hvalue)\n if len(tracer.outliers) > 1:\n # assign the current layer the outlier idx found from the weight\n # of the previous linear layer\n if tracer.outliers[-1].numel() > 0:\n assert tracer.outliers[-1].max() < module.weight.shape[1]\n tracer.hvalue2outlier_idx[hvalue] = tracer.outliers[-1]\n\n else:\n # first layer, we cannot use the weight for outlier detection\n # we follow a mixed approach:\n # (1) zscore test of std of hidden dimension\n # (2) magnitude > 6 test\n merged = input[0].view(-1, input[0].shape[-1])\n # (1) zscore test of std of hidden dimension\n outlier_idx = 
find_outlier_dims(merged, reduction_dim=1, zscore=3)\n # (2) magnitude > 6 test\n dims = (torch.abs(input[0]) > 6).sum(dim=list(range(len(input[0].shape) - 1)))\n outlier_idx2 = torch.where(dims > 0)[0]\n outlier_idx = torch.cat([outlier_idx, outlier_idx2]).unique()\n tracer.hvalue2outlier_idx[hvalue] = outlier_idx\n else:\n for hook in tracer.hooks:\n hook.remove()\n\n\nclass OutlierTracer:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self, model):\n self.last_w = None\n self.current_outlier_dims = None\n self.hvalues = []\n self.outliers = []\n self.hvalue2outlier_idx = {}\n self.initialized = True\n self.hooks = []\n\n for n, m in model.named_modules():\n if isinstance(m, torch.nn.Linear):\n self.hooks.append(m.register_forward_pre_hook(outlier_hook))","source_hash":"32f5b1b67b5863eff7753df611fdd08575cbafcf70a7c241fd1c8fee2bd2fdb8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.utils.OutlierTracer","uri":"program://bitsandbytes/class/bitsandbytes.utils.OutlierTracer#L41-L80","kind":"class","name":"OutlierTracer","path":"bitsandbytes/utils.py","language":"python","start_line":41,"end_line":80,"context_start_line":21,"context_end_line":100,"code":" tracer.hvalue2outlier_idx[hvalue] = tracer.outliers[-1]\n\n else:\n # first layer, we cannot use the weight for outlier detection\n # we follow a mixed approach:\n # (1) zscore test of std of hidden dimension\n # (2) magnitude > 6 test\n merged = input[0].view(-1, input[0].shape[-1])\n # (1) zscore test of std of hidden dimension\n outlier_idx = find_outlier_dims(merged, reduction_dim=1, zscore=3)\n # (2) magnitude > 6 test\n dims = (torch.abs(input[0]) > 6).sum(dim=list(range(len(input[0].shape) - 1)))\n outlier_idx2 = torch.where(dims > 0)[0]\n outlier_idx = torch.cat([outlier_idx, outlier_idx2]).unique()\n tracer.hvalue2outlier_idx[hvalue] = outlier_idx\n else:\n for hook in tracer.hooks:\n hook.remove()\n\n\nclass OutlierTracer:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self, model):\n self.last_w = None\n self.current_outlier_dims = None\n self.hvalues = []\n self.outliers = []\n self.hvalue2outlier_idx = {}\n self.initialized = True\n self.hooks = []\n\n for n, m in model.named_modules():\n if isinstance(m, torch.nn.Linear):\n self.hooks.append(m.register_forward_pre_hook(outlier_hook))\n\n def is_initialized(self):\n return getattr(self, \"initialized\", False)\n\n def get_hvalue(self, weight):\n return weight.data.storage().data_ptr()\n\n def get_outliers(self, weight):\n if not self.is_initialized():\n print(\"Outlier tracer is not initialized...\")\n return None\n hvalue = self.get_hvalue(weight)\n if hvalue in self.hvalue2outlier_idx:\n return self.hvalue2outlier_idx[hvalue]\n else:\n return None\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n return cls._instance\n\n\ndef find_outlier_dims(weight, reduction_dim=0, zscore=4.0, topk=None, rdm=False):\n if rdm:\n return torch.randint(0, weight.shape[1], size=(topk,), device=weight.device).long()\n\n std = weight.std(reduction_dim)\n stdm = std.mean()\n stdstd = std.std()\n\n zstd = (std - stdm) / stdstd\n\n if topk is not None:\n val, idx = torch.topk(std.abs(), k=topk, dim=0)\n else:\n idx = torch.where(zstd > zscore)[0]\n\n return idx\n\n","source_hash":"32f5b1b67b5863eff7753df611fdd08575cbafcf70a7c241fd1c8fee2bd2fdb8","truncated":false} 
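Taken together, the QuantState records above define a complete serialization story: as_dict(packed=True) folds every non-tensor field into a single uint8 tensor under the key quant_state.bitsandbytes__<quant_type>, and from_dict(qs_dict, device) reverses the packing and re-materializes the state on a target device. A minimal round-trip sketch, assuming a CUDA device and quantize_4bit from the same functional module (not shown in this excerpt):

import torch
from bitsandbytes.functional import QuantState, quantize_4bit

A = torch.randn(64, 64, device="cuda", dtype=torch.float16)
_, state = quantize_4bit(A, quant_type="nf4")  # returns (packed weights, QuantState)

packed = state.as_dict(packed=True)            # dict[str, torch.Tensor], safetensors-ready
restored = QuantState.from_dict(packed, device=torch.device("cuda"))
assert restored == state                       # __eq__ compares absmax, code, shape, dtype, ...

Note that from_dict pops the packed key out of the mapping it is given, so pass a copy if the original dict must survive the call.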
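OutlierTracer is a singleton by construction: __init__ always raises, so instances are only reachable through get_instance(), which allocates via __new__; initialize(model) then registers outlier_hook as a forward pre-hook on every nn.Linear. A usage sketch (the hooks only populate hvalue2outlier_idx once a forward pass has run, and get_outliers returns None for weights with no recorded entry):

import torch
from bitsandbytes.utils import OutlierTracer

model = torch.nn.Sequential(torch.nn.Linear(128, 128), torch.nn.Linear(128, 128))
tracer = OutlierTracer.get_instance()
tracer.initialize(model)                  # attaches forward pre-hooks to both Linears

model(torch.randn(4, 128))                # hooks record per-weight outlier indices
idx = tracer.get_outliers(model[0].weight)
print(idx)                                # LongTensor of outlier dims, or None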
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.utils.find_outlier_dims","uri":"program://bitsandbytes/function/bitsandbytes.utils.find_outlier_dims#L83-L98","kind":"function","name":"find_outlier_dims","path":"bitsandbytes/utils.py","language":"python","start_line":83,"end_line":98,"context_start_line":63,"context_end_line":118,"code":" def get_hvalue(self, weight):\n return weight.data.storage().data_ptr()\n\n def get_outliers(self, weight):\n if not self.is_initialized():\n print(\"Outlier tracer is not initialized...\")\n return None\n hvalue = self.get_hvalue(weight)\n if hvalue in self.hvalue2outlier_idx:\n return self.hvalue2outlier_idx[hvalue]\n else:\n return None\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n return cls._instance\n\n\ndef find_outlier_dims(weight, reduction_dim=0, zscore=4.0, topk=None, rdm=False):\n if rdm:\n return torch.randint(0, weight.shape[1], size=(topk,), device=weight.device).long()\n\n std = weight.std(reduction_dim)\n stdm = std.mean()\n stdstd = std.std()\n\n zstd = (std - stdm) / stdstd\n\n if topk is not None:\n val, idx = torch.topk(std.abs(), k=topk, dim=0)\n else:\n idx = torch.where(zstd > zscore)[0]\n\n return idx\n\n\ndef execute_and_return(command_string: str) -> tuple[str, str]:\n def _decode(subprocess_err_out_tuple):\n return tuple(to_decode.decode(\"UTF-8\").strip() for to_decode in subprocess_err_out_tuple)\n\n def execute_and_return_decoded_std_streams(command_string):\n return _decode(\n subprocess.Popen(\n shlex.split(command_string),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n ).communicate(),\n )\n\n std_out, std_err = execute_and_return_decoded_std_streams(command_string)\n return std_out, std_err\n\n\ndef replace_linear(","source_hash":"32f5b1b67b5863eff7753df611fdd08575cbafcf70a7c241fd1c8fee2bd2fdb8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.utils.execute_and_return","uri":"program://bitsandbytes/function/bitsandbytes.utils.execute_and_return#L101-L115","kind":"function","name":"execute_and_return","path":"bitsandbytes/utils.py","language":"python","start_line":101,"end_line":115,"context_start_line":81,"context_end_line":135,"code":"\n\ndef find_outlier_dims(weight, reduction_dim=0, zscore=4.0, topk=None, rdm=False):\n if rdm:\n return torch.randint(0, weight.shape[1], size=(topk,), device=weight.device).long()\n\n std = weight.std(reduction_dim)\n stdm = std.mean()\n stdstd = std.std()\n\n zstd = (std - stdm) / stdstd\n\n if topk is not None:\n val, idx = torch.topk(std.abs(), k=topk, dim=0)\n else:\n idx = torch.where(zstd > zscore)[0]\n\n return idx\n\n\ndef execute_and_return(command_string: str) -> tuple[str, str]:\n def _decode(subprocess_err_out_tuple):\n return tuple(to_decode.decode(\"UTF-8\").strip() for to_decode in subprocess_err_out_tuple)\n\n def execute_and_return_decoded_std_streams(command_string):\n return _decode(\n subprocess.Popen(\n shlex.split(command_string),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n ).communicate(),\n )\n\n std_out, std_err = execute_and_return_decoded_std_streams(command_string)\n return std_out, std_err\n\n\ndef replace_linear(\n model,\n linear_replacement,\n skip_modules=(\"lm_head\",),\n copy_weights=False,\n post_processing_function=None,\n):\n \"\"\"\n Replace linear modules with a new Linear module.\n Parameters:\n model (`torch.nn.Module`):\n Input model or `torch.nn.Module` as the function is run recursively.\n linear_replacement (`torch.nn.Module`):\n The 
linear module that replaces the old one. Only expects standard arguments.\n If other arguments need to be passed, use a lambda.\n skip_modules (`List[str]`, *optional*, defaults to `lm_head`):\n List of modules names not to convert. Defaults to `lm_head`.\n copy_weights (`bool`):","source_hash":"32f5b1b67b5863eff7753df611fdd08575cbafcf70a7c241fd1c8fee2bd2fdb8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.utils.replace_linear","uri":"program://bitsandbytes/function/bitsandbytes.utils.replace_linear#L118-L160","kind":"function","name":"replace_linear","path":"bitsandbytes/utils.py","language":"python","start_line":118,"end_line":160,"context_start_line":98,"context_end_line":180,"code":" return idx\n\n\ndef execute_and_return(command_string: str) -> tuple[str, str]:\n def _decode(subprocess_err_out_tuple):\n return tuple(to_decode.decode(\"UTF-8\").strip() for to_decode in subprocess_err_out_tuple)\n\n def execute_and_return_decoded_std_streams(command_string):\n return _decode(\n subprocess.Popen(\n shlex.split(command_string),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n ).communicate(),\n )\n\n std_out, std_err = execute_and_return_decoded_std_streams(command_string)\n return std_out, std_err\n\n\ndef replace_linear(\n model,\n linear_replacement,\n skip_modules=(\"lm_head\",),\n copy_weights=False,\n post_processing_function=None,\n):\n \"\"\"\n Replace linear modules with a new Linear module.\n Parameters:\n model (`torch.nn.Module`):\n Input model or `torch.nn.Module` as the function is run recursively.\n linear_replacement (`torch.nn.Module`):\n The linear module that replaces the old one. Only expects standard arguments.\n If other arguments need to be passed, use a lambda.\n skip_modules (`List[str]`, *optional*, defaults to `lm_head`):\n List of modules names not to convert. 
Defaults to `lm_head`.\n copy_weights (`bool`):\n Copy the weights from the old linear module to the new one\n post_processing_function (`str`):\n A function name of the replacement linear class that is called\n after processing.\n \"\"\"\n for name, module in model.named_children():\n if len(list(module.children())) > 0:\n replace_linear(module, linear_replacement, skip_modules, copy_weights, post_processing_function)\n\n if isinstance(module, torch.nn.Linear) and name not in skip_modules:\n old_module = model._modules[name]\n model._modules[name] = linear_replacement(\n module.in_features,\n module.out_features,\n module.bias is not None,\n )\n if copy_weights:\n model._modules[name].weight = old_module.weight\n model._modules[name].bias = old_module.bias\n\n if post_processing_function is not None:\n func = getattr(module, post_processing_function, None)\n if func is not None:\n func(module)\n return model\n\n\ndef pack_dict_to_tensor(source_dict):\n \"\"\"\n Pack a dictionary into a torch tensor for storing quant_state items in state_dict.\n\n Parameters:\n - source_dict: The dictionary to be packed.\n\n Returns:\n A torch tensor containing the packed data.\n \"\"\"\n json_str = json.dumps(source_dict)\n json_bytes = json_str.encode(\"utf-8\")\n tensor_data = torch.tensor(list(json_bytes), dtype=torch.uint8)\n\n return tensor_data\n\n\ndef unpack_tensor_to_dict(tensor_data):","source_hash":"32f5b1b67b5863eff7753df611fdd08575cbafcf70a7c241fd1c8fee2bd2fdb8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.utils.pack_dict_to_tensor","uri":"program://bitsandbytes/function/bitsandbytes.utils.pack_dict_to_tensor#L163-L177","kind":"function","name":"pack_dict_to_tensor","path":"bitsandbytes/utils.py","language":"python","start_line":163,"end_line":177,"context_start_line":143,"context_end_line":197,"code":" replace_linear(module, linear_replacement, skip_modules, copy_weights, post_processing_function)\n\n if isinstance(module, torch.nn.Linear) and name not in skip_modules:\n old_module = model._modules[name]\n model._modules[name] = linear_replacement(\n module.in_features,\n module.out_features,\n module.bias is not None,\n )\n if copy_weights:\n model._modules[name].weight = old_module.weight\n model._modules[name].bias = old_module.bias\n\n if post_processing_function is not None:\n func = getattr(module, post_processing_function, None)\n if func is not None:\n func(module)\n return model\n\n\ndef pack_dict_to_tensor(source_dict):\n \"\"\"\n Pack a dictionary into a torch tensor for storing quant_state items in state_dict.\n\n Parameters:\n - source_dict: The dictionary to be packed.\n\n Returns:\n A torch tensor containing the packed data.\n \"\"\"\n json_str = json.dumps(source_dict)\n json_bytes = json_str.encode(\"utf-8\")\n tensor_data = torch.tensor(list(json_bytes), dtype=torch.uint8)\n\n return tensor_data\n\n\ndef unpack_tensor_to_dict(tensor_data):\n \"\"\"\n Unpack a torch tensor into a Python dictionary.\n\n Parameters:\n - tensor_data: The torch tensor containing the packed data.\n\n Returns:\n A Python dictionary containing the unpacked data.\n \"\"\"\n json_bytes = bytes(tensor_data.cpu().numpy())\n json_str = json_bytes.decode(\"utf-8\")\n unpacked_dict = json.loads(json_str)\n\n return unpacked_dict\n\n\nLINEAR_8BIT_WEIGHTS_FORMAT_MAPPING = {\"row\": 0, \"col32\": 1, \"col_turing\": 2, \"col_ampere\": 3}","source_hash":"32f5b1b67b5863eff7753df611fdd08575cbafcf70a7c241fd1c8fee2bd2fdb8","truncated":false} 
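replace_linear (documented in the record above) recurses through named_children and swaps each nn.Linear whose name is not in skip_modules for linear_replacement(in_features, out_features, bias). A sketch, assuming bitsandbytes' Linear8bitLt as the replacement class; any module whose constructor takes those three positional arguments would work, and copy_weights=True would additionally carry the old weight and bias over:

import torch
import bitsandbytes as bnb
from bitsandbytes.utils import replace_linear

model = torch.nn.Sequential(torch.nn.Linear(64, 256), torch.nn.Linear(256, 64))
model = replace_linear(model, bnb.nn.Linear8bitLt)
print(model)  # both children are now Linear8bitLt modules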
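pack_dict_to_tensor and its inverse unpack_tensor_to_dict (next record) are the mechanism that lets as_dict(packed=True) smuggle non-tensor quant-state fields through safetensors, which only stores tensors: JSON-encode to UTF-8 bytes, store the bytes as a uint8 tensor, decode on the way back. The round trip is lossless for JSON-serializable values:

from bitsandbytes.utils import pack_dict_to_tensor, unpack_tensor_to_dict

meta = {"quant_type": "nf4", "blocksize": 64, "dtype": "float16"}
t = pack_dict_to_tensor(meta)             # torch.uint8 tensor of JSON bytes
assert unpack_tensor_to_dict(t) == meta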
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.utils.unpack_tensor_to_dict","uri":"program://bitsandbytes/function/bitsandbytes.utils.unpack_tensor_to_dict#L180-L194","kind":"function","name":"unpack_tensor_to_dict","path":"bitsandbytes/utils.py","language":"python","start_line":180,"end_line":194,"context_start_line":160,"context_end_line":205,"code":" return model\n\n\ndef pack_dict_to_tensor(source_dict):\n \"\"\"\n Pack a dictionary into a torch tensor for storing quant_state items in state_dict.\n\n Parameters:\n - source_dict: The dictionary to be packed.\n\n Returns:\n A torch tensor containing the packed data.\n \"\"\"\n json_str = json.dumps(source_dict)\n json_bytes = json_str.encode(\"utf-8\")\n tensor_data = torch.tensor(list(json_bytes), dtype=torch.uint8)\n\n return tensor_data\n\n\ndef unpack_tensor_to_dict(tensor_data):\n \"\"\"\n Unpack a torch tensor into a Python dictionary.\n\n Parameters:\n - tensor_data: The torch tensor containing the packed data.\n\n Returns:\n A Python dictionary containing the unpacked data.\n \"\"\"\n json_bytes = bytes(tensor_data.cpu().numpy())\n json_str = json_bytes.decode(\"utf-8\")\n unpacked_dict = json.loads(json_str)\n\n return unpacked_dict\n\n\nLINEAR_8BIT_WEIGHTS_FORMAT_MAPPING = {\"row\": 0, \"col32\": 1, \"col_turing\": 2, \"col_ampere\": 3}\nINVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING = {val: name for (name, val) in LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING.items()}\n\n\ndef sync_gpu(t: torch.Tensor):\n if t.device.type == \"cuda\":\n torch.cuda.synchronize()\n elif t.device.type == \"xpu\":\n torch.xpu.synchronize()","source_hash":"32f5b1b67b5863eff7753df611fdd08575cbafcf70a7c241fd1c8fee2bd2fdb8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.utils.sync_gpu","uri":"program://bitsandbytes/function/bitsandbytes.utils.sync_gpu#L201-L205","kind":"function","name":"sync_gpu","path":"bitsandbytes/utils.py","language":"python","start_line":201,"end_line":205,"context_start_line":181,"context_end_line":205,"code":" \"\"\"\n Unpack a torch tensor into a Python dictionary.\n\n Parameters:\n - tensor_data: The torch tensor containing the packed data.\n\n Returns:\n A Python dictionary containing the unpacked data.\n \"\"\"\n json_bytes = bytes(tensor_data.cpu().numpy())\n json_str = json_bytes.decode(\"utf-8\")\n unpacked_dict = json.loads(json_str)\n\n return unpacked_dict\n\n\nLINEAR_8BIT_WEIGHTS_FORMAT_MAPPING = {\"row\": 0, \"col32\": 1, \"col_turing\": 2, \"col_ampere\": 3}\nINVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING = {val: name for (name, val) in LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING.items()}\n\n\ndef sync_gpu(t: torch.Tensor):\n if t.device.type == \"cuda\":\n torch.cuda.synchronize()\n elif t.device.type == \"xpu\":\n torch.xpu.synchronize()","source_hash":"32f5b1b67b5863eff7753df611fdd08575cbafcf70a7c241fd1c8fee2bd2fdb8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.utils.__init__","uri":"program://bitsandbytes/function/bitsandbytes.utils.__init__#L44-L45","kind":"function","name":"__init__","path":"bitsandbytes/utils.py","language":"python","start_line":44,"end_line":45,"context_start_line":24,"context_end_line":65,"code":" # first layer, we cannot use the weight for outlier detection\n # we follow a mixed approach:\n # (1) zscore test of std of hidden dimension\n # (2) magnitude > 6 test\n merged = input[0].view(-1, input[0].shape[-1])\n # (1) zscore test of std of hidden dimension\n outlier_idx = find_outlier_dims(merged, reduction_dim=1, zscore=3)\n # (2) magnitude > 6 test\n 
dims = (torch.abs(input[0]) > 6).sum(dim=list(range(len(input[0].shape) - 1)))\n outlier_idx2 = torch.where(dims > 0)[0]\n outlier_idx = torch.cat([outlier_idx, outlier_idx2]).unique()\n tracer.hvalue2outlier_idx[hvalue] = outlier_idx\n else:\n for hook in tracer.hooks:\n hook.remove()\n\n\nclass OutlierTracer:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self, model):\n self.last_w = None\n self.current_outlier_dims = None\n self.hvalues = []\n self.outliers = []\n self.hvalue2outlier_idx = {}\n self.initialized = True\n self.hooks = []\n\n for n, m in model.named_modules():\n if isinstance(m, torch.nn.Linear):\n self.hooks.append(m.register_forward_pre_hook(outlier_hook))\n\n def is_initialized(self):\n return getattr(self, \"initialized\", False)\n\n def get_hvalue(self, weight):\n return weight.data.storage().data_ptr()\n","source_hash":"32f5b1b67b5863eff7753df611fdd08575cbafcf70a7c241fd1c8fee2bd2fdb8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.utils.initialize","uri":"program://bitsandbytes/function/bitsandbytes.utils.initialize#L47-L58","kind":"function","name":"initialize","path":"bitsandbytes/utils.py","language":"python","start_line":47,"end_line":58,"context_start_line":27,"context_end_line":78,"code":" # (2) magnitude > 6 test\n merged = input[0].view(-1, input[0].shape[-1])\n # (1) zscore test of std of hidden dimension\n outlier_idx = find_outlier_dims(merged, reduction_dim=1, zscore=3)\n # (2) magnitude > 6 test\n dims = (torch.abs(input[0]) > 6).sum(dim=list(range(len(input[0].shape) - 1)))\n outlier_idx2 = torch.where(dims > 0)[0]\n outlier_idx = torch.cat([outlier_idx, outlier_idx2]).unique()\n tracer.hvalue2outlier_idx[hvalue] = outlier_idx\n else:\n for hook in tracer.hooks:\n hook.remove()\n\n\nclass OutlierTracer:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self, model):\n self.last_w = None\n self.current_outlier_dims = None\n self.hvalues = []\n self.outliers = []\n self.hvalue2outlier_idx = {}\n self.initialized = True\n self.hooks = []\n\n for n, m in model.named_modules():\n if isinstance(m, torch.nn.Linear):\n self.hooks.append(m.register_forward_pre_hook(outlier_hook))\n\n def is_initialized(self):\n return getattr(self, \"initialized\", False)\n\n def get_hvalue(self, weight):\n return weight.data.storage().data_ptr()\n\n def get_outliers(self, weight):\n if not self.is_initialized():\n print(\"Outlier tracer is not initialized...\")\n return None\n hvalue = self.get_hvalue(weight)\n if hvalue in self.hvalue2outlier_idx:\n return self.hvalue2outlier_idx[hvalue]\n else:\n return None\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:","source_hash":"32f5b1b67b5863eff7753df611fdd08575cbafcf70a7c241fd1c8fee2bd2fdb8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.utils.is_initialized","uri":"program://bitsandbytes/function/bitsandbytes.utils.is_initialized#L60-L61","kind":"function","name":"is_initialized","path":"bitsandbytes/utils.py","language":"python","start_line":60,"end_line":61,"context_start_line":40,"context_end_line":81,"code":"\nclass OutlierTracer:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self, model):\n self.last_w = None\n self.current_outlier_dims = None\n self.hvalues = []\n self.outliers = []\n self.hvalue2outlier_idx = {}\n self.initialized = True\n 
self.hooks = []\n\n for n, m in model.named_modules():\n if isinstance(m, torch.nn.Linear):\n self.hooks.append(m.register_forward_pre_hook(outlier_hook))\n\n def is_initialized(self):\n return getattr(self, \"initialized\", False)\n\n def get_hvalue(self, weight):\n return weight.data.storage().data_ptr()\n\n def get_outliers(self, weight):\n if not self.is_initialized():\n print(\"Outlier tracer is not initialized...\")\n return None\n hvalue = self.get_hvalue(weight)\n if hvalue in self.hvalue2outlier_idx:\n return self.hvalue2outlier_idx[hvalue]\n else:\n return None\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n return cls._instance\n","source_hash":"32f5b1b67b5863eff7753df611fdd08575cbafcf70a7c241fd1c8fee2bd2fdb8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.utils.get_hvalue","uri":"program://bitsandbytes/function/bitsandbytes.utils.get_hvalue#L63-L64","kind":"function","name":"get_hvalue","path":"bitsandbytes/utils.py","language":"python","start_line":63,"end_line":64,"context_start_line":43,"context_end_line":84,"code":"\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self, model):\n self.last_w = None\n self.current_outlier_dims = None\n self.hvalues = []\n self.outliers = []\n self.hvalue2outlier_idx = {}\n self.initialized = True\n self.hooks = []\n\n for n, m in model.named_modules():\n if isinstance(m, torch.nn.Linear):\n self.hooks.append(m.register_forward_pre_hook(outlier_hook))\n\n def is_initialized(self):\n return getattr(self, \"initialized\", False)\n\n def get_hvalue(self, weight):\n return weight.data.storage().data_ptr()\n\n def get_outliers(self, weight):\n if not self.is_initialized():\n print(\"Outlier tracer is not initialized...\")\n return None\n hvalue = self.get_hvalue(weight)\n if hvalue in self.hvalue2outlier_idx:\n return self.hvalue2outlier_idx[hvalue]\n else:\n return None\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n return cls._instance\n\n\ndef find_outlier_dims(weight, reduction_dim=0, zscore=4.0, topk=None, rdm=False):\n if rdm:","source_hash":"32f5b1b67b5863eff7753df611fdd08575cbafcf70a7c241fd1c8fee2bd2fdb8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.utils.get_outliers","uri":"program://bitsandbytes/function/bitsandbytes.utils.get_outliers#L66-L74","kind":"function","name":"get_outliers","path":"bitsandbytes/utils.py","language":"python","start_line":66,"end_line":74,"context_start_line":46,"context_end_line":94,"code":"\n def initialize(self, model):\n self.last_w = None\n self.current_outlier_dims = None\n self.hvalues = []\n self.outliers = []\n self.hvalue2outlier_idx = {}\n self.initialized = True\n self.hooks = []\n\n for n, m in model.named_modules():\n if isinstance(m, torch.nn.Linear):\n self.hooks.append(m.register_forward_pre_hook(outlier_hook))\n\n def is_initialized(self):\n return getattr(self, \"initialized\", False)\n\n def get_hvalue(self, weight):\n return weight.data.storage().data_ptr()\n\n def get_outliers(self, weight):\n if not self.is_initialized():\n print(\"Outlier tracer is not initialized...\")\n return None\n hvalue = self.get_hvalue(weight)\n if hvalue in self.hvalue2outlier_idx:\n return self.hvalue2outlier_idx[hvalue]\n else:\n return None\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n return cls._instance\n\n\ndef 
find_outlier_dims(weight, reduction_dim=0, zscore=4.0, topk=None, rdm=False):\n if rdm:\n return torch.randint(0, weight.shape[1], size=(topk,), device=weight.device).long()\n\n std = weight.std(reduction_dim)\n stdm = std.mean()\n stdstd = std.std()\n\n zstd = (std - stdm) / stdstd\n\n if topk is not None:\n val, idx = torch.topk(std.abs(), k=topk, dim=0)","source_hash":"32f5b1b67b5863eff7753df611fdd08575cbafcf70a7c241fd1c8fee2bd2fdb8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.utils.get_instance","uri":"program://bitsandbytes/function/bitsandbytes.utils.get_instance#L77-L80","kind":"function","name":"get_instance","path":"bitsandbytes/utils.py","language":"python","start_line":77,"end_line":80,"context_start_line":57,"context_end_line":100,"code":" if isinstance(m, torch.nn.Linear):\n self.hooks.append(m.register_forward_pre_hook(outlier_hook))\n\n def is_initialized(self):\n return getattr(self, \"initialized\", False)\n\n def get_hvalue(self, weight):\n return weight.data.storage().data_ptr()\n\n def get_outliers(self, weight):\n if not self.is_initialized():\n print(\"Outlier tracer is not initialized...\")\n return None\n hvalue = self.get_hvalue(weight)\n if hvalue in self.hvalue2outlier_idx:\n return self.hvalue2outlier_idx[hvalue]\n else:\n return None\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n return cls._instance\n\n\ndef find_outlier_dims(weight, reduction_dim=0, zscore=4.0, topk=None, rdm=False):\n if rdm:\n return torch.randint(0, weight.shape[1], size=(topk,), device=weight.device).long()\n\n std = weight.std(reduction_dim)\n stdm = std.mean()\n stdstd = std.std()\n\n zstd = (std - stdm) / stdstd\n\n if topk is not None:\n val, idx = torch.topk(std.abs(), k=topk, dim=0)\n else:\n idx = torch.where(zstd > zscore)[0]\n\n return idx\n\n","source_hash":"32f5b1b67b5863eff7753df611fdd08575cbafcf70a7c241fd1c8fee2bd2fdb8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.utils._decode","uri":"program://bitsandbytes/function/bitsandbytes.utils._decode#L102-L103","kind":"function","name":"_decode","path":"bitsandbytes/utils.py","language":"python","start_line":102,"end_line":103,"context_start_line":82,"context_end_line":123,"code":"\ndef find_outlier_dims(weight, reduction_dim=0, zscore=4.0, topk=None, rdm=False):\n if rdm:\n return torch.randint(0, weight.shape[1], size=(topk,), device=weight.device).long()\n\n std = weight.std(reduction_dim)\n stdm = std.mean()\n stdstd = std.std()\n\n zstd = (std - stdm) / stdstd\n\n if topk is not None:\n val, idx = torch.topk(std.abs(), k=topk, dim=0)\n else:\n idx = torch.where(zstd > zscore)[0]\n\n return idx\n\n\ndef execute_and_return(command_string: str) -> tuple[str, str]:\n def _decode(subprocess_err_out_tuple):\n return tuple(to_decode.decode(\"UTF-8\").strip() for to_decode in subprocess_err_out_tuple)\n\n def execute_and_return_decoded_std_streams(command_string):\n return _decode(\n subprocess.Popen(\n shlex.split(command_string),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n ).communicate(),\n )\n\n std_out, std_err = execute_and_return_decoded_std_streams(command_string)\n return std_out, std_err\n\n\ndef replace_linear(\n model,\n linear_replacement,\n skip_modules=(\"lm_head\",),\n copy_weights=False,\n post_processing_function=None,","source_hash":"32f5b1b67b5863eff7753df611fdd08575cbafcf70a7c241fd1c8fee2bd2fdb8","truncated":false} 
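find_outlier_dims flags the dimensions whose standard deviation is itself an outlier: it z-normalizes the per-dimension stds and keeps those above the zscore threshold (or simply the topk largest when topk is given; rdm=True returns random indices as a baseline). A small demonstration with one artificially inflated column:

import torch
from bitsandbytes.utils import find_outlier_dims

w = torch.randn(1024, 256)
w[:, 3] *= 50                             # inflate one column's std far beyond the rest
idx = find_outlier_dims(w, reduction_dim=0, zscore=4.0)
print(idx)                                # tensor([3])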
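execute_and_return shells out without shell=True: shlex.split tokenizes the command string, Popen captures both streams, and the nested helpers decode them into stripped UTF-8 strings. A POSIX example:

from bitsandbytes.utils import execute_and_return

std_out, std_err = execute_and_return("uname -a")
print(std_out)   # decoded, stripped stdout
print(std_err)   # empty string when the command succeeds quietly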
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.utils.execute_and_return_decoded_std_streams","uri":"program://bitsandbytes/function/bitsandbytes.utils.execute_and_return_decoded_std_streams#L105-L112","kind":"function","name":"execute_and_return_decoded_std_streams","path":"bitsandbytes/utils.py","language":"python","start_line":105,"end_line":112,"context_start_line":85,"context_end_line":132,"code":" return torch.randint(0, weight.shape[1], size=(topk,), device=weight.device).long()\n\n std = weight.std(reduction_dim)\n stdm = std.mean()\n stdstd = std.std()\n\n zstd = (std - stdm) / stdstd\n\n if topk is not None:\n val, idx = torch.topk(std.abs(), k=topk, dim=0)\n else:\n idx = torch.where(zstd > zscore)[0]\n\n return idx\n\n\ndef execute_and_return(command_string: str) -> tuple[str, str]:\n def _decode(subprocess_err_out_tuple):\n return tuple(to_decode.decode(\"UTF-8\").strip() for to_decode in subprocess_err_out_tuple)\n\n def execute_and_return_decoded_std_streams(command_string):\n return _decode(\n subprocess.Popen(\n shlex.split(command_string),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n ).communicate(),\n )\n\n std_out, std_err = execute_and_return_decoded_std_streams(command_string)\n return std_out, std_err\n\n\ndef replace_linear(\n model,\n linear_replacement,\n skip_modules=(\"lm_head\",),\n copy_weights=False,\n post_processing_function=None,\n):\n \"\"\"\n Replace linear modules with a new Linear module.\n Parameters:\n model (`torch.nn.Module`):\n Input model or `torch.nn.Module` as the function is run recursively.\n linear_replacement (`torch.nn.Module`):\n The linear module that replaces the old one. Only expects standard arguments.\n If other arguments need to be passed, use a lambda.","source_hash":"32f5b1b67b5863eff7753df611fdd08575cbafcf70a7c241fd1c8fee2bd2fdb8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.consts","uri":"program://bitsandbytes/module/bitsandbytes.consts#L1-L12","kind":"module","name":"bitsandbytes.consts","path":"bitsandbytes/consts.py","language":"python","start_line":1,"end_line":12,"context_start_line":1,"context_end_line":12,"code":"from pathlib import Path\nimport platform\n\nDYNAMIC_LIBRARY_SUFFIX = {\n \"Darwin\": \".dylib\",\n \"Linux\": \".so\",\n \"Windows\": \".dll\",\n}.get(platform.system(), \".so\")\n\nPACKAGE_DIR = Path(__file__).parent\nPACKAGE_GITHUB_URL = \"https://github.com/TimDettmers/bitsandbytes\"\nNONPYTORCH_DOC_URL = \"https://github.com/TimDettmers/bitsandbytes/blob/main/docs/source/nonpytorchcuda.mdx\"","source_hash":"c5f0da75001ac7e99ac6a907ad535b67a5084c9a12d1781c51fa299a32e08a53","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cextension","uri":"program://bitsandbytes/module/bitsandbytes.cextension#L1-L325","kind":"module","name":"bitsandbytes.cextension","path":"bitsandbytes/cextension.py","language":"python","start_line":1,"end_line":325,"context_start_line":1,"context_end_line":325,"code":"import ctypes as ct\nimport functools\nimport logging\nimport os\nfrom pathlib import Path\nimport re\nfrom typing import Optional\n\nimport torch\n\nfrom bitsandbytes.consts import DYNAMIC_LIBRARY_SUFFIX, PACKAGE_DIR\nfrom bitsandbytes.cuda_specs import CUDASpecs, get_cuda_specs, get_cuda_version_tuple, get_rocm_gpu_arch\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_cuda_bnb_library_path(cuda_specs: CUDASpecs) -> Path:\n \"\"\"\n Get the disk path to the CUDA BNB native library specified by the\n given CUDA specs, taking into account the 
`BNB_CUDA_VERSION` override environment variable.\n\n The library is not guaranteed to exist at the returned path.\n \"\"\"\n\n prefix = \"rocm\" if torch.version.hip else \"cuda\"\n library_name = f\"libbitsandbytes_{prefix}{cuda_specs.cuda_version_string}{DYNAMIC_LIBRARY_SUFFIX}\"\n\n override_value = os.environ.get(\"BNB_CUDA_VERSION\")\n if override_value:\n library_name = re.sub(r\"cuda\\d+\", f\"cuda{override_value}\", library_name, count=1)\n if torch.version.hip:\n raise RuntimeError(\n f\"BNB_CUDA_VERSION={override_value} detected for ROCm!! \\n\"\n f\"Clear the variable and retry: export BNB_CUDA_VERSION=\\n\"\n )\n logger.warning(\n f\"WARNING: BNB_CUDA_VERSION={override_value} environment variable detected; loading {library_name}.\\n\"\n \"This can be used to load a bitsandbytes version built with a CUDA version that is different from the PyTorch CUDA version.\\n\"\n \"If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\\n\"\n )\n\n return PACKAGE_DIR / library_name\n\n\nclass BNBNativeLibrary:\n _lib: ct.CDLL\n compiled_with_cuda = False\n\n def __init__(self, lib: ct.CDLL):\n self._lib = lib\n\n @functools.cache # noqa: B019\n def __getattr__(self, name):\n fn = getattr(self._lib, name, None)\n\n if fn is not None:\n return fn\n\n def throw_on_call(*args, **kwargs):\n raise RuntimeError(\n f\"Method '{name}' not available in CPU-only version of bitsandbytes.\\n\"\n \"Reinstall with GPU support or use CUDA-enabled hardware.\"\n )\n\n return throw_on_call\n\n def __getitem__(self, item):\n return self.__getattr__(item)\n\n\nclass CudaBNBNativeLibrary(BNBNativeLibrary):\n compiled_with_cuda = True\n\n def __init__(self, lib: ct.CDLL):\n super().__init__(lib)\n lib.get_context.restype = ct.c_void_p\n lib.get_cusparse.restype = ct.c_void_p\n lib.cget_managed_ptr.restype = ct.c_void_p\n\n\ndef get_available_cuda_binary_versions() -> list[str]:\n \"\"\"Get formatted CUDA versions from existing library files using cuda_specs logic\"\"\"\n lib_pattern = f\"libbitsandbytes_{BNB_BACKEND.lower()}*{DYNAMIC_LIBRARY_SUFFIX}\"\n versions = []\n for lib in Path(__file__).parent.glob(lib_pattern):\n pattern = rf\"{BNB_BACKEND.lower()}(\\d+)\"\n match = re.search(pattern, lib.name)\n if match:\n ver_code = int(match.group(1))\n major = ver_code // 10\n minor = ver_code % 10\n versions.append(f\"{major}.{minor}\")\n return sorted(versions)\n\n\ndef parse_cuda_version(version_str: str) -> str:\n \"\"\"Convert raw version string (e.g. '118' from env var) to formatted version (e.g. '11.8')\"\"\"\n if version_str.isdigit():\n return f\"{version_str[:-1]}.{version_str[-1]}\"\n return version_str # fallback as safety net\n\n\nclass ErrorHandlerMockBNBNativeLibrary(BNBNativeLibrary):\n \"\"\"\n Mock library handler that defers errors until native methods are called.\n\n This class serves as a fallback when the native bitsandbytes library fails to load.\n It captures the original error and generates detailed troubleshooting guidance.\n\n Key behaviors:\n - Allows attribute access and method assignment without immediate errors\n - Throws a RuntimeError with diagnostic information only when a native method is called, as otherwise it would error out on import, breaking backward compatibility\n - Handles both missing CUDA dependencies and version mismatch scenarios\n\n Error scenarios covered:\n 1. Missing shared library dependencies (e.g., libcudart.so not in LD_LIBRARY_PATH or through PyTorch CUDA installation)\n 2. 
CUDA version mismatch between PyTorch and available pre-compiled binaries\n 3. Completely missing pre-compiled binaries when CUDA is detected\n 4. Custom BNB_CUDA_VERSION override but mismatch\n 5. CPU-only installation attempts when GPU functionality is requested\n\n \"\"\"\n\n def __init__(self, error_msg: str):\n self.error_msg = error_msg\n self.user_cuda_version = get_cuda_version_tuple()\n self.available_versions = get_available_cuda_binary_versions()\n self.override_value = os.environ.get(\"BNB_CUDA_VERSION\")\n self.requested_version = (\n parse_cuda_version(self.override_value)\n if self.override_value\n else f\"{self.user_cuda_version[0]}.{self.user_cuda_version[1]}\"\n if self.user_cuda_version\n else \"unknown\"\n )\n\n # Pre-generate the error message based on error type\n if \"cannot open shared object file\" in error_msg:\n self.formatted_error = self._format_dependency_error()\n else: # lib loading errors\n self.formatted_error = self._format_lib_error_message(\n available_versions=self.available_versions,\n user_cuda_version=f\"{self.user_cuda_version[0]}.{self.user_cuda_version[1]}\"\n if self.user_cuda_version\n else \"unknown\",\n original_error=f\"Original error: {self.error_msg}\\n\" if self.error_msg else \"\",\n requested_version=self.requested_version,\n )\n\n def _format_lib_error_message(\n self,\n available_versions: list[str],\n user_cuda_version: str,\n original_error: str = \"\",\n requested_version: Optional[str] = None,\n ) -> str:\n \"\"\"Format detailed error message for library loading failures\"\"\"\n analysis = \"\"\n no_cpu_lib_found = \"libbitsandbytes_cpu.so: cannot open\" in original_error\n no_cuda_lib_found = f\"{BNB_BACKEND} binary not found\" in original_error\n\n if no_cpu_lib_found:\n analysis = \"\\n🚨 Failed to load CPU-only bitsandbytes library 🚨\\n\\n\"\n\n elif no_cuda_lib_found:\n version_list_str = \"\\n - \" + \"\\n - \".join(available_versions) if available_versions else \"NONE\"\n analysis = (\n (\n f\"\\n🚨 {BNB_BACKEND} VERSION MISMATCH 🚨\\n\"\n f\"Requested {BNB_BACKEND} version: {requested_version}\\n\"\n f\"Detected PyTorch {BNB_BACKEND} version: {user_cuda_version}\\n\"\n f\"Available pre-compiled versions: {version_list_str}\\n\\n\"\n \"This means:\\n\"\n \"The version you're trying to use is NOT distributed with this package\\n\\n\"\n )\n if available_versions\n else \"\\n🚨 Forgot to compile the bitsandbytes library? 🚨\\n\"\n \"1. You're not using the package but checked-out the source code\\n\"\n \"2. You MUST compile from source\\n\\n\"\n )\n\n base_msg = \"Attempted to use bitsandbytes native library functionality but it's not available.\\n\\n\"\n\n troubleshooting = (\n (\n f\"This typically happens when:\\n\"\n f\"1. bitsandbytes doesn't ship with a pre-compiled binary for your {BNB_BACKEND} version\\n\"\n f\"2. The library wasn't compiled properly during installation from source\\n\\n\"\n )\n if no_cuda_lib_found\n else f\"This typically happens when you checked the code out from source and your torch installation doesn't detect {BNB_BACKEND} on your machine.\\n\\n\"\n )\n\n note = (\n (\n f\"To make bitsandbytes work, the compiled library version MUST exactly match the linked {BNB_BACKEND} version.\\n\"\n f\"If your {BNB_BACKEND} version doesn't have a pre-compiled binary, you MUST compile from source.\\n\\n\"\n )\n if no_cuda_lib_found\n else \"\"\n )\n\n compile_instructions = (\n (\"COMPILE FROM SOURCE for CPU-only:\\n `cmake -DCOMPUTE_BACKEND=cpu -S . 
&& make`\\n\\n\")\n if not no_cuda_lib_found\n else (\n \"You have two options:\\n\"\n \"1. COMPILE FROM SOURCE (required if no binary exists):\\n\"\n \" https://huggingface.co/docs/bitsandbytes/main/en/installation#cuda-compile\\n\"\n \"2. Use BNB_CUDA_VERSION to specify a DIFFERENT CUDA version from the detected one, which is installed on your machine and matching an available pre-compiled version listed above\\n\\n\"\n )\n if not HIP_ENVIRONMENT\n else (\n \"You can COMPILE FROM SOURCE as mentioned here:\\n\"\n \" https://huggingface.co/docs/bitsandbytes/main/en/installation?backend=AMD+ROCm#amd-gpu\\n\"\n )\n )\n\n diagnostics = (\n f\"🔍 Run this command for detailed diagnostics:\\n\"\n f\"python -m bitsandbytes\\n\\n\"\n f\"If you've tried everything and still have issues:\\n\"\n f\"1. Include ALL version info (operating system, bitsandbytes, pytorch, {BNB_BACKEND.lower()}, python)\\n\"\n f\"2. Describe what you've tried in detail\\n\"\n f\"3. Open an issue with this information:\\n\"\n f\" https://github.com/bitsandbytes-foundation/bitsandbytes/issues\\n\\n\"\n )\n\n return f\"{analysis}{base_msg}{troubleshooting}{note}{compile_instructions}{original_error}\\n{diagnostics}\"\n\n def _format_dependency_error(self) -> str:\n \"\"\"Format error message for missing shared libraries\"\"\"\n # Extract missing library name from error\n error_parts = self.error_msg.split(\":\")\n missing_lib = error_parts[0].strip() if len(error_parts) > 0 else \"unknown library\"\n cuda_major_version = (\n self.requested_version.split(\".\")[0] if \".\" in self.requested_version else self.requested_version\n )\n\n return (\n f\"\\n🚨 {BNB_BACKEND} SETUP ERROR: Missing dependency: {missing_lib} 🚨\\n\\n\"\n f\"{BNB_BACKEND} {cuda_major_version}.x runtime libraries were not found in the LD_LIBRARY_PATH.\\n\\n\"\n f\"To fix this, make sure that:\\n\"\n f\"1. You have installed {BNB_BACKEND} {cuda_major_version}.x toolkit on your system\\n\"\n f\"2. The {BNB_BACKEND} runtime libraries are in your LD_LIBRARY_PATH\\n\\n\"\n f\"You can add them with (and persist the change by adding the line to your .bashrc):\\n\"\n f\" export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/path/to/{BNB_BACKEND.lower()}-{cuda_major_version}.x/\\\n {'lib64' if not HIP_ENVIRONMENT else 'lib'}\\n\\n\"\n f\"Original error: {self.error_msg}\\n\\n\"\n f\"🔍 Run this command for detailed diagnostics:\\n\"\n f\"python -m bitsandbytes\\n\\n\"\n f\"If you've tried everything and still have issues:\\n\"\n f\"1. Include ALL version info (operating system, bitsandbytes, pytorch, {BNB_BACKEND.lower()}, python)\\n\"\n f\"2. Describe what you've tried in detail\\n\"\n f\"3. 
Open an issue with this information:\\n\"\n f\" https://github.com/bitsandbytes-foundation/bitsandbytes/issues\\n\\n\"\n )\n\n def __getattr__(self, name):\n \"\"\"Return a dummy function that throws when called, rather than on attribute access\"\"\"\n\n def throw_on_call(*args, **kwargs):\n raise RuntimeError(f\"{self.formatted_error}Native code method attempted to call: lib.{name}()\")\n\n return throw_on_call\n\n def __getitem__(self, name):\n return self.__getattr__(name)\n\n\ndef get_native_library() -> BNBNativeLibrary:\n \"\"\"\n Load CUDA library XOR CPU, as the latter contains a subset of symbols of the former.\n \"\"\"\n cuda_specs = get_cuda_specs()\n binary_path = PACKAGE_DIR / f\"libbitsandbytes_cpu{DYNAMIC_LIBRARY_SUFFIX}\"\n\n if cuda_specs:\n cuda_binary_path = get_cuda_bnb_library_path(cuda_specs)\n\n if not cuda_binary_path.exists():\n raise RuntimeError(f\"Configured {BNB_BACKEND} binary not found at {cuda_binary_path}\")\n\n binary_path = cuda_binary_path\n\n if torch._C._has_xpu:\n binary_path = PACKAGE_DIR / f\"libbitsandbytes_xpu{DYNAMIC_LIBRARY_SUFFIX}\"\n\n logger.debug(f\"Loading bitsandbytes native library from: {binary_path}\")\n\n # Try to load the library - any errors will propagate up\n dll = ct.cdll.LoadLibrary(str(binary_path))\n\n if hasattr(dll, \"get_context\"): # only a CUDA-built library exposes this\n return CudaBNBNativeLibrary(dll)\n\n return BNBNativeLibrary(dll)\n\n\nROCM_GPU_ARCH = get_rocm_gpu_arch()\n\nHIP_ENVIRONMENT = False\nBNB_BACKEND = \"CPU\"\nif torch.version.hip:\n HIP_ENVIRONMENT = True\n BNB_BACKEND = \"ROCm\"\nelif torch.cuda.is_available():\n BNB_BACKEND = \"CUDA\"\nelif torch._C._has_xpu:\n BNB_BACKEND = \"XPU\"\n\ntry:\n lib = get_native_library()\nexcept Exception as e:\n if BNB_BACKEND in (\"CPU\", \"XPU\"):\n lib = ErrorHandlerMockBNBNativeLibrary(\"XPU/CPU can run without native library.\")\n else:\n error_msg = str(e)\n logger.error(\n f\"bitsandbytes library load error: {error_msg}\",\n exc_info=True,\n )\n\n # create a mock with error messaging as fallback\n lib = ErrorHandlerMockBNBNativeLibrary(error_msg)","source_hash":"77ea667c12a169f484de54d6e13d7fc4411c79844c0a9bdb6a99d3d5c8f8de7d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cextension.get_cuda_bnb_library_path","uri":"program://bitsandbytes/function/bitsandbytes.cextension.get_cuda_bnb_library_path#L17-L42","kind":"function","name":"get_cuda_bnb_library_path","path":"bitsandbytes/cextension.py","language":"python","start_line":17,"end_line":42,"context_start_line":1,"context_end_line":62,"code":"import ctypes as ct\nimport functools\nimport logging\nimport os\nfrom pathlib import Path\nimport re\nfrom typing import Optional\n\nimport torch\n\nfrom bitsandbytes.consts import DYNAMIC_LIBRARY_SUFFIX, PACKAGE_DIR\nfrom bitsandbytes.cuda_specs import CUDASpecs, get_cuda_specs, get_cuda_version_tuple, get_rocm_gpu_arch\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_cuda_bnb_library_path(cuda_specs: CUDASpecs) -> Path:\n \"\"\"\n Get the disk path to the CUDA BNB native library specified by the\n given CUDA specs, taking into account the `BNB_CUDA_VERSION` override environment variable.\n\n The library is not guaranteed to exist at the returned path.\n \"\"\"\n\n prefix = \"rocm\" if torch.version.hip else \"cuda\"\n library_name = f\"libbitsandbytes_{prefix}{cuda_specs.cuda_version_string}{DYNAMIC_LIBRARY_SUFFIX}\"\n\n override_value = os.environ.get(\"BNB_CUDA_VERSION\")\n if override_value:\n library_name = re.sub(r\"cuda\\d+\", 
f\"cuda{override_value}\", library_name, count=1)\n if torch.version.hip:\n raise RuntimeError(\n f\"BNB_CUDA_VERSION={override_value} detected for ROCm!! \\n\"\n f\"Clear the variable and retry: export BNB_CUDA_VERSION=\\n\"\n )\n logger.warning(\n f\"WARNING: BNB_CUDA_VERSION={override_value} environment variable detected; loading {library_name}.\\n\"\n \"This can be used to load a bitsandbytes version built with a CUDA version that is different from the PyTorch CUDA version.\\n\"\n \"If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\\n\"\n )\n\n return PACKAGE_DIR / library_name\n\n\nclass BNBNativeLibrary:\n _lib: ct.CDLL\n compiled_with_cuda = False\n\n def __init__(self, lib: ct.CDLL):\n self._lib = lib\n\n @functools.cache # noqa: B019\n def __getattr__(self, name):\n fn = getattr(self._lib, name, None)\n\n if fn is not None:\n return fn\n\n def throw_on_call(*args, **kwargs):\n raise RuntimeError(\n f\"Method '{name}' not available in CPU-only version of bitsandbytes.\\n\"\n \"Reinstall with GPU support or use CUDA-enabled hardware.\"","source_hash":"77ea667c12a169f484de54d6e13d7fc4411c79844c0a9bdb6a99d3d5c8f8de7d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cextension.BNBNativeLibrary","uri":"program://bitsandbytes/class/bitsandbytes.cextension.BNBNativeLibrary#L45-L68","kind":"class","name":"BNBNativeLibrary","path":"bitsandbytes/cextension.py","language":"python","start_line":45,"end_line":68,"context_start_line":25,"context_end_line":88,"code":" prefix = \"rocm\" if torch.version.hip else \"cuda\"\n library_name = f\"libbitsandbytes_{prefix}{cuda_specs.cuda_version_string}{DYNAMIC_LIBRARY_SUFFIX}\"\n\n override_value = os.environ.get(\"BNB_CUDA_VERSION\")\n if override_value:\n library_name = re.sub(r\"cuda\\d+\", f\"cuda{override_value}\", library_name, count=1)\n if torch.version.hip:\n raise RuntimeError(\n f\"BNB_CUDA_VERSION={override_value} detected for ROCm!! 
\\n\"\n f\"Clear the variable and retry: export BNB_CUDA_VERSION=\\n\"\n )\n logger.warning(\n f\"WARNING: BNB_CUDA_VERSION={override_value} environment variable detected; loading {library_name}.\\n\"\n \"This can be used to load a bitsandbytes version built with a CUDA version that is different from the PyTorch CUDA version.\\n\"\n \"If this was unintended set the BNB_CUDA_VERSION variable to an empty string: export BNB_CUDA_VERSION=\\n\"\n )\n\n return PACKAGE_DIR / library_name\n\n\nclass BNBNativeLibrary:\n _lib: ct.CDLL\n compiled_with_cuda = False\n\n def __init__(self, lib: ct.CDLL):\n self._lib = lib\n\n @functools.cache # noqa: B019\n def __getattr__(self, name):\n fn = getattr(self._lib, name, None)\n\n if fn is not None:\n return fn\n\n def throw_on_call(*args, **kwargs):\n raise RuntimeError(\n f\"Method '{name}' not available in CPU-only version of bitsandbytes.\\n\"\n \"Reinstall with GPU support or use CUDA-enabled hardware.\"\n )\n\n return throw_on_call\n\n def __getitem__(self, item):\n return self.__getattr__(item)\n\n\nclass CudaBNBNativeLibrary(BNBNativeLibrary):\n compiled_with_cuda = True\n\n def __init__(self, lib: ct.CDLL):\n super().__init__(lib)\n lib.get_context.restype = ct.c_void_p\n lib.get_cusparse.restype = ct.c_void_p\n lib.cget_managed_ptr.restype = ct.c_void_p\n\n\ndef get_available_cuda_binary_versions() -> list[str]:\n \"\"\"Get formatted CUDA versions from existing library files using cuda_specs logic\"\"\"\n lib_pattern = f\"libbitsandbytes_{BNB_BACKEND.lower()}*{DYNAMIC_LIBRARY_SUFFIX}\"\n versions = []\n for lib in Path(__file__).parent.glob(lib_pattern):\n pattern = rf\"{BNB_BACKEND.lower()}(\\d+)\"\n match = re.search(pattern, lib.name)\n if match:","source_hash":"77ea667c12a169f484de54d6e13d7fc4411c79844c0a9bdb6a99d3d5c8f8de7d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cextension.CudaBNBNativeLibrary","uri":"program://bitsandbytes/class/bitsandbytes.cextension.CudaBNBNativeLibrary#L71-L78","kind":"class","name":"CudaBNBNativeLibrary","path":"bitsandbytes/cextension.py","language":"python","start_line":71,"end_line":78,"context_start_line":51,"context_end_line":98,"code":"\n @functools.cache # noqa: B019\n def __getattr__(self, name):\n fn = getattr(self._lib, name, None)\n\n if fn is not None:\n return fn\n\n def throw_on_call(*args, **kwargs):\n raise RuntimeError(\n f\"Method '{name}' not available in CPU-only version of bitsandbytes.\\n\"\n \"Reinstall with GPU support or use CUDA-enabled hardware.\"\n )\n\n return throw_on_call\n\n def __getitem__(self, item):\n return self.__getattr__(item)\n\n\nclass CudaBNBNativeLibrary(BNBNativeLibrary):\n compiled_with_cuda = True\n\n def __init__(self, lib: ct.CDLL):\n super().__init__(lib)\n lib.get_context.restype = ct.c_void_p\n lib.get_cusparse.restype = ct.c_void_p\n lib.cget_managed_ptr.restype = ct.c_void_p\n\n\ndef get_available_cuda_binary_versions() -> list[str]:\n \"\"\"Get formatted CUDA versions from existing library files using cuda_specs logic\"\"\"\n lib_pattern = f\"libbitsandbytes_{BNB_BACKEND.lower()}*{DYNAMIC_LIBRARY_SUFFIX}\"\n versions = []\n for lib in Path(__file__).parent.glob(lib_pattern):\n pattern = rf\"{BNB_BACKEND.lower()}(\\d+)\"\n match = re.search(pattern, lib.name)\n if match:\n ver_code = int(match.group(1))\n major = ver_code // 10\n minor = ver_code % 10\n versions.append(f\"{major}.{minor}\")\n return sorted(versions)\n\n\ndef parse_cuda_version(version_str: str) -> str:\n \"\"\"Convert raw version string (e.g. 
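For illustration, the BNB_CUDA_VERSION override in get_cuda_bnb_library_path reduces to a single regex substitution on the library filename; a quick reproduction with hypothetical version values:

    import re

    library_name = "libbitsandbytes_cuda124.so"  # hypothetical detected build
    override_value = "118"                        # hypothetical BNB_CUDA_VERSION
    library_name = re.sub(r"cuda\d+", f"cuda{override_value}", library_name, count=1)
    assert library_name == "libbitsandbytes_cuda118.so"

Note the guard right after this substitution: under ROCm (torch.version.hip) the override is rejected outright, since those builds use the rocm filename prefix instead.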
'118' from env var) to formatted version (e.g. '11.8')\"\"\"\n if version_str.isdigit():","source_hash":"77ea667c12a169f484de54d6e13d7fc4411c79844c0a9bdb6a99d3d5c8f8de7d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cextension.get_available_cuda_binary_versions","uri":"program://bitsandbytes/function/bitsandbytes.cextension.get_available_cuda_binary_versions#L81-L93","kind":"function","name":"get_available_cuda_binary_versions","path":"bitsandbytes/cextension.py","language":"python","start_line":81,"end_line":93,"context_start_line":61,"context_end_line":113,"code":" f\"Method '{name}' not available in CPU-only version of bitsandbytes.\\n\"\n \"Reinstall with GPU support or use CUDA-enabled hardware.\"\n )\n\n return throw_on_call\n\n def __getitem__(self, item):\n return self.__getattr__(item)\n\n\nclass CudaBNBNativeLibrary(BNBNativeLibrary):\n compiled_with_cuda = True\n\n def __init__(self, lib: ct.CDLL):\n super().__init__(lib)\n lib.get_context.restype = ct.c_void_p\n lib.get_cusparse.restype = ct.c_void_p\n lib.cget_managed_ptr.restype = ct.c_void_p\n\n\ndef get_available_cuda_binary_versions() -> list[str]:\n \"\"\"Get formatted CUDA versions from existing library files using cuda_specs logic\"\"\"\n lib_pattern = f\"libbitsandbytes_{BNB_BACKEND.lower()}*{DYNAMIC_LIBRARY_SUFFIX}\"\n versions = []\n for lib in Path(__file__).parent.glob(lib_pattern):\n pattern = rf\"{BNB_BACKEND.lower()}(\\d+)\"\n match = re.search(pattern, lib.name)\n if match:\n ver_code = int(match.group(1))\n major = ver_code // 10\n minor = ver_code % 10\n versions.append(f\"{major}.{minor}\")\n return sorted(versions)\n\n\ndef parse_cuda_version(version_str: str) -> str:\n \"\"\"Convert raw version string (e.g. '118' from env var) to formatted version (e.g. 
'11.8')\"\"\"\n if version_str.isdigit():\n return f\"{version_str[:-1]}.{version_str[-1]}\"\n return version_str # fallback as safety net\n\n\nclass ErrorHandlerMockBNBNativeLibrary(BNBNativeLibrary):\n \"\"\"\n Mock library handler that defers errors until native methods are called.\n\n This class serves as a fallback when the native bitsandbytes library fails to load.\n It captures the original error and generates detailed troubleshooting guidance.\n\n Key behaviors:\n - Allows attribute access and method assignment without immediate errors\n - Throws a RuntimeError with diagnostic information only when a native method is called, as otherwise it would error out on import, breaking backward compatibility\n - Handles both missing CUDA dependencies and version mismatch scenarios","source_hash":"77ea667c12a169f484de54d6e13d7fc4411c79844c0a9bdb6a99d3d5c8f8de7d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cextension.parse_cuda_version","uri":"program://bitsandbytes/function/bitsandbytes.cextension.parse_cuda_version#L96-L100","kind":"function","name":"parse_cuda_version","path":"bitsandbytes/cextension.py","language":"python","start_line":96,"end_line":100,"context_start_line":76,"context_end_line":120,"code":" lib.get_context.restype = ct.c_void_p\n lib.get_cusparse.restype = ct.c_void_p\n lib.cget_managed_ptr.restype = ct.c_void_p\n\n\ndef get_available_cuda_binary_versions() -> list[str]:\n \"\"\"Get formatted CUDA versions from existing library files using cuda_specs logic\"\"\"\n lib_pattern = f\"libbitsandbytes_{BNB_BACKEND.lower()}*{DYNAMIC_LIBRARY_SUFFIX}\"\n versions = []\n for lib in Path(__file__).parent.glob(lib_pattern):\n pattern = rf\"{BNB_BACKEND.lower()}(\\d+)\"\n match = re.search(pattern, lib.name)\n if match:\n ver_code = int(match.group(1))\n major = ver_code // 10\n minor = ver_code % 10\n versions.append(f\"{major}.{minor}\")\n return sorted(versions)\n\n\ndef parse_cuda_version(version_str: str) -> str:\n \"\"\"Convert raw version string (e.g. '118' from env var) to formatted version (e.g. '11.8')\"\"\"\n if version_str.isdigit():\n return f\"{version_str[:-1]}.{version_str[-1]}\"\n return version_str # fallback as safety net\n\n\nclass ErrorHandlerMockBNBNativeLibrary(BNBNativeLibrary):\n \"\"\"\n Mock library handler that defers errors until native methods are called.\n\n This class serves as a fallback when the native bitsandbytes library fails to load.\n It captures the original error and generates detailed troubleshooting guidance.\n\n Key behaviors:\n - Allows attribute access and method assignment without immediate errors\n - Throws a RuntimeError with diagnostic information only when a native method is called, as otherwise it would error out on import, breaking backward compatibility\n - Handles both missing CUDA dependencies and version mismatch scenarios\n\n Error scenarios covered:\n 1. Missing shared library dependencies (e.g., libcudart.so not in LD_LIBRARY_PATH or through PyTorch CUDA installation)\n 2. CUDA version mismatch between PyTorch and available pre-compiled binaries\n 3. Completely missing pre-compiled binaries when CUDA is detected\n 4. Custom BNB_CUDA_VERSION override but mismatch\n 5. 
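Both version helpers above rely on the convention that the trailing digit of a raw version code is the minor version. A worked check of that convention, restating parse_cuda_version from the record:

    def parse_cuda_version(version_str: str) -> str:
        if version_str.isdigit():
            return f"{version_str[:-1]}.{version_str[-1]}"
        return version_str

    assert parse_cuda_version("118") == "11.8"
    assert parse_cuda_version("12.4") == "12.4"  # non-digit input passes through
    # Integer form used when globbing binaries: 118 -> (11, 8)
    ver_code = 118
    assert (ver_code // 10, ver_code % 10) == (11, 8)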
CPU-only installation attempts when GPU functionality is requested","source_hash":"77ea667c12a169f484de54d6e13d7fc4411c79844c0a9bdb6a99d3d5c8f8de7d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cextension.ErrorHandlerMockBNBNativeLibrary","uri":"program://bitsandbytes/class/bitsandbytes.cextension.ErrorHandlerMockBNBNativeLibrary#L103-L268","kind":"class","name":"ErrorHandlerMockBNBNativeLibrary","path":"bitsandbytes/cextension.py","language":"python","start_line":103,"end_line":268,"context_start_line":83,"context_end_line":288,"code":" lib_pattern = f\"libbitsandbytes_{BNB_BACKEND.lower()}*{DYNAMIC_LIBRARY_SUFFIX}\"\n versions = []\n for lib in Path(__file__).parent.glob(lib_pattern):\n pattern = rf\"{BNB_BACKEND.lower()}(\\d+)\"\n match = re.search(pattern, lib.name)\n if match:\n ver_code = int(match.group(1))\n major = ver_code // 10\n minor = ver_code % 10\n versions.append(f\"{major}.{minor}\")\n return sorted(versions)\n\n\ndef parse_cuda_version(version_str: str) -> str:\n \"\"\"Convert raw version string (e.g. '118' from env var) to formatted version (e.g. '11.8')\"\"\"\n if version_str.isdigit():\n return f\"{version_str[:-1]}.{version_str[-1]}\"\n return version_str # fallback as safety net\n\n\nclass ErrorHandlerMockBNBNativeLibrary(BNBNativeLibrary):\n \"\"\"\n Mock library handler that defers errors until native methods are called.\n\n This class serves as a fallback when the native bitsandbytes library fails to load.\n It captures the original error and generates detailed troubleshooting guidance.\n\n Key behaviors:\n - Allows attribute access and method assignment without immediate errors\n - Throws a RuntimeError with diagnostic information only when a native method is called, as otherwise it would error out on import, breaking backward compatibility\n - Handles both missing CUDA dependencies and version mismatch scenarios\n\n Error scenarios covered:\n 1. Missing shared library dependencies (e.g., libcudart.so not in LD_LIBRARY_PATH or through PyTorch CUDA installation)\n 2. CUDA version mismatch between PyTorch and available pre-compiled binaries\n 3. Completely missing pre-compiled binaries when CUDA is detected\n 4. Custom BNB_CUDA_VERSION override but mismatch\n 5. 
CPU-only installation attempts when GPU functionality is requested\n\n \"\"\"\n\n def __init__(self, error_msg: str):\n self.error_msg = error_msg\n self.user_cuda_version = get_cuda_version_tuple()\n self.available_versions = get_available_cuda_binary_versions()\n self.override_value = os.environ.get(\"BNB_CUDA_VERSION\")\n self.requested_version = (\n parse_cuda_version(self.override_value)\n if self.override_value\n else f\"{self.user_cuda_version[0]}.{self.user_cuda_version[1]}\"\n if self.user_cuda_version\n else \"unknown\"\n )\n\n # Pre-generate the error message based on error type\n if \"cannot open shared object file\" in error_msg:\n self.formatted_error = self._format_dependency_error()\n else: # lib loading errors\n self.formatted_error = self._format_lib_error_message(\n available_versions=self.available_versions,\n user_cuda_version=f\"{self.user_cuda_version[0]}.{self.user_cuda_version[1]}\"\n if self.user_cuda_version\n else \"unknown\",\n original_error=f\"Original error: {self.error_msg}\\n\" if self.error_msg else \"\",\n requested_version=self.requested_version,\n )\n\n def _format_lib_error_message(\n self,\n available_versions: list[str],\n user_cuda_version: str,\n original_error: str = \"\",\n requested_version: Optional[str] = None,\n ) -> str:\n \"\"\"Format detailed error message for library loading failures\"\"\"\n analysis = \"\"\n no_cpu_lib_found = \"libbitsandbytes_cpu.so: cannot open\" in original_error\n no_cuda_lib_found = f\"{BNB_BACKEND} binary not found\" in original_error\n\n if no_cpu_lib_found:\n analysis = \"\\n🚨 Failed to load CPU-only bitsandbytes library 🚨\\n\\n\"\n\n elif no_cuda_lib_found:\n version_list_str = \"\\n - \" + \"\\n - \".join(available_versions) if available_versions else \"NONE\"\n analysis = (\n (\n f\"\\n🚨 {BNB_BACKEND} VERSION MISMATCH 🚨\\n\"\n f\"Requested {BNB_BACKEND} version: {requested_version}\\n\"\n f\"Detected PyTorch {BNB_BACKEND} version: {user_cuda_version}\\n\"\n f\"Available pre-compiled versions: {version_list_str}\\n\\n\"\n \"This means:\\n\"\n \"The version you're trying to use is NOT distributed with this package\\n\\n\"\n )\n if available_versions\n else \"\\n🚨 Forgot to compile the bitsandbytes library? 🚨\\n\"\n \"1. You're not using the package but checked-out the source code\\n\"\n \"2. You MUST compile from source\\n\\n\"\n )\n\n base_msg = \"Attempted to use bitsandbytes native library functionality but it's not available.\\n\\n\"\n\n troubleshooting = (\n (\n f\"This typically happens when:\\n\"\n f\"1. bitsandbytes doesn't ship with a pre-compiled binary for your {BNB_BACKEND} version\\n\"\n f\"2. The library wasn't compiled properly during installation from source\\n\\n\"\n )\n if no_cuda_lib_found\n else f\"This typically happens when you checked the code out from source and your torch installation doesn't detect {BNB_BACKEND} on your machine.\\n\\n\"\n )\n\n note = (\n (\n f\"To make bitsandbytes work, the compiled library version MUST exactly match the linked {BNB_BACKEND} version.\\n\"\n f\"If your {BNB_BACKEND} version doesn't have a pre-compiled binary, you MUST compile from source.\\n\\n\"\n )\n if no_cuda_lib_found\n else \"\"\n )\n\n compile_instructions = (\n (\"COMPILE FROM SOURCE for CPU-only:\\n `cmake -DCOMPUTE_BACKEND=cpu -S . && make`\\n\\n\")\n if not no_cuda_lib_found\n else (\n \"You have two options:\\n\"\n \"1. COMPILE FROM SOURCE (required if no binary exists):\\n\"\n \" https://huggingface.co/docs/bitsandbytes/main/en/installation#cuda-compile\\n\"\n \"2. 
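The requested_version expression in __init__ above is a three-way precedence chain; an equivalent sketch with explicit control flow (the function name here is hypothetical):

    from typing import Optional

    def resolve_requested_version(override: Optional[str],
                                  detected: Optional[tuple]) -> str:
        # 1) explicit BNB_CUDA_VERSION override, 2) PyTorch-detected version
        # tuple, 3) "unknown" when neither is available.
        if override:
            return f"{override[:-1]}.{override[-1]}" if override.isdigit() else override
        if detected:
            return f"{detected[0]}.{detected[1]}"
        return "unknown"

    assert resolve_requested_version("118", (12, 4)) == "11.8"  # override wins
    assert resolve_requested_version(None, (12, 4)) == "12.4"
    assert resolve_requested_version(None, None) == "unknown"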
Use BNB_CUDA_VERSION to specify a DIFFERENT CUDA version from the detected one, which is installed on your machine and matching an available pre-compiled version listed above\\n\\n\"\n )\n if not HIP_ENVIRONMENT\n else (\n \"You can COMPILE FROM SOURCE as mentioned here:\\n\"\n \" https://huggingface.co/docs/bitsandbytes/main/en/installation?backend=AMD+ROCm#amd-gpu\\n\"\n )\n )\n\n diagnostics = (\n f\"🔍 Run this command for detailed diagnostics:\\n\"\n f\"python -m bitsandbytes\\n\\n\"\n f\"If you've tried everything and still have issues:\\n\"\n f\"1. Include ALL version info (operating system, bitsandbytes, pytorch, {BNB_BACKEND.lower()}, python)\\n\"\n f\"2. Describe what you've tried in detail\\n\"\n f\"3. Open an issue with this information:\\n\"\n f\" https://github.com/bitsandbytes-foundation/bitsandbytes/issues\\n\\n\"\n )\n\n return f\"{analysis}{base_msg}{troubleshooting}{note}{compile_instructions}{original_error}\\n{diagnostics}\"\n\n def _format_dependency_error(self) -> str:\n \"\"\"Format error message for missing shared libraries\"\"\"\n # Extract missing library name from error\n error_parts = self.error_msg.split(\":\")\n missing_lib = error_parts[0].strip() if len(error_parts) > 0 else \"unknown library\"\n cuda_major_version = (\n self.requested_version.split(\".\")[0] if \".\" in self.requested_version else self.requested_version\n )\n\n return (\n f\"\\n🚨 {BNB_BACKEND} SETUP ERROR: Missing dependency: {missing_lib} 🚨\\n\\n\"\n f\"{BNB_BACKEND} {cuda_major_version}.x runtime libraries were not found in the LD_LIBRARY_PATH.\\n\\n\"\n f\"To fix this, make sure that:\\n\"\n f\"1. You have installed {BNB_BACKEND} {cuda_major_version}.x toolkit on your system\\n\"\n f\"2. The {BNB_BACKEND} runtime libraries are in your LD_LIBRARY_PATH\\n\\n\"\n f\"You can add them with (and persist the change by adding the line to your .bashrc):\\n\"\n f\" export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/path/to/{BNB_BACKEND.lower()}-{cuda_major_version}.x/\\\n {'lib64' if not HIP_ENVIRONMENT else 'lib'}\\n\\n\"\n f\"Original error: {self.error_msg}\\n\\n\"\n f\"🔍 Run this command for detailed diagnostics:\\n\"\n f\"python -m bitsandbytes\\n\\n\"\n f\"If you've tried everything and still have issues:\\n\"\n f\"1. Include ALL version info (operating system, bitsandbytes, pytorch, {BNB_BACKEND.lower()}, python)\\n\"\n f\"2. Describe what you've tried in detail\\n\"\n f\"3. 
Open an issue with this information:\\n\"\n f\" https://github.com/bitsandbytes-foundation/bitsandbytes/issues\\n\\n\"\n )\n\n def __getattr__(self, name):\n \"\"\"Return a dummy function that throws when called, rather than on attribute access\"\"\"\n\n def throw_on_call(*args, **kwargs):\n raise RuntimeError(f\"{self.formatted_error}Native code method attempted to call: lib.{name}()\")\n\n return throw_on_call\n\n def __getitem__(self, name):\n return self.__getattr__(name)\n\n\ndef get_native_library() -> BNBNativeLibrary:\n \"\"\"\n Load CUDA library XOR CPU, as the latter contains a subset of symbols of the former.\n \"\"\"\n cuda_specs = get_cuda_specs()\n binary_path = PACKAGE_DIR / f\"libbitsandbytes_cpu{DYNAMIC_LIBRARY_SUFFIX}\"\n\n if cuda_specs:\n cuda_binary_path = get_cuda_bnb_library_path(cuda_specs)\n\n if not cuda_binary_path.exists():\n raise RuntimeError(f\"Configured {BNB_BACKEND} binary not found at {cuda_binary_path}\")\n\n binary_path = cuda_binary_path\n\n if torch._C._has_xpu:\n binary_path = PACKAGE_DIR / f\"libbitsandbytes_xpu{DYNAMIC_LIBRARY_SUFFIX}\"\n","source_hash":"77ea667c12a169f484de54d6e13d7fc4411c79844c0a9bdb6a99d3d5c8f8de7d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cextension.get_native_library","uri":"program://bitsandbytes/function/bitsandbytes.cextension.get_native_library#L271-L297","kind":"function","name":"get_native_library","path":"bitsandbytes/cextension.py","language":"python","start_line":271,"end_line":297,"context_start_line":251,"context_end_line":317,"code":" f\"python -m bitsandbytes\\n\\n\"\n f\"If you've tried everything and still have issues:\\n\"\n f\"1. Include ALL version info (operating system, bitsandbytes, pytorch, {BNB_BACKEND.lower()}, python)\\n\"\n f\"2. Describe what you've tried in detail\\n\"\n f\"3. 
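get_native_library above settles on one binary path and then picks the wrapper class by probing an exported symbol. A condensed sketch of that selection order (paths and arguments are hypothetical; the real function derives them from get_cuda_specs and torch):

    import ctypes as ct
    from pathlib import Path

    def load_native(package_dir: Path, suffix: str, cuda_specs, has_xpu: bool):
        # CPU build is the default; a matching CUDA build replaces it; an XPU
        # build takes precedence over both.
        path = package_dir / f"libbitsandbytes_cpu{suffix}"
        if cuda_specs:
            path = package_dir / f"libbitsandbytes_cuda124{suffix}"  # hypothetical
        if has_xpu:
            path = package_dir / f"libbitsandbytes_xpu{suffix}"
        dll = ct.cdll.LoadLibrary(str(path))
        # Only a CUDA build exports get_context, so its presence selects the
        # CUDA wrapper that also configures the ctypes restypes.
        return ("cuda" if hasattr(dll, "get_context") else "generic"), dll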
Open an issue with this information:\\n\"\n f\" https://github.com/bitsandbytes-foundation/bitsandbytes/issues\\n\\n\"\n )\n\n def __getattr__(self, name):\n \"\"\"Return a dummy function that throws when called, rather than on attribute access\"\"\"\n\n def throw_on_call(*args, **kwargs):\n raise RuntimeError(f\"{self.formatted_error}Native code method attempted to call: lib.{name}()\")\n\n return throw_on_call\n\n def __getitem__(self, name):\n return self.__getattr__(name)\n\n\ndef get_native_library() -> BNBNativeLibrary:\n \"\"\"\n Load CUDA library XOR CPU, as the latter contains a subset of symbols of the former.\n \"\"\"\n cuda_specs = get_cuda_specs()\n binary_path = PACKAGE_DIR / f\"libbitsandbytes_cpu{DYNAMIC_LIBRARY_SUFFIX}\"\n\n if cuda_specs:\n cuda_binary_path = get_cuda_bnb_library_path(cuda_specs)\n\n if not cuda_binary_path.exists():\n raise RuntimeError(f\"Configured {BNB_BACKEND} binary not found at {cuda_binary_path}\")\n\n binary_path = cuda_binary_path\n\n if torch._C._has_xpu:\n binary_path = PACKAGE_DIR / f\"libbitsandbytes_xpu{DYNAMIC_LIBRARY_SUFFIX}\"\n\n logger.debug(f\"Loading bitsandbytes native library from: {binary_path}\")\n\n # Try to load the library - any errors will propagate up\n dll = ct.cdll.LoadLibrary(str(binary_path))\n\n if hasattr(dll, \"get_context\"): # only a CUDA-built library exposes this\n return CudaBNBNativeLibrary(dll)\n\n return BNBNativeLibrary(dll)\n\n\nROCM_GPU_ARCH = get_rocm_gpu_arch()\n\nHIP_ENVIRONMENT = False\nBNB_BACKEND = \"CPU\"\nif torch.version.hip:\n HIP_ENVIRONMENT = True\n BNB_BACKEND = \"ROCm\"\nelif torch.cuda.is_available():\n BNB_BACKEND = \"CUDA\"\nelif torch._C._has_xpu:\n BNB_BACKEND = \"XPU\"\n\ntry:\n lib = get_native_library()\nexcept Exception as e:\n if BNB_BACKEND in (\"CPU\", \"XPU\"):\n lib = ErrorHandlerMockBNBNativeLibrary(\"XPU/CPU can run without native library.\")\n else:","source_hash":"77ea667c12a169f484de54d6e13d7fc4411c79844c0a9bdb6a99d3d5c8f8de7d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cextension.__init__","uri":"program://bitsandbytes/function/bitsandbytes.cextension.__init__#L124-L148","kind":"function","name":"__init__","path":"bitsandbytes/cextension.py","language":"python","start_line":124,"end_line":148,"context_start_line":104,"context_end_line":168,"code":" \"\"\"\n Mock library handler that defers errors until native methods are called.\n\n This class serves as a fallback when the native bitsandbytes library fails to load.\n It captures the original error and generates detailed troubleshooting guidance.\n\n Key behaviors:\n - Allows attribute access and method assignment without immediate errors\n - Throws a RuntimeError with diagnostic information only when a native method is called, as otherwise it would error out on import, breaking backward compatibility\n - Handles both missing CUDA dependencies and version mismatch scenarios\n\n Error scenarios covered:\n 1. Missing shared library dependencies (e.g., libcudart.so not in LD_LIBRARY_PATH or through PyTorch CUDA installation)\n 2. CUDA version mismatch between PyTorch and available pre-compiled binaries\n 3. Completely missing pre-compiled binaries when CUDA is detected\n 4. Custom BNB_CUDA_VERSION override but mismatch\n 5. 
CPU-only installation attempts when GPU functionality is requested\n\n \"\"\"\n\n def __init__(self, error_msg: str):\n self.error_msg = error_msg\n self.user_cuda_version = get_cuda_version_tuple()\n self.available_versions = get_available_cuda_binary_versions()\n self.override_value = os.environ.get(\"BNB_CUDA_VERSION\")\n self.requested_version = (\n parse_cuda_version(self.override_value)\n if self.override_value\n else f\"{self.user_cuda_version[0]}.{self.user_cuda_version[1]}\"\n if self.user_cuda_version\n else \"unknown\"\n )\n\n # Pre-generate the error message based on error type\n if \"cannot open shared object file\" in error_msg:\n self.formatted_error = self._format_dependency_error()\n else: # lib loading errors\n self.formatted_error = self._format_lib_error_message(\n available_versions=self.available_versions,\n user_cuda_version=f\"{self.user_cuda_version[0]}.{self.user_cuda_version[1]}\"\n if self.user_cuda_version\n else \"unknown\",\n original_error=f\"Original error: {self.error_msg}\\n\" if self.error_msg else \"\",\n requested_version=self.requested_version,\n )\n\n def _format_lib_error_message(\n self,\n available_versions: list[str],\n user_cuda_version: str,\n original_error: str = \"\",\n requested_version: Optional[str] = None,\n ) -> str:\n \"\"\"Format detailed error message for library loading failures\"\"\"\n analysis = \"\"\n no_cpu_lib_found = \"libbitsandbytes_cpu.so: cannot open\" in original_error\n no_cuda_lib_found = f\"{BNB_BACKEND} binary not found\" in original_error\n\n if no_cpu_lib_found:\n analysis = \"\\n🚨 Failed to load CPU-only bitsandbytes library 🚨\\n\\n\"\n\n elif no_cuda_lib_found:\n version_list_str = \"\\n - \" + \"\\n - \".join(available_versions) if available_versions else \"NONE\"\n analysis = (\n (","source_hash":"77ea667c12a169f484de54d6e13d7fc4411c79844c0a9bdb6a99d3d5c8f8de7d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cextension.__getattr__","uri":"program://bitsandbytes/function/bitsandbytes.cextension.__getattr__#L259-L265","kind":"function","name":"__getattr__","path":"bitsandbytes/cextension.py","language":"python","start_line":259,"end_line":265,"context_start_line":239,"context_end_line":285,"code":"\n return (\n f\"\\n🚨 {BNB_BACKEND} SETUP ERROR: Missing dependency: {missing_lib} 🚨\\n\\n\"\n f\"{BNB_BACKEND} {cuda_major_version}.x runtime libraries were not found in the LD_LIBRARY_PATH.\\n\\n\"\n f\"To fix this, make sure that:\\n\"\n f\"1. You have installed {BNB_BACKEND} {cuda_major_version}.x toolkit on your system\\n\"\n f\"2. The {BNB_BACKEND} runtime libraries are in your LD_LIBRARY_PATH\\n\\n\"\n f\"You can add them with (and persist the change by adding the line to your .bashrc):\\n\"\n f\" export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/path/to/{BNB_BACKEND.lower()}-{cuda_major_version}.x/\\\n {'lib64' if not HIP_ENVIRONMENT else 'lib'}\\n\\n\"\n f\"Original error: {self.error_msg}\\n\\n\"\n f\"🔍 Run this command for detailed diagnostics:\\n\"\n f\"python -m bitsandbytes\\n\\n\"\n f\"If you've tried everything and still have issues:\\n\"\n f\"1. Include ALL version info (operating system, bitsandbytes, pytorch, {BNB_BACKEND.lower()}, python)\\n\"\n f\"2. Describe what you've tried in detail\\n\"\n f\"3. 
Open an issue with this information:\\n\"\n f\" https://github.com/bitsandbytes-foundation/bitsandbytes/issues\\n\\n\"\n )\n\n def __getattr__(self, name):\n \"\"\"Return a dummy function that throws when called, rather than on attribute access\"\"\"\n\n def throw_on_call(*args, **kwargs):\n raise RuntimeError(f\"{self.formatted_error}Native code method attempted to call: lib.{name}()\")\n\n return throw_on_call\n\n def __getitem__(self, name):\n return self.__getattr__(name)\n\n\ndef get_native_library() -> BNBNativeLibrary:\n \"\"\"\n Load CUDA library XOR CPU, as the latter contains a subset of symbols of the former.\n \"\"\"\n cuda_specs = get_cuda_specs()\n binary_path = PACKAGE_DIR / f\"libbitsandbytes_cpu{DYNAMIC_LIBRARY_SUFFIX}\"\n\n if cuda_specs:\n cuda_binary_path = get_cuda_bnb_library_path(cuda_specs)\n\n if not cuda_binary_path.exists():\n raise RuntimeError(f\"Configured {BNB_BACKEND} binary not found at {cuda_binary_path}\")\n\n binary_path = cuda_binary_path\n","source_hash":"77ea667c12a169f484de54d6e13d7fc4411c79844c0a9bdb6a99d3d5c8f8de7d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cextension.__getitem__","uri":"program://bitsandbytes/function/bitsandbytes.cextension.__getitem__#L267-L268","kind":"function","name":"__getitem__","path":"bitsandbytes/cextension.py","language":"python","start_line":267,"end_line":268,"context_start_line":247,"context_end_line":288,"code":" f\" export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/path/to/{BNB_BACKEND.lower()}-{cuda_major_version}.x/\\\n {'lib64' if not HIP_ENVIRONMENT else 'lib'}\\n\\n\"\n f\"Original error: {self.error_msg}\\n\\n\"\n f\"🔍 Run this command for detailed diagnostics:\\n\"\n f\"python -m bitsandbytes\\n\\n\"\n f\"If you've tried everything and still have issues:\\n\"\n f\"1. Include ALL version info (operating system, bitsandbytes, pytorch, {BNB_BACKEND.lower()}, python)\\n\"\n f\"2. Describe what you've tried in detail\\n\"\n f\"3. 
Open an issue with this information:\\n\"\n f\" https://github.com/bitsandbytes-foundation/bitsandbytes/issues\\n\\n\"\n )\n\n def __getattr__(self, name):\n \"\"\"Return a dummy function that throws when called, rather than on attribute access\"\"\"\n\n def throw_on_call(*args, **kwargs):\n raise RuntimeError(f\"{self.formatted_error}Native code method attempted to call: lib.{name}()\")\n\n return throw_on_call\n\n def __getitem__(self, name):\n return self.__getattr__(name)\n\n\ndef get_native_library() -> BNBNativeLibrary:\n \"\"\"\n Load CUDA library XOR CPU, as the latter contains a subset of symbols of the former.\n \"\"\"\n cuda_specs = get_cuda_specs()\n binary_path = PACKAGE_DIR / f\"libbitsandbytes_cpu{DYNAMIC_LIBRARY_SUFFIX}\"\n\n if cuda_specs:\n cuda_binary_path = get_cuda_bnb_library_path(cuda_specs)\n\n if not cuda_binary_path.exists():\n raise RuntimeError(f\"Configured {BNB_BACKEND} binary not found at {cuda_binary_path}\")\n\n binary_path = cuda_binary_path\n\n if torch._C._has_xpu:\n binary_path = PACKAGE_DIR / f\"libbitsandbytes_xpu{DYNAMIC_LIBRARY_SUFFIX}\"\n","source_hash":"77ea667c12a169f484de54d6e13d7fc4411c79844c0a9bdb6a99d3d5c8f8de7d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cextension._format_lib_error_message","uri":"program://bitsandbytes/function/bitsandbytes.cextension._format_lib_error_message#L150-L229","kind":"function","name":"_format_lib_error_message","path":"bitsandbytes/cextension.py","language":"python","start_line":150,"end_line":229,"context_start_line":130,"context_end_line":249,"code":" parse_cuda_version(self.override_value)\n if self.override_value\n else f\"{self.user_cuda_version[0]}.{self.user_cuda_version[1]}\"\n if self.user_cuda_version\n else \"unknown\"\n )\n\n # Pre-generate the error message based on error type\n if \"cannot open shared object file\" in error_msg:\n self.formatted_error = self._format_dependency_error()\n else: # lib loading errors\n self.formatted_error = self._format_lib_error_message(\n available_versions=self.available_versions,\n user_cuda_version=f\"{self.user_cuda_version[0]}.{self.user_cuda_version[1]}\"\n if self.user_cuda_version\n else \"unknown\",\n original_error=f\"Original error: {self.error_msg}\\n\" if self.error_msg else \"\",\n requested_version=self.requested_version,\n )\n\n def _format_lib_error_message(\n self,\n available_versions: list[str],\n user_cuda_version: str,\n original_error: str = \"\",\n requested_version: Optional[str] = None,\n ) -> str:\n \"\"\"Format detailed error message for library loading failures\"\"\"\n analysis = \"\"\n no_cpu_lib_found = \"libbitsandbytes_cpu.so: cannot open\" in original_error\n no_cuda_lib_found = f\"{BNB_BACKEND} binary not found\" in original_error\n\n if no_cpu_lib_found:\n analysis = \"\\n🚨 Failed to load CPU-only bitsandbytes library 🚨\\n\\n\"\n\n elif no_cuda_lib_found:\n version_list_str = \"\\n - \" + \"\\n - \".join(available_versions) if available_versions else \"NONE\"\n analysis = (\n (\n f\"\\n🚨 {BNB_BACKEND} VERSION MISMATCH 🚨\\n\"\n f\"Requested {BNB_BACKEND} version: {requested_version}\\n\"\n f\"Detected PyTorch {BNB_BACKEND} version: {user_cuda_version}\\n\"\n f\"Available pre-compiled versions: {version_list_str}\\n\\n\"\n \"This means:\\n\"\n \"The version you're trying to use is NOT distributed with this package\\n\\n\"\n )\n if available_versions\n else \"\\n🚨 Forgot to compile the bitsandbytes library? 🚨\\n\"\n \"1. You're not using the package but checked-out the source code\\n\"\n \"2. 
You MUST compile from source\\n\\n\"\n )\n\n base_msg = \"Attempted to use bitsandbytes native library functionality but it's not available.\\n\\n\"\n\n troubleshooting = (\n (\n f\"This typically happens when:\\n\"\n f\"1. bitsandbytes doesn't ship with a pre-compiled binary for your {BNB_BACKEND} version\\n\"\n f\"2. The library wasn't compiled properly during installation from source\\n\\n\"\n )\n if no_cuda_lib_found\n else f\"This typically happens when you checked the code out from source and your torch installation doesn't detect {BNB_BACKEND} on your machine.\\n\\n\"\n )\n\n note = (\n (\n f\"To make bitsandbytes work, the compiled library version MUST exactly match the linked {BNB_BACKEND} version.\\n\"\n f\"If your {BNB_BACKEND} version doesn't have a pre-compiled binary, you MUST compile from source.\\n\\n\"\n )\n if no_cuda_lib_found\n else \"\"\n )\n\n compile_instructions = (\n (\"COMPILE FROM SOURCE for CPU-only:\\n `cmake -DCOMPUTE_BACKEND=cpu -S . && make`\\n\\n\")\n if not no_cuda_lib_found\n else (\n \"You have two options:\\n\"\n \"1. COMPILE FROM SOURCE (required if no binary exists):\\n\"\n \" https://huggingface.co/docs/bitsandbytes/main/en/installation#cuda-compile\\n\"\n \"2. Use BNB_CUDA_VERSION to specify a DIFFERENT CUDA version from the detected one, which is installed on your machine and matching an available pre-compiled version listed above\\n\\n\"\n )\n if not HIP_ENVIRONMENT\n else (\n \"You can COMPILE FROM SOURCE as mentioned here:\\n\"\n \" https://huggingface.co/docs/bitsandbytes/main/en/installation?backend=AMD+ROCm#amd-gpu\\n\"\n )\n )\n\n diagnostics = (\n f\"🔍 Run this command for detailed diagnostics:\\n\"\n f\"python -m bitsandbytes\\n\\n\"\n f\"If you've tried everything and still have issues:\\n\"\n f\"1. Include ALL version info (operating system, bitsandbytes, pytorch, {BNB_BACKEND.lower()}, python)\\n\"\n f\"2. Describe what you've tried in detail\\n\"\n f\"3. Open an issue with this information:\\n\"\n f\" https://github.com/bitsandbytes-foundation/bitsandbytes/issues\\n\\n\"\n )\n\n return f\"{analysis}{base_msg}{troubleshooting}{note}{compile_instructions}{original_error}\\n{diagnostics}\"\n\n def _format_dependency_error(self) -> str:\n \"\"\"Format error message for missing shared libraries\"\"\"\n # Extract missing library name from error\n error_parts = self.error_msg.split(\":\")\n missing_lib = error_parts[0].strip() if len(error_parts) > 0 else \"unknown library\"\n cuda_major_version = (\n self.requested_version.split(\".\")[0] if \".\" in self.requested_version else self.requested_version\n )\n\n return (\n f\"\\n🚨 {BNB_BACKEND} SETUP ERROR: Missing dependency: {missing_lib} 🚨\\n\\n\"\n f\"{BNB_BACKEND} {cuda_major_version}.x runtime libraries were not found in the LD_LIBRARY_PATH.\\n\\n\"\n f\"To fix this, make sure that:\\n\"\n f\"1. You have installed {BNB_BACKEND} {cuda_major_version}.x toolkit on your system\\n\"\n f\"2. 
The {BNB_BACKEND} runtime libraries are in your LD_LIBRARY_PATH\\n\\n\"\n f\"You can add them with (and persist the change by adding the line to your .bashrc):\\n\"\n f\" export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/path/to/{BNB_BACKEND.lower()}-{cuda_major_version}.x/\\\n {'lib64' if not HIP_ENVIRONMENT else 'lib'}\\n\\n\"\n f\"Original error: {self.error_msg}\\n\\n\"","source_hash":"77ea667c12a169f484de54d6e13d7fc4411c79844c0a9bdb6a99d3d5c8f8de7d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cextension._format_dependency_error","uri":"program://bitsandbytes/function/bitsandbytes.cextension._format_dependency_error#L231-L257","kind":"function","name":"_format_dependency_error","path":"bitsandbytes/cextension.py","language":"python","start_line":231,"end_line":257,"context_start_line":211,"context_end_line":277,"code":" )\n if not HIP_ENVIRONMENT\n else (\n \"You can COMPILE FROM SOURCE as mentioned here:\\n\"\n \" https://huggingface.co/docs/bitsandbytes/main/en/installation?backend=AMD+ROCm#amd-gpu\\n\"\n )\n )\n\n diagnostics = (\n f\"🔍 Run this command for detailed diagnostics:\\n\"\n f\"python -m bitsandbytes\\n\\n\"\n f\"If you've tried everything and still have issues:\\n\"\n f\"1. Include ALL version info (operating system, bitsandbytes, pytorch, {BNB_BACKEND.lower()}, python)\\n\"\n f\"2. Describe what you've tried in detail\\n\"\n f\"3. Open an issue with this information:\\n\"\n f\" https://github.com/bitsandbytes-foundation/bitsandbytes/issues\\n\\n\"\n )\n\n return f\"{analysis}{base_msg}{troubleshooting}{note}{compile_instructions}{original_error}\\n{diagnostics}\"\n\n def _format_dependency_error(self) -> str:\n \"\"\"Format error message for missing shared libraries\"\"\"\n # Extract missing library name from error\n error_parts = self.error_msg.split(\":\")\n missing_lib = error_parts[0].strip() if len(error_parts) > 0 else \"unknown library\"\n cuda_major_version = (\n self.requested_version.split(\".\")[0] if \".\" in self.requested_version else self.requested_version\n )\n\n return (\n f\"\\n🚨 {BNB_BACKEND} SETUP ERROR: Missing dependency: {missing_lib} 🚨\\n\\n\"\n f\"{BNB_BACKEND} {cuda_major_version}.x runtime libraries were not found in the LD_LIBRARY_PATH.\\n\\n\"\n f\"To fix this, make sure that:\\n\"\n f\"1. You have installed {BNB_BACKEND} {cuda_major_version}.x toolkit on your system\\n\"\n f\"2. The {BNB_BACKEND} runtime libraries are in your LD_LIBRARY_PATH\\n\\n\"\n f\"You can add them with (and persist the change by adding the line to your .bashrc):\\n\"\n f\" export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/path/to/{BNB_BACKEND.lower()}-{cuda_major_version}.x/\\\n {'lib64' if not HIP_ENVIRONMENT else 'lib'}\\n\\n\"\n f\"Original error: {self.error_msg}\\n\\n\"\n f\"🔍 Run this command for detailed diagnostics:\\n\"\n f\"python -m bitsandbytes\\n\\n\"\n f\"If you've tried everything and still have issues:\\n\"\n f\"1. Include ALL version info (operating system, bitsandbytes, pytorch, {BNB_BACKEND.lower()}, python)\\n\"\n f\"2. Describe what you've tried in detail\\n\"\n f\"3. 
Open an issue with this information:\\n\"\n f\" https://github.com/bitsandbytes-foundation/bitsandbytes/issues\\n\\n\"\n )\n\n def __getattr__(self, name):\n \"\"\"Return a dummy function that throws when called, rather than on attribute access\"\"\"\n\n def throw_on_call(*args, **kwargs):\n raise RuntimeError(f\"{self.formatted_error}Native code method attempted to call: lib.{name}()\")\n\n return throw_on_call\n\n def __getitem__(self, name):\n return self.__getattr__(name)\n\n\ndef get_native_library() -> BNBNativeLibrary:\n \"\"\"\n Load CUDA library XOR CPU, as the latter contains a subset of symbols of the former.\n \"\"\"\n cuda_specs = get_cuda_specs()\n binary_path = PACKAGE_DIR / f\"libbitsandbytes_cpu{DYNAMIC_LIBRARY_SUFFIX}\"\n","source_hash":"77ea667c12a169f484de54d6e13d7fc4411c79844c0a9bdb6a99d3d5c8f8de7d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.cextension.throw_on_call","uri":"program://bitsandbytes/function/bitsandbytes.cextension.throw_on_call#L262-L263","kind":"function","name":"throw_on_call","path":"bitsandbytes/cextension.py","language":"python","start_line":262,"end_line":263,"context_start_line":242,"context_end_line":283,"code":" f\"{BNB_BACKEND} {cuda_major_version}.x runtime libraries were not found in the LD_LIBRARY_PATH.\\n\\n\"\n f\"To fix this, make sure that:\\n\"\n f\"1. You have installed {BNB_BACKEND} {cuda_major_version}.x toolkit on your system\\n\"\n f\"2. The {BNB_BACKEND} runtime libraries are in your LD_LIBRARY_PATH\\n\\n\"\n f\"You can add them with (and persist the change by adding the line to your .bashrc):\\n\"\n f\" export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/path/to/{BNB_BACKEND.lower()}-{cuda_major_version}.x/\\\n {'lib64' if not HIP_ENVIRONMENT else 'lib'}\\n\\n\"\n f\"Original error: {self.error_msg}\\n\\n\"\n f\"🔍 Run this command for detailed diagnostics:\\n\"\n f\"python -m bitsandbytes\\n\\n\"\n f\"If you've tried everything and still have issues:\\n\"\n f\"1. Include ALL version info (operating system, bitsandbytes, pytorch, {BNB_BACKEND.lower()}, python)\\n\"\n f\"2. Describe what you've tried in detail\\n\"\n f\"3. 
Open an issue with this information:\\n\"\n f\" https://github.com/bitsandbytes-foundation/bitsandbytes/issues\\n\\n\"\n )\n\n def __getattr__(self, name):\n \"\"\"Return a dummy function that throws when called, rather than on attribute access\"\"\"\n\n def throw_on_call(*args, **kwargs):\n raise RuntimeError(f\"{self.formatted_error}Native code method attempted to call: lib.{name}()\")\n\n return throw_on_call\n\n def __getitem__(self, name):\n return self.__getattr__(name)\n\n\ndef get_native_library() -> BNBNativeLibrary:\n \"\"\"\n Load CUDA library XOR CPU, as the latter contains a subset of symbols of the former.\n \"\"\"\n cuda_specs = get_cuda_specs()\n binary_path = PACKAGE_DIR / f\"libbitsandbytes_cpu{DYNAMIC_LIBRARY_SUFFIX}\"\n\n if cuda_specs:\n cuda_binary_path = get_cuda_bnb_library_path(cuda_specs)\n\n if not cuda_binary_path.exists():\n raise RuntimeError(f\"Configured {BNB_BACKEND} binary not found at {cuda_binary_path}\")\n","source_hash":"77ea667c12a169f484de54d6e13d7fc4411c79844c0a9bdb6a99d3d5c8f8de7d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.utils","uri":"program://bitsandbytes/module/bitsandbytes.backends.utils#L1-L83","kind":"module","name":"bitsandbytes.backends.utils","path":"bitsandbytes/backends/utils.py","language":"python","start_line":1,"end_line":83,"context_start_line":1,"context_end_line":83,"code":"import subprocess\n\nfrom packaging import version\nimport torch\n\ntry:\n import triton # noqa: F401\n import triton.language as tl # noqa: F401\n\n triton_available = True\nexcept ImportError:\n triton_available = False\n\n\n_NF4_QUANT_TABLE = torch.tensor(\n [\n -1.0,\n -0.6961928009986877,\n -0.5250730514526367,\n -0.39491748809814453,\n -0.28444138169288635,\n -0.18477343022823334,\n -0.09105003625154495,\n 0.0,\n 0.07958029955625534,\n 0.16093020141124725,\n 0.24611230194568634,\n 0.33791524171829224,\n 0.44070982933044434,\n 0.5626170039176941,\n 0.7229568362236023,\n 1.0,\n ],\n dtype=torch.float32,\n device=\"xpu\"\n if hasattr(torch, \"xpu\") and torch.xpu.is_available()\n else \"cpu\", # Only cpu/xpu use this table for now.\n)\n_FP4_QUANT_TABLE = torch.tensor(\n [\n 0.0000,\n 0.0052,\n 0.6667,\n 1.0000,\n 0.3333,\n 0.5000,\n 0.1667,\n 0.2500,\n 0.0000,\n -0.0052,\n -0.6667,\n -1.0000,\n -0.3333,\n -0.5000,\n -0.1667,\n -0.2500,\n ],\n dtype=torch.float32,\n device=\"xpu\"\n if hasattr(torch, \"xpu\") and torch.xpu.is_available()\n else \"cpu\", # Only cpu/xpu use this table for now.\n)\nCODE = {\"nf4\": _NF4_QUANT_TABLE, \"fp4\": _FP4_QUANT_TABLE}\n\n\ndef get_gaudi_sw_version():\n \"\"\"\n Returns the installed version of Gaudi SW.\n \"\"\"\n output = subprocess.run(\n \"pip list | grep habana-torch-plugin\",\n shell=True,\n text=True,\n capture_output=True,\n )\n # If grep return nothing\n if not output.stdout.strip():\n return None\n\n return version.parse(output.stdout.split(\"\\n\")[0].split()[-1])\n\n\nGAUDI_SW_VER = get_gaudi_sw_version()","source_hash":"242b8fed686cc3a2d52b0b2e86b321d5540ee3932ae02dbb739c5c23dca5edf3","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.utils.get_gaudi_sw_version","uri":"program://bitsandbytes/function/bitsandbytes.backends.utils.get_gaudi_sw_version#L66-L80","kind":"function","name":"get_gaudi_sw_version","path":"bitsandbytes/backends/utils.py","language":"python","start_line":66,"end_line":80,"context_start_line":46,"context_end_line":83,"code":" 0.5000,\n 0.1667,\n 0.2500,\n 0.0000,\n -0.0052,\n -0.6667,\n -1.0000,\n -0.3333,\n 
-0.5000,\n -0.1667,\n -0.2500,\n ],\n dtype=torch.float32,\n device=\"xpu\"\n if hasattr(torch, \"xpu\") and torch.xpu.is_available()\n else \"cpu\", # Only cpu/xpu use this table for now.\n)\nCODE = {\"nf4\": _NF4_QUANT_TABLE, \"fp4\": _FP4_QUANT_TABLE}\n\n\ndef get_gaudi_sw_version():\n \"\"\"\n Returns the installed version of Gaudi SW.\n \"\"\"\n output = subprocess.run(\n \"pip list | grep habana-torch-plugin\",\n shell=True,\n text=True,\n capture_output=True,\n )\n # If grep return nothing\n if not output.stdout.strip():\n return None\n\n return version.parse(output.stdout.split(\"\\n\")[0].split()[-1])\n\n\nGAUDI_SW_VER = get_gaudi_sw_version()","source_hash":"242b8fed686cc3a2d52b0b2e86b321d5540ee3932ae02dbb739c5c23dca5edf3","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.default.ops","uri":"program://bitsandbytes/module/bitsandbytes.backends.default.ops#L1-L578","kind":"module","name":"bitsandbytes.backends.default.ops","path":"bitsandbytes/backends/default/ops.py","language":"python","start_line":1,"end_line":578,"context_start_line":1,"context_end_line":578,"code":"from collections.abc import Sequence\nfrom math import prod, sqrt\nfrom typing import Optional\n\nimport torch\n\nfrom ..._ops import register_kernel\nfrom ..utils import CODE\n\n\n@register_kernel(\"bitsandbytes::int8_mm_dequant\", \"default\")\ndef _(\n A: torch.Tensor,\n row_stats: torch.Tensor,\n col_stats: torch.Tensor,\n dtype: Optional[torch.dtype] = None,\n bias: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n torch._check(A.dtype == torch.int32, lambda: f\"A must be int32, got {A.dtype}\")\n torch._check(row_stats.dtype == torch.float32, lambda: f\"row_stats must be float32, got {row_stats.dtype}\")\n torch._check(col_stats.dtype == torch.float32, lambda: f\"col_stats must be float32, got {col_stats.dtype}\")\n\n A_calc = A.view(-1, A.shape[-1])\n row_stats = row_stats.reshape(-1).unsqueeze(-1)\n col_stats = col_stats.reshape(-1).unsqueeze(0)\n\n out = A_calc * (row_stats * col_stats) * 6.200124e-05\n if bias is not None:\n out += bias\n\n return out.to(dtype or torch.float16)\n\n\n@register_kernel(\"bitsandbytes::int8_mixed_scaled_mm\", \"default\")\ndef _(\n A: torch.Tensor,\n CA: torch.Tensor,\n CB: torch.Tensor,\n SCA: torch.Tensor,\n SCB: torch.Tensor,\n outlier_cols: Optional[torch.Tensor] = None,\n bias: Optional[torch.Tensor] = None,\n) -> tuple[torch.Tensor, Optional[torch.Tensor]]:\n subB = None\n\n if outlier_cols is not None and outlier_cols.numel():\n # Extract the inputs with outliers in original precision\n subA = A[:, outlier_cols].contiguous()\n\n # Dequantize the corresponding weight columns\n subB = (\n torch.ops.bitsandbytes.int8_vectorwise_dequant.default(CB[:, outlier_cols].contiguous(), SCB)\n .to(A.dtype)\n .t()\n )\n\n # TODO: if state.has_fp16_weights: subB = B[:, outlier_cols].t()\n\n else:\n # Needed for torch.compile when there are no outliers.\n subA = torch.empty(0, device=A.device, dtype=A.dtype)\n\n # Int8 Matmul + Dequant + Bias\n output = torch.ops.bitsandbytes.int8_scaled_mm.default(CA, CB, SCA, SCB, bias=bias, dtype=A.dtype)\n\n if subB is not None:\n # Add the outlier columns back to the output\n output = output.addmm(subA, subB)\n\n return output, subA\n\n\n@register_kernel(\"bitsandbytes::int8_scaled_mm\", \"default\")\ndef _(\n A: torch.Tensor,\n B: torch.Tensor,\n row_stats: torch.Tensor,\n col_stats: torch.Tensor,\n bias: Optional[torch.Tensor] = None,\n dtype: Optional[torch.dtype] = None,\n) -> torch.Tensor:\n out_i32 = 
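The CODE tables above act as codebooks for the default kernels: each scaled input is mapped to the index of its nearest codebook entry. A tiny demonstration of that lookup with an illustrative 4-entry codebook (the real NF4/FP4 tables have 16 entries):

    import torch

    code = torch.tensor([-1.0, 0.0, 0.5, 1.0])  # illustrative, not the NF4 table
    x = torch.tensor([0.9, -0.2, 0.4])          # inputs already scaled to [-1, 1]

    # Same argmin-over-absolute-difference as quantize_blockwise.
    idx = torch.argmin(torch.abs(x.unsqueeze(-1) - code), dim=-1)
    assert idx.tolist() == [3, 1, 2]
    assert code[idx].tolist() == [1.0, 0.0, 0.5]  # dequantized values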
torch.ops.bitsandbytes.int8_linear_matmul.default(A, B)\n return torch.ops.bitsandbytes.int8_mm_dequant.default(\n out_i32,\n row_stats,\n col_stats,\n dtype=dtype or torch.float16,\n bias=bias,\n )\n\n\n@register_kernel(\"bitsandbytes::int8_linear_matmul\", \"default\")\ndef _(A: torch.Tensor, B: torch.Tensor):\n return _int8_linear_matmul_impl(A, B)\n\n\n@register_kernel(\"bitsandbytes::int8_linear_matmul.out\", \"default\")\ndef _(A: torch.Tensor, B: torch.Tensor, out: torch.Tensor):\n torch._check(out.dtype == torch.int32)\n _int8_linear_matmul_impl(A, B, out)\n\n\ndef _int8_linear_matmul_impl(A: torch.Tensor, B: torch.Tensor, out: Optional[torch.Tensor] = None):\n # Naive implementation: perform matmul in fp32\n result = torch.matmul(A.float(), B.float().t()).to(torch.int32)\n if out is not None:\n result = out.copy_(result)\n return result\n\n\n@register_kernel(\"bitsandbytes::int8_vectorwise_quant\", \"default\")\ndef _(A: torch.Tensor, threshold=0.0):\n rows = prod(A.shape[:-1])\n outlier_cols = None\n\n outlier_restore = None\n\n if threshold > 0.0:\n outliers = A.abs() >= threshold\n\n if outliers.any():\n # Determine which columns contain outliers, and zero out the\n # outliers ahead of quantization. We need to keep a backup of these\n # outliers to restore them after quantization.\n outlier_cols = torch.argwhere(outliers.any(dim=0)).view(-1)\n outlier_restore = A[outliers].clone()\n A[outliers] = 0\n else:\n # Needed for torch.compile support.\n outlier_cols = torch.empty(0, device=A.device, dtype=torch.int64)\n\n # Get absmax for each row.\n row_stats = torch.max(A.abs(), dim=1).values.float()\n\n # Quantize row-wise to int8.\n out_row = torch.round(A * (127.0 / row_stats.unsqueeze(-1))).to(torch.int8)\n\n # Zero out values from outlier columns across all rows.\n if rows > 1 and outlier_cols is not None:\n out_row[:, outlier_cols] = 0\n\n # Restore outliers.\n if outlier_restore is not None:\n A[outliers] = outlier_restore\n\n return out_row, row_stats, outlier_cols\n\n\n@register_kernel(\"bitsandbytes::quantize_blockwise\", \"default\")\ndef _(A: torch.Tensor, code: torch.Tensor, blocksize: int) -> tuple[torch.Tensor, torch.Tensor]:\n torch._check_is_size(blocksize)\n\n n = A.numel()\n rem = n % blocksize\n has_rem = rem > 0\n blocks = n // blocksize + has_rem\n absmax = torch.zeros((blocks,), device=A.device, dtype=torch.float32)\n A_reshaped = A.reshape(n)\n A_com = A_reshaped[: n - rem]\n A_com_reshaped = A_com.reshape(n // blocksize, blocksize)\n absmax[: blocks - has_rem] = torch.abs(A_com_reshaped).max(dim=-1)[0]\n scaled_A = torch.clamp(A_com_reshaped * (1 / absmax[: blocks - has_rem].view(-1, 1)), -1, 1)\n scaled_A = scaled_A.reshape(-1)\n if has_rem:\n absmax[-1] = torch.abs(A_reshaped[n - rem :]).max()\n scaled_A_rem = torch.clamp(A_reshaped[n - rem :] * (1 / absmax[-1]), -1, 1)\n scaled_A = torch.cat([scaled_A, scaled_A_rem], dim=0)\n\n diff = torch.abs(scaled_A.unsqueeze(-1) - code.to(scaled_A.device))\n out = torch.argmin(diff, dim=-1).to(torch.uint8).to(scaled_A.device).reshape(A.shape)\n\n return out, absmax\n\n\n@register_kernel(\"bitsandbytes::dequantize_blockwise\", \"default\")\ndef _(A: torch.Tensor, absmax: torch.Tensor, code: torch.Tensor, blocksize: int, dtype: torch.dtype) -> torch.Tensor:\n torch._check_is_size(blocksize)\n torch._check(A.dtype == torch.uint8, lambda: f\"A must be uint8, got {A.dtype}\")\n\n out = code[A.reshape(-1).int()]\n blocks = out.shape[-1] // blocksize\n res = out.shape[-1] % blocksize\n if res != 0:\n out = 
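The literal 6.200124e-05 in int8_mm_dequant, shown earlier in this module record, is 1/127²: row-wise and column-wise quantization each scale by 127/absmax, so dequantizing the int32 product divides both factors back out. A quick numeric confirmation:

    scale = 1.0 / (127.0 * 127.0)
    assert abs(scale - 6.200124e-05) < 1e-12  # matches the kernel's constant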
torch.nn.functional.pad(out, (0, blocksize - res), mode=\"constant\", value=0)\n out = (out.view(-1, blocksize) * absmax.view(-1, 1)).to(dtype).reshape(-1)\n out = out[: blocks * blocksize + res]\n out = out.reshape(A.shape)\n\n return out\n\n\n@register_kernel(\"bitsandbytes::quantize_4bit\", \"default\")\ndef _(\n A: torch.Tensor, blocksize: int, quant_type: str, quant_storage: torch.dtype\n) -> tuple[torch.Tensor, torch.Tensor]:\n torch._check_is_size(blocksize)\n torch._check(quant_type in (\"nf4\", \"fp4\"), lambda: f\"quant_type must be nf4 or fp4, got {quant_type}\")\n torch._check(\n A.dtype in [torch.bfloat16, torch.float16, torch.float32],\n lambda: f\"Blockwise 4bit quantization only supports 16/32-bit floats, but got {A.dtype}\",\n )\n\n n = A.numel()\n full_blocks = n // blocksize\n rem = n % blocksize\n blocks = full_blocks + 1 if rem else full_blocks\n absmax = torch.zeros((blocks,), device=A.device, dtype=torch.float32)\n A_flattened = A.reshape(n)\n\n # Scale full blocks of the tensor to [-1, 1]\n A_full_blocks = A_flattened[: n - rem].reshape(n // blocksize, blocksize)\n absmax[:full_blocks] = torch.abs(A_full_blocks).max(dim=-1)[0]\n scaled = torch.clamp(A_full_blocks * (1 / absmax[:full_blocks].view(-1, 1)), -1, 1).reshape(-1)\n\n # Scale any partial block\n if rem:\n A_rem = A_flattened[-rem:]\n absmax[-1] = torch.abs(A_rem).max()\n scaled_rem = torch.clamp(A_rem * (1 / absmax[-1]), -1, 1)\n scaled = torch.cat([scaled, scaled_rem], dim=0)\n\n # Quantize with the lookup table\n code = CODE[quant_type].to(scaled.device).to(scaled.dtype)\n quantized = torch.argmin(torch.abs(scaled.view(-1, 1) - code), dim=-1, keepdim=True).to(torch.uint8)\n\n # Pack two quantized values per byte\n packed = quantized[::2] << 4 | quantized[1::2]\n\n if quant_storage != torch.uint8:\n packed = packed.squeeze().view(quant_storage).unsqueeze(1)\n\n return packed, absmax.float()\n\n\n@register_kernel(\"bitsandbytes::dequantize_4bit\", \"default\")\ndef _(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n shape: Sequence[int],\n dtype: torch.dtype,\n) -> torch.Tensor:\n torch._check_is_size(blocksize)\n torch._check(quant_type in (\"nf4\", \"fp4\"), lambda: f\"quant_type must be nf4 or fp4, got {quant_type}\")\n torch._check(\n dtype in [torch.bfloat16, torch.float16, torch.float32],\n lambda: f\"Blockwise 4bit dequantization only supports 16/32-bit floats, but got {dtype}\",\n )\n\n # Enable non uint8 dtype\n if A.dtype != torch.uint8:\n A = A.view(torch.uint8)\n\n A = A.reshape(-1)\n # Map nf4 to [-1, 1]\n out_dq = torch.empty(A.size(0) * 2, dtype=torch.int32, device=A.device)\n n = out_dq.numel()\n out_dq[1::2] = A & 0xF\n out_dq[::2] = A >> 4\n # code is fp32, cast to dtype to avoid the mismatch issue\n code = CODE[quant_type].to(dtype).to(A.device)\n out_dq = code[out_dq]\n\n # Apply scales\n if out_dq.numel() != n:\n assert out_dq.numel() == n + 1\n out_dq = torch.narrow(out_dq, 0, 0, n)\n blocks = n // blocksize\n blocks += 1 if n % blocksize > 0 else 0\n rem = n % blocksize\n has_rem = rem > 0\n\n out = torch.empty(shape, dtype=dtype, device=A.device).reshape(-1)\n if has_rem:\n out[: n - rem] = (out_dq[: n - rem].view(-1, blocksize) * absmax[: blocks - has_rem].view(-1, 1)).reshape(-1)\n out[n - rem :] = out_dq[n - rem :] * absmax[-1]\n else:\n out = out_dq.view(-1, blocksize) * absmax.view(-1, 1)\n\n out = out.reshape(-1, *shape[1:]).to(dtype)\n\n return out\n\n\n@register_kernel(\"bitsandbytes::gemv_4bit\", \"default\")\ndef _(\n A: torch.Tensor,\n B: 
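quantize_4bit above packs two 4-bit codes into each byte, and dequantize_4bit splits them back out; the round-trip in isolation:

    import torch

    q = torch.tensor([[3], [12], [7], [0]], dtype=torch.uint8)  # 4-bit codes

    # Packing, as in quantize_4bit: even rows -> high nibble, odd rows -> low.
    packed = q[::2] << 4 | q[1::2]
    assert packed.flatten().tolist() == [60, 112]  # 0x3C, 0x70

    # Unpacking, as in dequantize_4bit: high nibble to even slots, low to odd.
    flat = packed.flatten()
    out = torch.empty(flat.numel() * 2, dtype=torch.uint8)
    out[::2] = flat >> 4
    out[1::2] = flat & 0xF
    assert out.tolist() == [3, 12, 7, 0]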
torch.Tensor,\n shapeB: Sequence[int],\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n) -> torch.Tensor:\n # Applied from dequantize_4bit\n quant_type = \"fp4\" if code[1] > 0 else \"nf4\"\n B_dq = torch.ops.bitsandbytes.dequantize_4bit.default(B, absmax, blocksize, quant_type, shapeB, A.dtype)\n\n return torch.nn.functional.linear(\n A,\n B_dq,\n bias=None,\n )\n\n\nMOMENTUM = 0\nRMSPROP = 1\nADAGRAD = 2\nADAM = 3\n# LION should be larger than MOMENTUM, RMSPROP, ADAGRAD due to comparison in kernels\nLION = 4\nADEMAMIX = 5\n\nname2optimizer_id = {\n \"momentum\": MOMENTUM,\n \"rmsprop\": RMSPROP,\n \"adagrad\": ADAGRAD,\n \"adam\": ADAM,\n \"lion\": LION,\n \"ademamix\": ADEMAMIX,\n}\n\n\n@torch.compile\ndef _optimizer_precondition_32bit(\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n unorm_vec: torch.Tensor,\n beta1: float,\n beta2: float,\n eps: float,\n weight_decay: float,\n step: int,\n lr: float,\n gnorm_scale: float,\n optimizer_id: int,\n):\n \"\"\"Preprocessing optimizer, computing update norm\"\"\"\n\n g_vals = gnorm_scale * g\n\n if optimizer_id == 3: # ADAM\n correction1 = 1.0 / (1.0 - beta1**step)\n correction2 = 1.0 / (1.0 - beta2**step)\n\n s1_vals = state1 * beta1 + (1.0 - beta1) * g_vals\n s2_vals = state2 * beta2 + (1.0 - beta2) * g_vals * g_vals\n\n s1_vals = s1_vals * correction1\n s2_vals = s2_vals * correction2\n\n update_vals = s1_vals / (torch.sqrt(s2_vals) + eps)\n update_norm = update_vals * update_vals\n\n elif optimizer_id == 5: # ADEMAMIX\n update_norm = state1\n\n elif optimizer_id == 0: # MOMENTUM\n if step == 1:\n s1_vals = g_vals\n else:\n s1_vals = state1 * beta1 + g_vals\n update_norm = s1_vals * s1_vals\n\n elif optimizer_id == 4: # LION\n s1_vals = state1 * beta2 + (1.0 - beta2) * g_vals\n update_norm = s1_vals\n\n elif optimizer_id == 1: # RMSPROP\n s1_vals = state1 * beta1 + (1.0 - beta1) * g_vals * g_vals\n update_vals = g_vals / (torch.sqrt(s1_vals) + eps)\n update_norm = update_vals * update_vals\n\n elif optimizer_id == 2: # ADAGRAD\n s1_vals = state1 + g_vals * g_vals\n update_vals = g_vals / (torch.sqrt(s1_vals) + eps)\n update_norm = update_vals * update_vals\n\n total_norm = torch.sum(update_norm)\n unorm_vec.add_(total_norm)\n\n\n@torch.compile\ndef _optimizer_update_32bit(\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n unorm_vec: Optional[torch.Tensor],\n max_unorm: float,\n param_norm: float,\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n weight_decay: float,\n step: int,\n lr: float,\n gnorm_scale: float,\n optimizer_id: int,\n):\n \"\"\"Unified optimizer update kernel\"\"\"\n\n p_vals = p.float()\n g_vals = (gnorm_scale * g).float()\n if optimizer_id in [0, 1, 2, 4] and weight_decay > 0.0:\n g_vals = g_vals + p_vals * weight_decay\n\n update_scale = 1.0\n if max_unorm > 0.0:\n current_unorm = torch.sqrt(unorm_vec)\n if optimizer_id in [0, 1, 2, 4]: # 1-state optimizers\n if current_unorm > max_unorm * param_norm + eps:\n update_scale = (max_unorm * param_norm + eps) / current_unorm\n else: # 2-state optimizers\n if current_unorm > max_unorm * param_norm:\n update_scale = (max_unorm * param_norm) / current_unorm\n\n if optimizer_id == 3: # ADAM\n s1_vals = state1 * beta1 + (1.0 - beta1) * g_vals\n s2_vals = state2 * beta2 + (1.0 - beta2) * g_vals * g_vals\n\n correction1 = 1.0 - beta1**step\n correction2 = sqrt(1.0 - beta2**step)\n step_size = -lr * correction2 / correction1\n\n if 
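Two details in the record above are easy to miss: gemv_4bit infers the quantization flavour from the codebook's second entry (with the tables from bitsandbytes.backends.utils, _FP4_QUANT_TABLE[1] is 0.0052 > 0 while _NF4_QUANT_TABLE[1] is about -0.6962 < 0), and the ADAM branch of _optimizer_precondition_32bit applies standard bias correction. A worked first-step check of that Adam math with illustrative values:

    import torch

    beta1, beta2, eps, step = 0.9, 0.999, 1e-8, 1
    g = torch.tensor([0.5, -0.25])
    m = torch.zeros(2)  # state1
    v = torch.zeros(2)  # state2

    m = m * beta1 + (1 - beta1) * g        # first moment
    v = v * beta2 + (1 - beta2) * g * g    # second moment
    m_hat = m / (1 - beta1**step)          # bias-corrected, i.e. correction1 * m
    v_hat = v / (1 - beta2**step)          # bias-corrected, i.e. correction2 * v
    update = m_hat / (torch.sqrt(v_hat) + eps)
    # At step 1 the corrected update reduces to ~sign(g):
    assert torch.allclose(update, torch.sign(g), atol=1e-3)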
weight_decay > 0.0:\n p_vals = p_vals * (1.0 - lr * weight_decay)\n\n update_val = update_scale * step_size * (s1_vals / (torch.sqrt(s2_vals) + eps * correction2))\n p_vals = p_vals + update_val\n\n state1.copy_(s1_vals)\n state2.copy_(s2_vals)\n\n elif optimizer_id == 5: # ADEMAMIX\n s1_vals = state1[0]\n s3_vals = state1[1]\n s2_vals = state2\n\n m1 = s1_vals * beta1 + (1.0 - beta1) * g_vals\n m2 = s3_vals * beta3 + (1.0 - beta3) * g_vals\n nu = s2_vals * beta2 + (1.0 - beta2) * g_vals * g_vals\n\n correction1 = 1.0 - beta1**step\n correction2 = sqrt(1.0 - beta2**step)\n\n if weight_decay > 0.0:\n p_vals = p_vals * (1.0 - lr * weight_decay)\n\n mixed_momentum = (m1 / correction1) + (alpha * m2)\n adaptive_term = (torch.sqrt(nu) / correction2) + eps\n p_vals = p_vals - lr * (mixed_momentum / adaptive_term)\n\n state1[0].copy_(m1)\n state1[1].copy_(m2)\n state2.copy_(nu)\n\n elif optimizer_id == 0: # MOMENTUM\n if step == 1:\n s1_vals = g_vals\n else:\n s1_vals = state1 * beta1 + g_vals\n\n update_val = update_scale * (-lr * s1_vals)\n p_vals = p_vals + update_val\n\n state1.copy_(s1_vals)\n\n elif optimizer_id == 4: # LION\n momentum_update = state1 * beta1 + (1.0 - beta1) * g_vals\n update_val = update_scale * lr * torch.sign(momentum_update)\n p_vals = p_vals - update_val\n\n s1_vals = state1 * beta2 + (1.0 - beta2) * g_vals\n state1.copy_(s1_vals)\n\n elif optimizer_id == 1: # RMSPROP\n s1_vals = state1 * beta1 + (1.0 - beta1) * g_vals * g_vals\n update_val = update_scale * lr * g_vals / (torch.sqrt(s1_vals) + eps)\n p_vals = p_vals - update_val\n\n state1.copy_(s1_vals)\n\n elif optimizer_id == 2: # ADAGRAD\n s1_vals = state1 + g_vals * g_vals\n update_val = lr * g_vals / (torch.sqrt(s1_vals) + eps)\n p_vals = p_vals - update_val\n\n state1.copy_(s1_vals)\n\n p.copy_(p_vals)\n\n\n@register_kernel(\"bitsandbytes::optimizer_update_32bit\", \"default\")\ndef _(\n optimizer_name: str,\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n unorm_vec: Optional[torch.Tensor],\n max_unorm: float,\n param_norm: float,\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n weight_decay: float,\n step: int,\n lr: float,\n gnorm_scale: float = 1.0,\n skip_zeros=False,\n) -> None:\n \"\"\"\n 32-bit optimizer implemented by PyTorch with @torch.compile\n \"\"\"\n if skip_zeros:\n raise NotImplementedError(\"skip_zeros is not supported yet\")\n\n optimizer_id = name2optimizer_id[optimizer_name]\n\n if optimizer_name == \"lion\":\n _optimizer_update_32bit(\n g,\n p,\n state1,\n state2,\n unorm_vec,\n max_unorm,\n param_norm,\n beta1,\n beta2,\n beta3,\n alpha,\n eps,\n weight_decay,\n step,\n lr,\n gnorm_scale,\n optimizer_id,\n )\n\n if max_unorm > 0.0:\n unorm_vec.zero_()\n _optimizer_precondition_32bit(\n g, p, state1, state2, unorm_vec, beta1, beta2, eps, weight_decay, step, lr, gnorm_scale, optimizer_id\n )\n else:\n if max_unorm > 0.0:\n unorm_vec.zero_()\n _optimizer_precondition_32bit(\n g, p, state1, state2, unorm_vec, beta1, beta2, eps, weight_decay, step, lr, gnorm_scale, optimizer_id\n )\n\n _optimizer_update_32bit(\n g,\n p,\n state1,\n state2,\n unorm_vec,\n max_unorm,\n param_norm,\n beta1,\n beta2,\n beta3,\n alpha,\n eps,\n weight_decay,\n step,\n lr,\n gnorm_scale,\n optimizer_id,\n )","source_hash":"8955f88af54dada2f560f1c4fb88d659605be37ed6be2091583c9422521298c8","truncated":false} 
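The default-backend record above bundles two self-contained techniques: blockwise absmax 4-bit quantization (scale each block to [-1, 1], snap to the nearest entry of a code table, pack two 4-bit indices per byte) and torch.compile optimizer kernels dispatched by an integer optimizer_id. The following round-trip sketch restates the quantization flow in plain PyTorch; the 4-entry CODE table, the function names, and the even-block assumption are illustrative stand-ins rather than the library's actual NF4/FP4 tables or API, and the partial-block padding path is omitted.

import torch

# Toy stand-in for CODE["nf4"]/CODE["fp4"]; any small ascending table works here.
CODE = torch.tensor([-1.0, -0.5, 0.5, 1.0])

def quantize_blockwise_sketch(A: torch.Tensor, blocksize: int = 4):
    flat = A.reshape(-1).float()
    assert flat.numel() % blocksize == 0, "sketch skips the partial-block path"
    blocks = flat.view(-1, blocksize)
    absmax = blocks.abs().max(dim=-1).values               # one fp32 scale per block
    scaled = (blocks / absmax.view(-1, 1)).clamp(-1, 1).reshape(-1)
    # Nearest-neighbor lookup into the code table, as in the argmin above.
    idx = (scaled.view(-1, 1) - CODE).abs().argmin(dim=-1).to(torch.uint8)
    return idx[::2] << 4 | idx[1::2], absmax               # two 4-bit codes per byte

def dequantize_blockwise_sketch(packed: torch.Tensor, absmax: torch.Tensor, blocksize: int = 4):
    idx = torch.empty(packed.numel() * 2, dtype=torch.long)
    idx[::2] = (packed >> 4).long()                        # high nibble holds the even element
    idx[1::2] = (packed & 0xF).long()
    return (CODE[idx].view(-1, blocksize) * absmax.view(-1, 1)).reshape(-1)

A = torch.randn(16)
packed, absmax = quantize_blockwise_sketch(A)
A_dq = dequantize_blockwise_sketch(packed, absmax)
# Reconstruction error is bounded by the spacing of the toy code table.
print((A - A_dq).abs().max().item())

The optimizer half of the same record follows one pattern per method; as a concrete instance, the ADAM branch reduces to the familiar bias-corrected moment update. A minimal sketch, assuming no weight decay and no max_unorm update clipping:

import torch

def adam_step_sketch(p, g, m, v, *, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, step=1):
    m.mul_(beta1).add_(g, alpha=1 - beta1)                 # s1_vals in the kernel
    v.mul_(beta2).addcmul_(g, g, value=1 - beta2)          # s2_vals in the kernel
    c1 = 1.0 - beta1**step
    c2 = (1.0 - beta2**step) ** 0.5
    # step_size = -lr * c2 / c1; note eps is scaled by c2, matching the kernel.
    p.add_(m / (v.sqrt() + eps * c2), alpha=-lr * c2 / c1)

p, g = torch.randn(8), torch.randn(8)
m, v = torch.zeros(8), torch.zeros(8)
adam_step_sketch(p, g, m, v, step=1)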
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.default.ops._","uri":"program://bitsandbytes/function/bitsandbytes.backends.default.ops._#L499-L578","kind":"function","name":"_","path":"bitsandbytes/backends/default/ops.py","language":"python","start_line":499,"end_line":578,"context_start_line":479,"context_end_line":578,"code":" state1.copy_(s1_vals)\n\n elif optimizer_id == 1: # RMSPROP\n s1_vals = state1 * beta1 + (1.0 - beta1) * g_vals * g_vals\n update_val = update_scale * lr * g_vals / (torch.sqrt(s1_vals) + eps)\n p_vals = p_vals - update_val\n\n state1.copy_(s1_vals)\n\n elif optimizer_id == 2: # ADAGRAD\n s1_vals = state1 + g_vals * g_vals\n update_val = lr * g_vals / (torch.sqrt(s1_vals) + eps)\n p_vals = p_vals - update_val\n\n state1.copy_(s1_vals)\n\n p.copy_(p_vals)\n\n\n@register_kernel(\"bitsandbytes::optimizer_update_32bit\", \"default\")\ndef _(\n optimizer_name: str,\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n unorm_vec: Optional[torch.Tensor],\n max_unorm: float,\n param_norm: float,\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n weight_decay: float,\n step: int,\n lr: float,\n gnorm_scale: float = 1.0,\n skip_zeros=False,\n) -> None:\n \"\"\"\n 32-bit optimizer implemented by PyTorch with @torch.compile\n \"\"\"\n if skip_zeros:\n raise NotImplementedError(\"skip_zeros is not supported yet\")\n\n optimizer_id = name2optimizer_id[optimizer_name]\n\n if optimizer_name == \"lion\":\n _optimizer_update_32bit(\n g,\n p,\n state1,\n state2,\n unorm_vec,\n max_unorm,\n param_norm,\n beta1,\n beta2,\n beta3,\n alpha,\n eps,\n weight_decay,\n step,\n lr,\n gnorm_scale,\n optimizer_id,\n )\n\n if max_unorm > 0.0:\n unorm_vec.zero_()\n _optimizer_precondition_32bit(\n g, p, state1, state2, unorm_vec, beta1, beta2, eps, weight_decay, step, lr, gnorm_scale, optimizer_id\n )\n else:\n if max_unorm > 0.0:\n unorm_vec.zero_()\n _optimizer_precondition_32bit(\n g, p, state1, state2, unorm_vec, beta1, beta2, eps, weight_decay, step, lr, gnorm_scale, optimizer_id\n )\n\n _optimizer_update_32bit(\n g,\n p,\n state1,\n state2,\n unorm_vec,\n max_unorm,\n param_norm,\n beta1,\n beta2,\n beta3,\n alpha,\n eps,\n weight_decay,\n step,\n lr,\n gnorm_scale,\n optimizer_id,\n )","source_hash":"8955f88af54dada2f560f1c4fb88d659605be37ed6be2091583c9422521298c8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.default.ops._int8_linear_matmul_impl","uri":"program://bitsandbytes/function/bitsandbytes.backends.default.ops._int8_linear_matmul_impl#L103-L108","kind":"function","name":"_int8_linear_matmul_impl","path":"bitsandbytes/backends/default/ops.py","language":"python","start_line":103,"end_line":108,"context_start_line":83,"context_end_line":128,"code":" return torch.ops.bitsandbytes.int8_mm_dequant.default(\n out_i32,\n row_stats,\n col_stats,\n dtype=dtype or torch.float16,\n bias=bias,\n )\n\n\n@register_kernel(\"bitsandbytes::int8_linear_matmul\", \"default\")\ndef _(A: torch.Tensor, B: torch.Tensor):\n return _int8_linear_matmul_impl(A, B)\n\n\n@register_kernel(\"bitsandbytes::int8_linear_matmul.out\", \"default\")\ndef _(A: torch.Tensor, B: torch.Tensor, out: torch.Tensor):\n torch._check(out.dtype == torch.int32)\n _int8_linear_matmul_impl(A, B, out)\n\n\ndef _int8_linear_matmul_impl(A: torch.Tensor, B: torch.Tensor, out: Optional[torch.Tensor] = None):\n # Naive implementation: perform matmul in fp32\n result = torch.matmul(A.float(), 
B.float().t()).to(torch.int32)\n if out is not None:\n result = out.copy_(result)\n return result\n\n\n@register_kernel(\"bitsandbytes::int8_vectorwise_quant\", \"default\")\ndef _(A: torch.Tensor, threshold=0.0):\n rows = prod(A.shape[:-1])\n outlier_cols = None\n\n outlier_restore = None\n\n if threshold > 0.0:\n outliers = A.abs() >= threshold\n\n if outliers.any():\n # Determine which columns contain outliers, and zero out the\n # outliers ahead of quantization. We need to keep a backup of these\n # outliers to restore them after quantization.\n outlier_cols = torch.argwhere(outliers.any(dim=0)).view(-1)\n outlier_restore = A[outliers].clone()\n A[outliers] = 0\n else:","source_hash":"8955f88af54dada2f560f1c4fb88d659605be37ed6be2091583c9422521298c8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.default.ops._optimizer_precondition_32bit","uri":"program://bitsandbytes/function/bitsandbytes.backends.default.ops._optimizer_precondition_32bit#L325-L382","kind":"function","name":"_optimizer_precondition_32bit","path":"bitsandbytes/backends/default/ops.py","language":"python","start_line":325,"end_line":382,"context_start_line":305,"context_end_line":402,"code":"\nMOMENTUM = 0\nRMSPROP = 1\nADAGRAD = 2\nADAM = 3\n# LION should be larger than MOMENTUM, RMSPROP, ADAGRAD due to comparison in kernels\nLION = 4\nADEMAMIX = 5\n\nname2optimizer_id = {\n \"momentum\": MOMENTUM,\n \"rmsprop\": RMSPROP,\n \"adagrad\": ADAGRAD,\n \"adam\": ADAM,\n \"lion\": LION,\n \"ademamix\": ADEMAMIX,\n}\n\n\n@torch.compile\ndef _optimizer_precondition_32bit(\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n unorm_vec: torch.Tensor,\n beta1: float,\n beta2: float,\n eps: float,\n weight_decay: float,\n step: int,\n lr: float,\n gnorm_scale: float,\n optimizer_id: int,\n):\n \"\"\"Preprocessing optimizer, computing update norm\"\"\"\n\n g_vals = gnorm_scale * g\n\n if optimizer_id == 3: # ADAM\n correction1 = 1.0 / (1.0 - beta1**step)\n correction2 = 1.0 / (1.0 - beta2**step)\n\n s1_vals = state1 * beta1 + (1.0 - beta1) * g_vals\n s2_vals = state2 * beta2 + (1.0 - beta2) * g_vals * g_vals\n\n s1_vals = s1_vals * correction1\n s2_vals = s2_vals * correction2\n\n update_vals = s1_vals / (torch.sqrt(s2_vals) + eps)\n update_norm = update_vals * update_vals\n\n elif optimizer_id == 5: # ADEMAMIX\n update_norm = state1\n\n elif optimizer_id == 0: # MOMENTUM\n if step == 1:\n s1_vals = g_vals\n else:\n s1_vals = state1 * beta1 + g_vals\n update_norm = s1_vals * s1_vals\n\n elif optimizer_id == 4: # LION\n s1_vals = state1 * beta2 + (1.0 - beta2) * g_vals\n update_norm = s1_vals\n\n elif optimizer_id == 1: # RMSPROP\n s1_vals = state1 * beta1 + (1.0 - beta1) * g_vals * g_vals\n update_vals = g_vals / (torch.sqrt(s1_vals) + eps)\n update_norm = update_vals * update_vals\n\n elif optimizer_id == 2: # ADAGRAD\n s1_vals = state1 + g_vals * g_vals\n update_vals = g_vals / (torch.sqrt(s1_vals) + eps)\n update_norm = update_vals * update_vals\n\n total_norm = torch.sum(update_norm)\n unorm_vec.add_(total_norm)\n\n\n@torch.compile\ndef _optimizer_update_32bit(\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n unorm_vec: Optional[torch.Tensor],\n max_unorm: float,\n param_norm: float,\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n weight_decay: float,\n step: int,\n lr: float,\n gnorm_scale: 
float,","source_hash":"8955f88af54dada2f560f1c4fb88d659605be37ed6be2091583c9422521298c8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.default.ops._optimizer_update_32bit","uri":"program://bitsandbytes/function/bitsandbytes.backends.default.ops._optimizer_update_32bit#L386-L495","kind":"function","name":"_optimizer_update_32bit","path":"bitsandbytes/backends/default/ops.py","language":"python","start_line":386,"end_line":495,"context_start_line":366,"context_end_line":515,"code":"\n elif optimizer_id == 4: # LION\n s1_vals = state1 * beta2 + (1.0 - beta2) * g_vals\n update_norm = s1_vals\n\n elif optimizer_id == 1: # RMSPROP\n s1_vals = state1 * beta1 + (1.0 - beta1) * g_vals * g_vals\n update_vals = g_vals / (torch.sqrt(s1_vals) + eps)\n update_norm = update_vals * update_vals\n\n elif optimizer_id == 2: # ADAGRAD\n s1_vals = state1 + g_vals * g_vals\n update_vals = g_vals / (torch.sqrt(s1_vals) + eps)\n update_norm = update_vals * update_vals\n\n total_norm = torch.sum(update_norm)\n unorm_vec.add_(total_norm)\n\n\n@torch.compile\ndef _optimizer_update_32bit(\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n unorm_vec: Optional[torch.Tensor],\n max_unorm: float,\n param_norm: float,\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n weight_decay: float,\n step: int,\n lr: float,\n gnorm_scale: float,\n optimizer_id: int,\n):\n \"\"\"Unified optimizer update kernel\"\"\"\n\n p_vals = p.float()\n g_vals = (gnorm_scale * g).float()\n if optimizer_id in [0, 1, 2, 4] and weight_decay > 0.0:\n g_vals = g_vals + p_vals * weight_decay\n\n update_scale = 1.0\n if max_unorm > 0.0:\n current_unorm = torch.sqrt(unorm_vec)\n if optimizer_id in [0, 1, 2, 4]: # 1-state optimizers\n if current_unorm > max_unorm * param_norm + eps:\n update_scale = (max_unorm * param_norm + eps) / current_unorm\n else: # 2-state optimizers\n if current_unorm > max_unorm * param_norm:\n update_scale = (max_unorm * param_norm) / current_unorm\n\n if optimizer_id == 3: # ADAM\n s1_vals = state1 * beta1 + (1.0 - beta1) * g_vals\n s2_vals = state2 * beta2 + (1.0 - beta2) * g_vals * g_vals\n\n correction1 = 1.0 - beta1**step\n correction2 = sqrt(1.0 - beta2**step)\n step_size = -lr * correction2 / correction1\n\n if weight_decay > 0.0:\n p_vals = p_vals * (1.0 - lr * weight_decay)\n\n update_val = update_scale * step_size * (s1_vals / (torch.sqrt(s2_vals) + eps * correction2))\n p_vals = p_vals + update_val\n\n state1.copy_(s1_vals)\n state2.copy_(s2_vals)\n\n elif optimizer_id == 5: # ADEMAMIX\n s1_vals = state1[0]\n s3_vals = state1[1]\n s2_vals = state2\n\n m1 = s1_vals * beta1 + (1.0 - beta1) * g_vals\n m2 = s3_vals * beta3 + (1.0 - beta3) * g_vals\n nu = s2_vals * beta2 + (1.0 - beta2) * g_vals * g_vals\n\n correction1 = 1.0 - beta1**step\n correction2 = sqrt(1.0 - beta2**step)\n\n if weight_decay > 0.0:\n p_vals = p_vals * (1.0 - lr * weight_decay)\n\n mixed_momentum = (m1 / correction1) + (alpha * m2)\n adaptive_term = (torch.sqrt(nu) / correction2) + eps\n p_vals = p_vals - lr * (mixed_momentum / adaptive_term)\n\n state1[0].copy_(m1)\n state1[1].copy_(m2)\n state2.copy_(nu)\n\n elif optimizer_id == 0: # MOMENTUM\n if step == 1:\n s1_vals = g_vals\n else:\n s1_vals = state1 * beta1 + g_vals\n\n update_val = update_scale * (-lr * s1_vals)\n p_vals = p_vals + update_val\n\n state1.copy_(s1_vals)\n\n elif optimizer_id == 4: # LION\n momentum_update = state1 * beta1 + (1.0 - beta1) * g_vals\n update_val = 
update_scale * lr * torch.sign(momentum_update)\n p_vals = p_vals - update_val\n\n s1_vals = state1 * beta2 + (1.0 - beta2) * g_vals\n state1.copy_(s1_vals)\n\n elif optimizer_id == 1: # RMSPROP\n s1_vals = state1 * beta1 + (1.0 - beta1) * g_vals * g_vals\n update_val = update_scale * lr * g_vals / (torch.sqrt(s1_vals) + eps)\n p_vals = p_vals - update_val\n\n state1.copy_(s1_vals)\n\n elif optimizer_id == 2: # ADAGRAD\n s1_vals = state1 + g_vals * g_vals\n update_val = lr * g_vals / (torch.sqrt(s1_vals) + eps)\n p_vals = p_vals - update_val\n\n state1.copy_(s1_vals)\n\n p.copy_(p_vals)\n\n\n@register_kernel(\"bitsandbytes::optimizer_update_32bit\", \"default\")\ndef _(\n optimizer_name: str,\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n unorm_vec: Optional[torch.Tensor],\n max_unorm: float,\n param_norm: float,\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n weight_decay: float,\n step: int,\n lr: float,","source_hash":"8955f88af54dada2f560f1c4fb88d659605be37ed6be2091583c9422521298c8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.hpu.ops","uri":"program://bitsandbytes/module/bitsandbytes.backends.hpu.ops#L1-L55","kind":"module","name":"bitsandbytes.backends.hpu.ops","path":"bitsandbytes/backends/hpu/ops.py","language":"python","start_line":1,"end_line":55,"context_start_line":1,"context_end_line":55,"code":"from collections.abc import Sequence\nimport math\n\nimport torch\n\nfrom ..._ops import register_kernel\nfrom ..utils import GAUDI_SW_VER\n\n\n# convert btw standard 4-bit compression format and ipex compression format\n# needed for backward compatibility with older versions of gaudi sw\ndef _reverse_4bit_compress_format(weight: torch.Tensor):\n out_1 = (weight & 0xF0) >> 4\n out_2 = (weight & 0xF) << 4\n out = out_1 | out_2\n return out\n\n\n@register_kernel(\"bitsandbytes::dequantize_4bit\", \"hpu\")\ndef _(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n shape: Sequence[int],\n dtype: torch.dtype,\n) -> torch.Tensor:\n torch._check_is_size(blocksize)\n torch._check(quant_type == \"nf4\", lambda: f\"quant_type must be nf4, got {quant_type}\")\n torch._check(\n A.dtype in [torch.bfloat16, torch.uint8],\n lambda: f\"quant_storage supports uint8 or bfloat16, but got {A.dtype}\",\n )\n\n # Enable non uint8 dtype\n if A.dtype != torch.uint8:\n A = A.view(torch.uint8)\n\n A = A.reshape(-1)\n\n if GAUDI_SW_VER and (GAUDI_SW_VER.major < 1 or GAUDI_SW_VER.minor < 22):\n A = _reverse_4bit_compress_format(A)\n\n # HPU dequantization function for NF4 quantized tensors.\n out_dq = torch.ops.hpu.dequantize_nf4(\n A,\n absmax.to(dtype),\n blocksize,\n out_shape=(math.prod(shape),),\n out_dtype=dtype,\n )\n\n output = out_dq.reshape(shape)\n\n return output","source_hash":"92d95b146a73a4050c962966f053d6f8433272d48625d6612d8551689ddc0c30","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.hpu.ops._reverse_4bit_compress_format","uri":"program://bitsandbytes/function/bitsandbytes.backends.hpu.ops._reverse_4bit_compress_format#L12-L16","kind":"function","name":"_reverse_4bit_compress_format","path":"bitsandbytes/backends/hpu/ops.py","language":"python","start_line":12,"end_line":16,"context_start_line":1,"context_end_line":36,"code":"from collections.abc import Sequence\nimport math\n\nimport torch\n\nfrom ..._ops import register_kernel\nfrom ..utils import GAUDI_SW_VER\n\n\n# convert btw standard 4-bit compression 
format and ipex compression format\n# needed for backward compatibility with older versions of gaudi sw\ndef _reverse_4bit_compress_format(weight: torch.Tensor):\n out_1 = (weight & 0xF0) >> 4\n out_2 = (weight & 0xF) << 4\n out = out_1 | out_2\n return out\n\n\n@register_kernel(\"bitsandbytes::dequantize_4bit\", \"hpu\")\ndef _(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n shape: Sequence[int],\n dtype: torch.dtype,\n) -> torch.Tensor:\n torch._check_is_size(blocksize)\n torch._check(quant_type == \"nf4\", lambda: f\"quant_type must be nf4, got {quant_type}\")\n torch._check(\n A.dtype in [torch.bfloat16, torch.uint8],\n lambda: f\"quant_storage supports uint8 or bfloat16, but got {A.dtype}\",\n )\n\n # Enable non uint8 dtype\n if A.dtype != torch.uint8:","source_hash":"92d95b146a73a4050c962966f053d6f8433272d48625d6612d8551689ddc0c30","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.hpu.ops._","uri":"program://bitsandbytes/function/bitsandbytes.backends.hpu.ops._#L20-L55","kind":"function","name":"_","path":"bitsandbytes/backends/hpu/ops.py","language":"python","start_line":20,"end_line":55,"context_start_line":1,"context_end_line":55,"code":"from collections.abc import Sequence\nimport math\n\nimport torch\n\nfrom ..._ops import register_kernel\nfrom ..utils import GAUDI_SW_VER\n\n\n# convert btw standard 4-bit compression format and ipex compression format\n# needed for backward compatibility with older versions of gaudi sw\ndef _reverse_4bit_compress_format(weight: torch.Tensor):\n out_1 = (weight & 0xF0) >> 4\n out_2 = (weight & 0xF) << 4\n out = out_1 | out_2\n return out\n\n\n@register_kernel(\"bitsandbytes::dequantize_4bit\", \"hpu\")\ndef _(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n shape: Sequence[int],\n dtype: torch.dtype,\n) -> torch.Tensor:\n torch._check_is_size(blocksize)\n torch._check(quant_type == \"nf4\", lambda: f\"quant_type must be nf4, got {quant_type}\")\n torch._check(\n A.dtype in [torch.bfloat16, torch.uint8],\n lambda: f\"quant_storage supports uint8 or bfloat16, but got {A.dtype}\",\n )\n\n # Enable non uint8 dtype\n if A.dtype != torch.uint8:\n A = A.view(torch.uint8)\n\n A = A.reshape(-1)\n\n if GAUDI_SW_VER and (GAUDI_SW_VER.major < 1 or GAUDI_SW_VER.minor < 22):\n A = _reverse_4bit_compress_format(A)\n\n # HPU dequantization function for NF4 quantized tensors.\n out_dq = torch.ops.hpu.dequantize_nf4(\n A,\n absmax.to(dtype),\n blocksize,\n out_shape=(math.prod(shape),),\n out_dtype=dtype,\n )\n\n output = out_dq.reshape(shape)\n\n return output","source_hash":"92d95b146a73a4050c962966f053d6f8433272d48625d6612d8551689ddc0c30","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_4bit","uri":"program://bitsandbytes/module/bitsandbytes.backends.triton.kernels_4bit#L1-L552","kind":"module","name":"bitsandbytes.backends.triton.kernels_4bit","path":"bitsandbytes/backends/triton/kernels_4bit.py","language":"python","start_line":1,"end_line":552,"context_start_line":1,"context_end_line":552,"code":"import torch\n\nimport triton\nimport triton.language as tl\n\n\n# Triton implementation of similar CUDA kernel to avoid loading code from csrc/kernels.cu::dQuantizeFP4\n# @triton.autotune(\n# configs=[\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 1, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 2, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# 
triton.Config({\"SPLIT_NUM_BLOCKS\": 1}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 2}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 4}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 8}),\n# ],\n# key=[\"n_elements\"],\n# )\n@triton.jit\ndef quantize_fp4_blockwise_kernel(\n A_ptr,\n absmax_ptr,\n out_ptr,\n n_elements,\n BLOCK_SIZE: tl.constexpr,\n SPLIT_NUM_BLOCKS: tl.constexpr,\n):\n PAIRED_SPLIT_NUM_BLOCKS: tl.constexpr = SPLIT_NUM_BLOCKS * 2\n block_start_idx = tl.program_id(0) * PAIRED_SPLIT_NUM_BLOCKS\n thread_idx = tl.arange(0, PAIRED_SPLIT_NUM_BLOCKS * BLOCK_SIZE)\n\n offsets = block_start_idx * BLOCK_SIZE + thread_idx\n mask = offsets < n_elements\n\n A = tl.load(A_ptr + offsets, mask=mask, other=0.0)\n\n # To be able process several blocks -> (PAIRED_SPLIT_NUM_BLOCKS, BLOCK_SIZE)\n A_reshaped = tl.reshape(A, (PAIRED_SPLIT_NUM_BLOCKS, BLOCK_SIZE))\n\n # Calculating absamax for each block\n absmax = tl.max(tl.abs(A_reshaped), axis=1)\n tl.store(absmax_ptr + block_start_idx + tl.arange(0, PAIRED_SPLIT_NUM_BLOCKS), absmax)\n\n A_normalized = A_reshaped / absmax[:, None]\n A_normalized = tl.clamp(A_normalized, -1.0, 1.0)\n\n sign = tl.where(A_normalized < 0, 0b1000, 0b0000)\n A_absf = tl.abs(A_normalized)\n\n result = tl.where(\n A_absf > 0.29166667,\n tl.where(\n A_absf > 0.583333, tl.where(A_absf > 0.8333333, 0b011, 0b010), tl.where(A_absf > 0.4166667, 0b101, 0b100)\n ),\n tl.where(\n A_absf > 0.0859375,\n tl.where(A_absf > 0.20833333, 0b0111, 0b0110),\n tl.where(A_absf > 0.00260417, 0b0001, 0b0000),\n ),\n )\n quantized = (result ^ sign).to(tl.uint8)\n\n quantized = quantized.reshape((PAIRED_SPLIT_NUM_BLOCKS, BLOCK_SIZE // 2, 2))\n left, right = quantized.split()\n packed = left << 4 | (right & 0xF)\n\n packed_flat = tl.reshape(packed, (BLOCK_SIZE * SPLIT_NUM_BLOCKS,))\n out_offsets = block_start_idx * BLOCK_SIZE // 2 + tl.arange(0, SPLIT_NUM_BLOCKS * BLOCK_SIZE)\n out_mask = out_offsets < n_elements // 2\n tl.store(out_ptr + out_offsets, packed_flat, mask=out_mask)\n\n\n# Triton implementation of similar CUDA kernel to avoid loading code from csrc/kernels.cu::dQuantizeNF4\n# @triton.autotune(\n# configs=[\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 1, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 2, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 1}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 2}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 4}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 8}),\n# ],\n# key=[\"n_elements\"],\n# )\n@triton.jit\ndef quantize_nf4_blockwise_kernel(\n A_ptr,\n absmax_ptr,\n out_ptr,\n n_elements,\n BLOCK_SIZE: tl.constexpr,\n SPLIT_NUM_BLOCKS: tl.constexpr,\n):\n PAIRED_SPLIT_NUM_BLOCKS: tl.constexpr = SPLIT_NUM_BLOCKS * 2\n block_start_idx = tl.program_id(0) * PAIRED_SPLIT_NUM_BLOCKS\n thread_idx = tl.arange(0, PAIRED_SPLIT_NUM_BLOCKS * BLOCK_SIZE)\n\n offsets = block_start_idx * BLOCK_SIZE + thread_idx\n mask = offsets < n_elements\n\n A = tl.load(A_ptr + offsets, mask=mask, other=0.0)\n\n # To be able process several blocks -> (PAIRED_SPLIT_NUM_BLOCKS, BLOCK_SIZE)\n A_reshaped = tl.reshape(A, (PAIRED_SPLIT_NUM_BLOCKS, BLOCK_SIZE))\n\n # Calculating absamax for each block\n absmax = tl.max(tl.abs(A_reshaped), axis=1)\n tl.store(absmax_ptr + block_start_idx + tl.arange(0, PAIRED_SPLIT_NUM_BLOCKS), absmax)\n\n A_normalized = A_reshaped / absmax[:, None]\n A_normalized = tl.clamp(A_normalized, -1.0, 1.0)\n\n result = tl.where(\n A_normalized > 0.03979014977812767,\n tl.where(\n 
A_normalized > 0.3893125355243683,\n tl.where(\n A_normalized > 0.6427869200706482,\n tl.where(A_normalized > 0.8614784181118011, 0b1111, 0b1110),\n tl.where(A_normalized > 0.5016634166240692, 0b1101, 0b1100),\n ),\n tl.where(\n A_normalized > 0.2035212516784668,\n tl.where(A_normalized > 0.2920137718319893, 0b1011, 0b1010),\n tl.where(A_normalized > 0.1202552504837513, 0b1001, 0b1000),\n ),\n ),\n tl.where(\n A_normalized > -0.33967943489551544,\n tl.where(\n A_normalized > -0.13791173323988914,\n tl.where(A_normalized > -0.045525018125772476, 0b0111, 0b0110),\n tl.where(A_normalized > -0.23460740596055984, 0b0101, 0b0100),\n ),\n tl.where(\n A_normalized > -0.6106329262256622,\n tl.where(A_normalized > -0.4599952697753906, 0b0011, 0b0010),\n tl.where(A_normalized > -0.8480964004993439, 0b0001, 0b0000),\n ),\n ),\n )\n quantized = result.to(tl.uint8)\n\n quantized = quantized.reshape((PAIRED_SPLIT_NUM_BLOCKS, BLOCK_SIZE // 2, 2))\n\n left, right = quantized.split()\n packed = left << 4 | (right & 0xF)\n\n packed_flat = tl.reshape(packed, (BLOCK_SIZE * SPLIT_NUM_BLOCKS,))\n out_offsets = block_start_idx * BLOCK_SIZE // 2 + tl.arange(0, SPLIT_NUM_BLOCKS * BLOCK_SIZE)\n out_mask = out_offsets < n_elements // 2\n tl.store(out_ptr + out_offsets, packed_flat, mask=out_mask)\n\n\ndef quantize_4bit_blockwise_triton(A, blocksize, quant_type, blocks, absmax, num_elements, quantized_out):\n # grid = lambda META: (triton.cdiv(blocks, META[\"SPLIT_NUM_BLOCKS\"]),)\n split_num_blocks = 4\n grid = (triton.cdiv(blocks, split_num_blocks),)\n if quant_type == \"fp4\":\n quantize_fp4_blockwise_kernel[grid](\n A_ptr=A,\n absmax_ptr=absmax,\n out_ptr=quantized_out,\n n_elements=num_elements,\n BLOCK_SIZE=blocksize,\n SPLIT_NUM_BLOCKS=split_num_blocks,\n )\n else:\n quantize_nf4_blockwise_kernel[grid](\n A_ptr=A,\n absmax_ptr=absmax,\n out_ptr=quantized_out,\n n_elements=num_elements,\n BLOCK_SIZE=blocksize,\n SPLIT_NUM_BLOCKS=split_num_blocks,\n )\n return quantized_out, absmax\n\n\n@triton.jit\ndef dequant_4bit_body_util(a, offsets, quant_ptr, absmax_ptr, n_elems, QUANT_BLOCK: tl.constexpr):\n PAIRED_QUANT_BLOCK: tl.constexpr = QUANT_BLOCK // 2\n mask = offsets < n_elems\n higher = a & 0xF\n # lower 4bits\n lower = a >> 4\n\n abs_offsets = offsets // PAIRED_QUANT_BLOCK\n absmax = tl.load(absmax_ptr + abs_offsets, mask=mask, other=1.0, eviction_policy=\"evict_last\")\n\n # apply conversion\n lower_4 = tl.load(quant_ptr + lower, eviction_policy=\"evict_last\")\n higher_4 = tl.load(quant_ptr + higher, eviction_policy=\"evict_last\")\n\n mul_high = higher_4 * absmax\n mul_low = lower_4 * absmax\n out_dq = tl.interleave(mul_low, mul_high)\n return out_dq\n\n\n# Triton implementation of similar CUDA kernel to avoid loading code from csrc/kernels.cu::dDequantizeFP4Tree\n@triton.jit\ndef dequantize_fp4_tree(val, absmax):\n # val: tl.tensor (uint8)\n # absmax: tl.tensor (float32/float16)\n # 00001100 00001011 00001001 00001111\n sign = tl.where((val & 0b1000) == 0b1000, -1.0, 1.0) # -1\n third_bit = (val & 0b0100) == 0b0100 # True\n second_bit = (val & 0b0010) == 0b0010 # False\n first_bit = (val & 0b0001) == 0b0001 # False\n\n branch1 = tl.where(\n second_bit,\n tl.where(first_bit, 0.25, 0.16666667), # 1111, 1110\n tl.where(first_bit, 0.5, 0.33333333), # 1101, 1100\n )\n branch2 = tl.where(\n second_bit,\n tl.where(first_bit, 1.0, 0.66666667), # 1011, 1010\n tl.where(first_bit, 0.00520833, 0.0), # 1001, 1000\n )\n out = tl.where(third_bit, branch1, branch2)\n return out * sign * absmax\n\n\n@triton.jit\ndef 
dequant_fp4_body_util(a, offsets, absmax_ptr, n_elems, QUANT_BLOCK: tl.constexpr):\n PAIRED_QUANT_BLOCK: tl.constexpr = QUANT_BLOCK // 2\n mask = offsets < n_elems\n higher = a & 0xF\n lower = a >> 4\n\n abs_offsets = offsets // PAIRED_QUANT_BLOCK\n absmax = tl.load(absmax_ptr + abs_offsets, mask=mask, other=1.0, eviction_policy=\"evict_last\")\n mul_high = dequantize_fp4_tree(higher, absmax)\n mul_low = dequantize_fp4_tree(lower, absmax)\n out_dq = tl.interleave(mul_low, mul_high)\n return out_dq\n\n\n# Triton implementation of similar CUDA kernel to avoid loading code from csrc/kernels.cu::dDequantizeNF4\n@triton.jit\ndef dequantize_nf4_tree(val):\n # val: tl.tensor (uint8)\n cond0 = (val & 0b1000) == 0b1000\n cond1 = (val & 0b0100) == 0b0100\n cond2 = (val & 0b0010) == 0b0010\n cond3 = (val & 0b0001) == 0b0001\n\n # Positive branch (val & 0b1000) == 8\n branch_pos = tl.where(\n cond1,\n tl.where(\n cond2,\n tl.where(cond3, 1.0, 0.7229568362236023), # 1111, 1110\n tl.where(cond3, 0.5626170039176941, 0.44070982933044434), # 1101, 1100\n ),\n tl.where(\n cond2,\n tl.where(cond3, 0.33791524171829224, 0.24611230194568634), # 1011, 1010\n tl.where(cond3, 0.16093020141124725, 0.07958029955625534), # 1001, 1000\n ),\n )\n\n # Negative branch (val & 0b1000) == 0\n branch_neg = tl.where(\n cond1,\n tl.where(\n cond2,\n tl.where(cond3, 0.0, -0.09105003625154495), # 0111, 0110\n tl.where(cond3, -0.18477343022823334, -0.28444138169288635), # 0101, 0100\n ),\n tl.where(\n cond2,\n tl.where(cond3, -0.39491748809814453, -0.5250730514526367), # 0011, 0010\n tl.where(cond3, -0.6961928009986877, -1.0), # 0001, 0000\n ),\n )\n return tl.where(cond0, branch_pos, branch_neg)\n\n\n@triton.jit\ndef dequant_nf4_body_util(a, offsets, absmax_ptr, n_elems, QUANT_BLOCK: tl.constexpr):\n PAIRED_QUANT_BLOCK: tl.constexpr = QUANT_BLOCK // 2\n mask = offsets < n_elems\n higher = a & 0xF\n # lower 4bits\n lower = a >> 4\n\n abs_offsets = offsets // PAIRED_QUANT_BLOCK\n absmax = tl.load(absmax_ptr + abs_offsets, mask=mask, other=1.0, eviction_policy=\"evict_last\")\n mul_high = dequantize_nf4_tree(higher) * absmax\n mul_low = dequantize_nf4_tree(lower) * absmax\n out_dq = tl.interleave(mul_low, mul_high)\n return out_dq\n\n\n# All such kernels are similar, so maybe code can be generalised.\n# @triton.autotune(\n# configs=[\n# # # triton.Config({'SPLIT_SIZE': 64}),\n# # # # triton.Config({'SPLIT_SIZE': 64, 'grf_mode': 'large'}, num_stages=2, num_warps=32),\n# # # # triton.Config({'SPLIT_SIZE': 64, 'grf_mode': 'auto'}, num_stages=2, num_warps=32),\n# # # # triton.Config({'SPLIT_SIZE': 64, 'grf_mode': 'large'}, num_stages=4, num_warps=32),\n# # # # triton.Config({'SPLIT_SIZE': 64, 'grf_mode': 'auto'}, num_stages=4, num_warps=32),\n# triton.Config({'SPLIT_SIZE': 128}),\n# triton.Config({'SPLIT_SIZE': 128}, num_warps = 32, num_stages = 2),\n# # # triton.Config({'SPLIT_SIZE': 128}, num_warps = 4, num_stages = 4),\n# # # # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'large'}, num_stages=2, num_warps=32),\n# # # # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'auto'}, num_stages=2, num_warps=32),\n# # # # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'large'}, num_stages=4, num_warps=32),\n# # # # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'auto'}, num_stages=4, num_warps=32),\n# triton.Config({'SPLIT_SIZE': 256}),\n# triton.Config({'SPLIT_SIZE': 256}, num_warps = 32, num_stages = 2),\n# # triton.Config({'SPLIT_SIZE': 256}, num_warps = 4, num_stages = 4),\n# triton.Config({'SPLIT_SIZE': 512}),\n# 
triton.Config({'SPLIT_SIZE': 512}, num_warps = 32, num_stages = 2),\n# # triton.Config({'SPLIT_SIZE': 512}, num_warps = 4, num_stages = 4),\n# # # # triton.Config({'SPLIT_SIZE': 512, 'grf_mode': 'large'}, num_stages=2, num_warps=32),\n# # # # triton.Config({'SPLIT_SIZE': 512, 'grf_mode': 'auto'}, num_stages=2, num_warps=32),\n# # # # triton.Config({'SPLIT_SIZE': 512, 'grf_mode': 'large'}, num_stages=4, num_warps=32),\n# # # # triton.Config({'SPLIT_SIZE': 512, 'grf_mode': 'auto'}, num_stages=4, num_warps=32),\n# # # triton.Config({'SPLIT_SIZE': 1024}),\n# # # # triton.Config({'SPLIT_SIZE': 2048}),\n# # # # triton.Config({'SPLIT_SIZE': 4096}),\n# # # # triton.Config({'SPLIT_SIZE': 8192}),\n# # # # triton.Config({'SPLIT_SIZE': 16384}),\n# ],\n# key=['num_paired_elements'],\n# )\n@triton.jit\ndef dequant_4bit_kernel(\n a_ptr, c_ptr, quant_ptr, absmax_ptr, num_paired_elements, QUANT_BLOCK: tl.constexpr, SPLIT_SIZE: tl.constexpr\n):\n pid = tl.program_id(axis=0) # We use a 1D launch grid so axis is 0.\n block_start = pid * SPLIT_SIZE\n offsets = block_start + tl.arange(0, SPLIT_SIZE)\n mask = offsets < num_paired_elements\n\n a = tl.load(a_ptr + offsets, mask, eviction_policy=\"evict_first\")\n\n out_dq = dequant_4bit_body_util(\n a=a,\n offsets=offsets,\n quant_ptr=quant_ptr,\n absmax_ptr=absmax_ptr,\n n_elems=num_paired_elements,\n QUANT_BLOCK=QUANT_BLOCK,\n )\n\n out_block_start = pid * SPLIT_SIZE * 2\n offs = out_block_start + tl.arange(0, SPLIT_SIZE * 2)\n mask = offs < num_paired_elements * 2\n tl.store(c_ptr + offs, out_dq, mask)\n\n\n# @triton.autotune(\n# configs=[\n# triton.Config({'SPLIT_SIZE': 128}, num_warps = 32, num_stages = 2),\n# triton.Config({'SPLIT_SIZE': 256}),\n# triton.Config({'SPLIT_SIZE': 256}, num_warps = 32, num_stages = 2),\n# triton.Config({'SPLIT_SIZE': 512}),\n# triton.Config({'SPLIT_SIZE': 512}, num_warps = 32, num_stages = 2),\n# triton.Config({'SPLIT_SIZE': 1024}, num_warps = 32, num_stages = 2),\n# ],\n# key=['num_paired_elements'],\n# )\n@triton.jit\ndef dequant_fp4_kernel(\n a_ptr, c_ptr, absmax_ptr, num_paired_elements, QUANT_BLOCK: tl.constexpr, SPLIT_SIZE: tl.constexpr\n):\n pid = tl.program_id(axis=0) # We use a 1D launch grid so axis is 0.\n block_start = pid * SPLIT_SIZE\n offsets = block_start + tl.arange(0, SPLIT_SIZE)\n mask = offsets < num_paired_elements\n\n a = tl.load(a_ptr + offsets, mask, eviction_policy=\"evict_first\")\n\n out_dq = dequant_fp4_body_util(\n a=a,\n offsets=offsets,\n absmax_ptr=absmax_ptr,\n n_elems=num_paired_elements,\n QUANT_BLOCK=QUANT_BLOCK,\n )\n\n out_block_start = pid * SPLIT_SIZE * 2\n offs = out_block_start + tl.arange(0, SPLIT_SIZE * 2)\n mask = offs < num_paired_elements * 2\n tl.store(c_ptr + offs, out_dq, mask)\n\n\n# @triton.autotune(\n# configs=[\n# triton.Config({'SPLIT_SIZE': 128}, num_warps = 32, num_stages = 2),\n# triton.Config({'SPLIT_SIZE': 256}),\n# triton.Config({'SPLIT_SIZE': 256}, num_warps = 32, num_stages = 2),\n# triton.Config({'SPLIT_SIZE': 512}),\n# triton.Config({'SPLIT_SIZE': 512}, num_warps = 32, num_stages = 2),\n# triton.Config({'SPLIT_SIZE': 1024}, num_warps = 32, num_stages = 2),\n# ],\n# key=['num_paired_elements'],\n# )\n@triton.jit\ndef dequant_nf4_kernel(\n a_ptr, c_ptr, absmax_ptr, num_paired_elements, QUANT_BLOCK: tl.constexpr, SPLIT_SIZE: tl.constexpr\n):\n pid = tl.program_id(axis=0) # We use a 1D launch grid so axis is 0.\n block_start = pid * SPLIT_SIZE\n offsets = block_start + tl.arange(0, SPLIT_SIZE)\n mask = offsets < num_paired_elements\n\n a = tl.load(a_ptr + offsets, mask, 
eviction_policy=\"evict_first\")\n\n out_dq = dequant_nf4_body_util(\n a=a,\n offsets=offsets,\n absmax_ptr=absmax_ptr,\n n_elems=num_paired_elements,\n QUANT_BLOCK=QUANT_BLOCK,\n )\n\n out_block_start = pid * SPLIT_SIZE * 2\n offs = out_block_start + tl.arange(0, SPLIT_SIZE * 2)\n mask = offs < num_paired_elements * 2\n tl.store(c_ptr + offs, out_dq, mask)\n\n\ndef dequantize_4bit_impl(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n # It's will be processed as an array, so\n # actual length is row * col\n # Elements are in uint8 format, so interleaved\n # so total amount of data is 2 * elem_count\n number_of_paired_elements = A.numel()\n # we assume that split_size > quant_blocksize\n\n SPLIT_SIZE = 256\n # grid = lambda META: (triton.cdiv(number_of_paired_elements, META['SPLIT_SIZE']), )\n grid = (triton.cdiv(number_of_paired_elements, SPLIT_SIZE),)\n if quant_type == \"fp4\":\n dequant_fp4_kernel[grid](A, out, absmax, number_of_paired_elements, blocksize, SPLIT_SIZE)\n else:\n dequant_nf4_kernel[grid](A, out, absmax, number_of_paired_elements, blocksize, SPLIT_SIZE)\n\n\ndef dequantize_4bit_impl_passing_code(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n code: torch.Tensor,\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n number_of_paired_elements = A.numel()\n # we assume that split_size > quant_blocksize\n\n SPLIT_SIZE = 256\n # grid = lambda META: (triton.cdiv(number_of_paired_elements, META['SPLIT_SIZE']), )\n grid = (triton.cdiv(number_of_paired_elements, SPLIT_SIZE),)\n dequant_4bit_kernel[grid](A, out, code, absmax, number_of_paired_elements, blocksize, SPLIT_SIZE)\n\n\n######################### Fallback dequantization functions #########################\n## for debug ##\n\n\n# @triton.autotune(\n# configs=[\n# # triton.Config({'SPLIT_NUM_BLOCKS': 1, 'grf_mode': 'large'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_NUM_BLOCKS': 1, 'grf_mode': 'auto'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_NUM_BLOCKS': 1, 'grf_mode': 'large'}, num_stages=4, num_warps=32),\n# # #\n# # triton.Config({\"SPLIT_NUM_BLOCKS\": 1, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# #\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 2}),\n# # triton.Config({\"SPLIT_NUM_BLOCKS\": 2, \"grf_mode\": \"large\"}, num_stages=2, num_warps=32),\n# # # triton.Config({'SPLIT_NUM_BLOCKS': 2, 'grf_mode': 'large'}, num_stages=4, num_warps=32),\n# # triton.Config({\"SPLIT_NUM_BLOCKS\": 2, \"grf_mode\": \"auto\"}, num_stages=2, num_warps=32),\n# # triton.Config({\"SPLIT_NUM_BLOCKS\": 2, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# # triton.Config({\"SPLIT_NUM_BLOCKS\": 4, \"grf_mode\": \"large\"}, num_stages=2, num_warps=32),\n# # triton.Config({\"SPLIT_NUM_BLOCKS\": 4, \"grf_mode\": \"large\"}, num_stages=4, num_warps=32),\n# # triton.Config({'SPLIT_NUM_BLOCKS': 8, 'grf_mode': 'large'}, num_stages=2, num_warps=32),\n# ],\n# key=[\"n_elements\", \"BLOCK_SIZE\"],\n# )\n@triton.jit\ndef quantize_4bit_blockwise_kernel(\n A_ptr,\n code_ptr,\n absmax_ptr,\n out_ptr,\n n_elements,\n BLOCK_SIZE: tl.constexpr,\n CODE_SIZE: tl.constexpr,\n SPLIT_NUM_BLOCKS: tl.constexpr,\n):\n PAIRED_SPLIT_NUM_BLOCKS: tl.constexpr = SPLIT_NUM_BLOCKS * 2\n block_start_idx = tl.program_id(0) * PAIRED_SPLIT_NUM_BLOCKS\n thread_idx = tl.arange(0, PAIRED_SPLIT_NUM_BLOCKS * BLOCK_SIZE)\n\n offsets = block_start_idx * BLOCK_SIZE + thread_idx\n mask = offsets < n_elements\n\n A = tl.load(A_ptr + offsets, 
mask=mask, other=0.0)\n\n # To be able process several blocks -> (PAIRED_SPLIT_NUM_BLOCKS, BLOCK_SIZE)\n A_reshaped = tl.reshape(A, (PAIRED_SPLIT_NUM_BLOCKS, BLOCK_SIZE))\n\n # Calculating absamax for each block\n absmax = tl.max(tl.abs(A_reshaped), axis=1)\n tl.store(absmax_ptr + block_start_idx + tl.arange(0, PAIRE\n# ... truncated ...","source_hash":"5d8c183b47a790f1b2fc940c2f000f0259533d2a1e032aecb11931c52b62887a","truncated":true} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_4bit.quantize_fp4_blockwise_kernel","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_4bit.quantize_fp4_blockwise_kernel#L20-L70","kind":"function","name":"quantize_fp4_blockwise_kernel","path":"bitsandbytes/backends/triton/kernels_4bit.py","language":"python","start_line":20,"end_line":70,"context_start_line":1,"context_end_line":90,"code":"import torch\n\nimport triton\nimport triton.language as tl\n\n\n# Triton implementation of similar CUDA kernel to avoid loading code from csrc/kernels.cu::dQuantizeFP4\n# @triton.autotune(\n# configs=[\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 1, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 2, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 1}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 2}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 4}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 8}),\n# ],\n# key=[\"n_elements\"],\n# )\n@triton.jit\ndef quantize_fp4_blockwise_kernel(\n A_ptr,\n absmax_ptr,\n out_ptr,\n n_elements,\n BLOCK_SIZE: tl.constexpr,\n SPLIT_NUM_BLOCKS: tl.constexpr,\n):\n PAIRED_SPLIT_NUM_BLOCKS: tl.constexpr = SPLIT_NUM_BLOCKS * 2\n block_start_idx = tl.program_id(0) * PAIRED_SPLIT_NUM_BLOCKS\n thread_idx = tl.arange(0, PAIRED_SPLIT_NUM_BLOCKS * BLOCK_SIZE)\n\n offsets = block_start_idx * BLOCK_SIZE + thread_idx\n mask = offsets < n_elements\n\n A = tl.load(A_ptr + offsets, mask=mask, other=0.0)\n\n # To be able process several blocks -> (PAIRED_SPLIT_NUM_BLOCKS, BLOCK_SIZE)\n A_reshaped = tl.reshape(A, (PAIRED_SPLIT_NUM_BLOCKS, BLOCK_SIZE))\n\n # Calculating absamax for each block\n absmax = tl.max(tl.abs(A_reshaped), axis=1)\n tl.store(absmax_ptr + block_start_idx + tl.arange(0, PAIRED_SPLIT_NUM_BLOCKS), absmax)\n\n A_normalized = A_reshaped / absmax[:, None]\n A_normalized = tl.clamp(A_normalized, -1.0, 1.0)\n\n sign = tl.where(A_normalized < 0, 0b1000, 0b0000)\n A_absf = tl.abs(A_normalized)\n\n result = tl.where(\n A_absf > 0.29166667,\n tl.where(\n A_absf > 0.583333, tl.where(A_absf > 0.8333333, 0b011, 0b010), tl.where(A_absf > 0.4166667, 0b101, 0b100)\n ),\n tl.where(\n A_absf > 0.0859375,\n tl.where(A_absf > 0.20833333, 0b0111, 0b0110),\n tl.where(A_absf > 0.00260417, 0b0001, 0b0000),\n ),\n )\n quantized = (result ^ sign).to(tl.uint8)\n\n quantized = quantized.reshape((PAIRED_SPLIT_NUM_BLOCKS, BLOCK_SIZE // 2, 2))\n left, right = quantized.split()\n packed = left << 4 | (right & 0xF)\n\n packed_flat = tl.reshape(packed, (BLOCK_SIZE * SPLIT_NUM_BLOCKS,))\n out_offsets = block_start_idx * BLOCK_SIZE // 2 + tl.arange(0, SPLIT_NUM_BLOCKS * BLOCK_SIZE)\n out_mask = out_offsets < n_elements // 2\n tl.store(out_ptr + out_offsets, packed_flat, mask=out_mask)\n\n\n# Triton implementation of similar CUDA kernel to avoid loading code from csrc/kernels.cu::dQuantizeNF4\n# @triton.autotune(\n# configs=[\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 1, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# 
triton.Config({\"SPLIT_NUM_BLOCKS\": 2, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 1}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 2}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 4}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 8}),\n# ],\n# key=[\"n_elements\"],\n# )\n@triton.jit\ndef quantize_nf4_blockwise_kernel(\n A_ptr,\n absmax_ptr,\n out_ptr,\n n_elements,","source_hash":"5d8c183b47a790f1b2fc940c2f000f0259533d2a1e032aecb11931c52b62887a","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_4bit.quantize_nf4_blockwise_kernel","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_4bit.quantize_nf4_blockwise_kernel#L86-L152","kind":"function","name":"quantize_nf4_blockwise_kernel","path":"bitsandbytes/backends/triton/kernels_4bit.py","language":"python","start_line":86,"end_line":152,"context_start_line":66,"context_end_line":172,"code":"\n packed_flat = tl.reshape(packed, (BLOCK_SIZE * SPLIT_NUM_BLOCKS,))\n out_offsets = block_start_idx * BLOCK_SIZE // 2 + tl.arange(0, SPLIT_NUM_BLOCKS * BLOCK_SIZE)\n out_mask = out_offsets < n_elements // 2\n tl.store(out_ptr + out_offsets, packed_flat, mask=out_mask)\n\n\n# Triton implementation of similar CUDA kernel to avoid loading code from csrc/kernels.cu::dQuantizeNF4\n# @triton.autotune(\n# configs=[\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 1, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 2, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 1}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 2}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 4}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 8}),\n# ],\n# key=[\"n_elements\"],\n# )\n@triton.jit\ndef quantize_nf4_blockwise_kernel(\n A_ptr,\n absmax_ptr,\n out_ptr,\n n_elements,\n BLOCK_SIZE: tl.constexpr,\n SPLIT_NUM_BLOCKS: tl.constexpr,\n):\n PAIRED_SPLIT_NUM_BLOCKS: tl.constexpr = SPLIT_NUM_BLOCKS * 2\n block_start_idx = tl.program_id(0) * PAIRED_SPLIT_NUM_BLOCKS\n thread_idx = tl.arange(0, PAIRED_SPLIT_NUM_BLOCKS * BLOCK_SIZE)\n\n offsets = block_start_idx * BLOCK_SIZE + thread_idx\n mask = offsets < n_elements\n\n A = tl.load(A_ptr + offsets, mask=mask, other=0.0)\n\n # To be able process several blocks -> (PAIRED_SPLIT_NUM_BLOCKS, BLOCK_SIZE)\n A_reshaped = tl.reshape(A, (PAIRED_SPLIT_NUM_BLOCKS, BLOCK_SIZE))\n\n # Calculating absamax for each block\n absmax = tl.max(tl.abs(A_reshaped), axis=1)\n tl.store(absmax_ptr + block_start_idx + tl.arange(0, PAIRED_SPLIT_NUM_BLOCKS), absmax)\n\n A_normalized = A_reshaped / absmax[:, None]\n A_normalized = tl.clamp(A_normalized, -1.0, 1.0)\n\n result = tl.where(\n A_normalized > 0.03979014977812767,\n tl.where(\n A_normalized > 0.3893125355243683,\n tl.where(\n A_normalized > 0.6427869200706482,\n tl.where(A_normalized > 0.8614784181118011, 0b1111, 0b1110),\n tl.where(A_normalized > 0.5016634166240692, 0b1101, 0b1100),\n ),\n tl.where(\n A_normalized > 0.2035212516784668,\n tl.where(A_normalized > 0.2920137718319893, 0b1011, 0b1010),\n tl.where(A_normalized > 0.1202552504837513, 0b1001, 0b1000),\n ),\n ),\n tl.where(\n A_normalized > -0.33967943489551544,\n tl.where(\n A_normalized > -0.13791173323988914,\n tl.where(A_normalized > -0.045525018125772476, 0b0111, 0b0110),\n tl.where(A_normalized > -0.23460740596055984, 0b0101, 0b0100),\n ),\n tl.where(\n A_normalized > -0.6106329262256622,\n tl.where(A_normalized > -0.4599952697753906, 0b0011, 0b0010),\n 
tl.where(A_normalized > -0.8480964004993439, 0b0001, 0b0000),\n ),\n ),\n )\n quantized = result.to(tl.uint8)\n\n quantized = quantized.reshape((PAIRED_SPLIT_NUM_BLOCKS, BLOCK_SIZE // 2, 2))\n\n left, right = quantized.split()\n packed = left << 4 | (right & 0xF)\n\n packed_flat = tl.reshape(packed, (BLOCK_SIZE * SPLIT_NUM_BLOCKS,))\n out_offsets = block_start_idx * BLOCK_SIZE // 2 + tl.arange(0, SPLIT_NUM_BLOCKS * BLOCK_SIZE)\n out_mask = out_offsets < n_elements // 2\n tl.store(out_ptr + out_offsets, packed_flat, mask=out_mask)\n\n\ndef quantize_4bit_blockwise_triton(A, blocksize, quant_type, blocks, absmax, num_elements, quantized_out):\n # grid = lambda META: (triton.cdiv(blocks, META[\"SPLIT_NUM_BLOCKS\"]),)\n split_num_blocks = 4\n grid = (triton.cdiv(blocks, split_num_blocks),)\n if quant_type == \"fp4\":\n quantize_fp4_blockwise_kernel[grid](\n A_ptr=A,\n absmax_ptr=absmax,\n out_ptr=quantized_out,\n n_elements=num_elements,\n BLOCK_SIZE=blocksize,\n SPLIT_NUM_BLOCKS=split_num_blocks,\n )\n else:\n quantize_nf4_blockwise_kernel[grid](\n A_ptr=A,\n absmax_ptr=absmax,\n out_ptr=quantized_out,","source_hash":"5d8c183b47a790f1b2fc940c2f000f0259533d2a1e032aecb11931c52b62887a","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_4bit.quantize_4bit_blockwise_triton","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_4bit.quantize_4bit_blockwise_triton#L155-L177","kind":"function","name":"quantize_4bit_blockwise_triton","path":"bitsandbytes/backends/triton/kernels_4bit.py","language":"python","start_line":155,"end_line":177,"context_start_line":135,"context_end_line":197,"code":" tl.where(\n A_normalized > -0.6106329262256622,\n tl.where(A_normalized > -0.4599952697753906, 0b0011, 0b0010),\n tl.where(A_normalized > -0.8480964004993439, 0b0001, 0b0000),\n ),\n ),\n )\n quantized = result.to(tl.uint8)\n\n quantized = quantized.reshape((PAIRED_SPLIT_NUM_BLOCKS, BLOCK_SIZE // 2, 2))\n\n left, right = quantized.split()\n packed = left << 4 | (right & 0xF)\n\n packed_flat = tl.reshape(packed, (BLOCK_SIZE * SPLIT_NUM_BLOCKS,))\n out_offsets = block_start_idx * BLOCK_SIZE // 2 + tl.arange(0, SPLIT_NUM_BLOCKS * BLOCK_SIZE)\n out_mask = out_offsets < n_elements // 2\n tl.store(out_ptr + out_offsets, packed_flat, mask=out_mask)\n\n\ndef quantize_4bit_blockwise_triton(A, blocksize, quant_type, blocks, absmax, num_elements, quantized_out):\n # grid = lambda META: (triton.cdiv(blocks, META[\"SPLIT_NUM_BLOCKS\"]),)\n split_num_blocks = 4\n grid = (triton.cdiv(blocks, split_num_blocks),)\n if quant_type == \"fp4\":\n quantize_fp4_blockwise_kernel[grid](\n A_ptr=A,\n absmax_ptr=absmax,\n out_ptr=quantized_out,\n n_elements=num_elements,\n BLOCK_SIZE=blocksize,\n SPLIT_NUM_BLOCKS=split_num_blocks,\n )\n else:\n quantize_nf4_blockwise_kernel[grid](\n A_ptr=A,\n absmax_ptr=absmax,\n out_ptr=quantized_out,\n n_elements=num_elements,\n BLOCK_SIZE=blocksize,\n SPLIT_NUM_BLOCKS=split_num_blocks,\n )\n return quantized_out, absmax\n\n\n@triton.jit\ndef dequant_4bit_body_util(a, offsets, quant_ptr, absmax_ptr, n_elems, QUANT_BLOCK: tl.constexpr):\n PAIRED_QUANT_BLOCK: tl.constexpr = QUANT_BLOCK // 2\n mask = offsets < n_elems\n higher = a & 0xF\n # lower 4bits\n lower = a >> 4\n\n abs_offsets = offsets // PAIRED_QUANT_BLOCK\n absmax = tl.load(absmax_ptr + abs_offsets, mask=mask, other=1.0, eviction_policy=\"evict_last\")\n\n # apply conversion\n lower_4 = tl.load(quant_ptr + lower, eviction_policy=\"evict_last\")\n higher_4 = tl.load(quant_ptr + 
higher, eviction_policy=\"evict_last\")\n\n mul_high = higher_4 * absmax\n mul_low = lower_4 * absmax\n out_dq = tl.interleave(mul_low, mul_high)","source_hash":"5d8c183b47a790f1b2fc940c2f000f0259533d2a1e032aecb11931c52b62887a","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_4bit.dequant_4bit_body_util","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_4bit.dequant_4bit_body_util#L181-L198","kind":"function","name":"dequant_4bit_body_util","path":"bitsandbytes/backends/triton/kernels_4bit.py","language":"python","start_line":181,"end_line":198,"context_start_line":161,"context_end_line":218,"code":" A_ptr=A,\n absmax_ptr=absmax,\n out_ptr=quantized_out,\n n_elements=num_elements,\n BLOCK_SIZE=blocksize,\n SPLIT_NUM_BLOCKS=split_num_blocks,\n )\n else:\n quantize_nf4_blockwise_kernel[grid](\n A_ptr=A,\n absmax_ptr=absmax,\n out_ptr=quantized_out,\n n_elements=num_elements,\n BLOCK_SIZE=blocksize,\n SPLIT_NUM_BLOCKS=split_num_blocks,\n )\n return quantized_out, absmax\n\n\n@triton.jit\ndef dequant_4bit_body_util(a, offsets, quant_ptr, absmax_ptr, n_elems, QUANT_BLOCK: tl.constexpr):\n PAIRED_QUANT_BLOCK: tl.constexpr = QUANT_BLOCK // 2\n mask = offsets < n_elems\n higher = a & 0xF\n # lower 4bits\n lower = a >> 4\n\n abs_offsets = offsets // PAIRED_QUANT_BLOCK\n absmax = tl.load(absmax_ptr + abs_offsets, mask=mask, other=1.0, eviction_policy=\"evict_last\")\n\n # apply conversion\n lower_4 = tl.load(quant_ptr + lower, eviction_policy=\"evict_last\")\n higher_4 = tl.load(quant_ptr + higher, eviction_policy=\"evict_last\")\n\n mul_high = higher_4 * absmax\n mul_low = lower_4 * absmax\n out_dq = tl.interleave(mul_low, mul_high)\n return out_dq\n\n\n# Triton implementation of similar CUDA kernel to avoid loading code from csrc/kernels.cu::dDequantizeFP4Tree\n@triton.jit\ndef dequantize_fp4_tree(val, absmax):\n # val: tl.tensor (uint8)\n # absmax: tl.tensor (float32/float16)\n # 00001100 00001011 00001001 00001111\n sign = tl.where((val & 0b1000) == 0b1000, -1.0, 1.0) # -1\n third_bit = (val & 0b0100) == 0b0100 # True\n second_bit = (val & 0b0010) == 0b0010 # False\n first_bit = (val & 0b0001) == 0b0001 # False\n\n branch1 = tl.where(\n second_bit,\n tl.where(first_bit, 0.25, 0.16666667), # 1111, 1110\n tl.where(first_bit, 0.5, 0.33333333), # 1101, 1100\n )\n branch2 = tl.where(\n second_bit,","source_hash":"5d8c183b47a790f1b2fc940c2f000f0259533d2a1e032aecb11931c52b62887a","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_4bit.dequantize_fp4_tree","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_4bit.dequantize_fp4_tree#L203-L223","kind":"function","name":"dequantize_fp4_tree","path":"bitsandbytes/backends/triton/kernels_4bit.py","language":"python","start_line":203,"end_line":223,"context_start_line":183,"context_end_line":243,"code":" mask = offsets < n_elems\n higher = a & 0xF\n # lower 4bits\n lower = a >> 4\n\n abs_offsets = offsets // PAIRED_QUANT_BLOCK\n absmax = tl.load(absmax_ptr + abs_offsets, mask=mask, other=1.0, eviction_policy=\"evict_last\")\n\n # apply conversion\n lower_4 = tl.load(quant_ptr + lower, eviction_policy=\"evict_last\")\n higher_4 = tl.load(quant_ptr + higher, eviction_policy=\"evict_last\")\n\n mul_high = higher_4 * absmax\n mul_low = lower_4 * absmax\n out_dq = tl.interleave(mul_low, mul_high)\n return out_dq\n\n\n# Triton implementation of similar CUDA kernel to avoid loading code from 
csrc/kernels.cu::dDequantizeFP4Tree\n@triton.jit\ndef dequantize_fp4_tree(val, absmax):\n # val: tl.tensor (uint8)\n # absmax: tl.tensor (float32/float16)\n # 00001100 00001011 00001001 00001111\n sign = tl.where((val & 0b1000) == 0b1000, -1.0, 1.0) # -1\n third_bit = (val & 0b0100) == 0b0100 # True\n second_bit = (val & 0b0010) == 0b0010 # False\n first_bit = (val & 0b0001) == 0b0001 # False\n\n branch1 = tl.where(\n second_bit,\n tl.where(first_bit, 0.25, 0.16666667), # 1111, 1110\n tl.where(first_bit, 0.5, 0.33333333), # 1101, 1100\n )\n branch2 = tl.where(\n second_bit,\n tl.where(first_bit, 1.0, 0.66666667), # 1011, 1010\n tl.where(first_bit, 0.00520833, 0.0), # 1001, 1000\n )\n out = tl.where(third_bit, branch1, branch2)\n return out * sign * absmax\n\n\n@triton.jit\ndef dequant_fp4_body_util(a, offsets, absmax_ptr, n_elems, QUANT_BLOCK: tl.constexpr):\n PAIRED_QUANT_BLOCK: tl.constexpr = QUANT_BLOCK // 2\n mask = offsets < n_elems\n higher = a & 0xF\n lower = a >> 4\n\n abs_offsets = offsets // PAIRED_QUANT_BLOCK\n absmax = tl.load(absmax_ptr + abs_offsets, mask=mask, other=1.0, eviction_policy=\"evict_last\")\n mul_high = dequantize_fp4_tree(higher, absmax)\n mul_low = dequantize_fp4_tree(lower, absmax)\n out_dq = tl.interleave(mul_low, mul_high)\n return out_dq\n\n\n# Triton implementation of similar CUDA kernel to avoid loading code from csrc/kernels.cu::dDequantizeNF4\n@triton.jit\ndef dequantize_nf4_tree(val):","source_hash":"5d8c183b47a790f1b2fc940c2f000f0259533d2a1e032aecb11931c52b62887a","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_4bit.dequant_fp4_body_util","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_4bit.dequant_fp4_body_util#L227-L238","kind":"function","name":"dequant_fp4_body_util","path":"bitsandbytes/backends/triton/kernels_4bit.py","language":"python","start_line":227,"end_line":238,"context_start_line":207,"context_end_line":258,"code":" sign = tl.where((val & 0b1000) == 0b1000, -1.0, 1.0) # -1\n third_bit = (val & 0b0100) == 0b0100 # True\n second_bit = (val & 0b0010) == 0b0010 # False\n first_bit = (val & 0b0001) == 0b0001 # False\n\n branch1 = tl.where(\n second_bit,\n tl.where(first_bit, 0.25, 0.16666667), # 1111, 1110\n tl.where(first_bit, 0.5, 0.33333333), # 1101, 1100\n )\n branch2 = tl.where(\n second_bit,\n tl.where(first_bit, 1.0, 0.66666667), # 1011, 1010\n tl.where(first_bit, 0.00520833, 0.0), # 1001, 1000\n )\n out = tl.where(third_bit, branch1, branch2)\n return out * sign * absmax\n\n\n@triton.jit\ndef dequant_fp4_body_util(a, offsets, absmax_ptr, n_elems, QUANT_BLOCK: tl.constexpr):\n PAIRED_QUANT_BLOCK: tl.constexpr = QUANT_BLOCK // 2\n mask = offsets < n_elems\n higher = a & 0xF\n lower = a >> 4\n\n abs_offsets = offsets // PAIRED_QUANT_BLOCK\n absmax = tl.load(absmax_ptr + abs_offsets, mask=mask, other=1.0, eviction_policy=\"evict_last\")\n mul_high = dequantize_fp4_tree(higher, absmax)\n mul_low = dequantize_fp4_tree(lower, absmax)\n out_dq = tl.interleave(mul_low, mul_high)\n return out_dq\n\n\n# Triton implementation of similar CUDA kernel to avoid loading code from csrc/kernels.cu::dDequantizeNF4\n@triton.jit\ndef dequantize_nf4_tree(val):\n # val: tl.tensor (uint8)\n cond0 = (val & 0b1000) == 0b1000\n cond1 = (val & 0b0100) == 0b0100\n cond2 = (val & 0b0010) == 0b0010\n cond3 = (val & 0b0001) == 0b0001\n\n # Positive branch (val & 0b1000) == 8\n branch_pos = tl.where(\n cond1,\n tl.where(\n cond2,\n tl.where(cond3, 1.0, 0.7229568362236023), # 1111, 1110\n 
tl.where(cond3, 0.5626170039176941, 0.44070982933044434), # 1101, 1100\n ),\n tl.where(","source_hash":"5d8c183b47a790f1b2fc940c2f000f0259533d2a1e032aecb11931c52b62887a","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_4bit.dequantize_nf4_tree","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_4bit.dequantize_nf4_tree#L243-L279","kind":"function","name":"dequantize_nf4_tree","path":"bitsandbytes/backends/triton/kernels_4bit.py","language":"python","start_line":243,"end_line":279,"context_start_line":223,"context_end_line":299,"code":" return out * sign * absmax\n\n\n@triton.jit\ndef dequant_fp4_body_util(a, offsets, absmax_ptr, n_elems, QUANT_BLOCK: tl.constexpr):\n PAIRED_QUANT_BLOCK: tl.constexpr = QUANT_BLOCK // 2\n mask = offsets < n_elems\n higher = a & 0xF\n lower = a >> 4\n\n abs_offsets = offsets // PAIRED_QUANT_BLOCK\n absmax = tl.load(absmax_ptr + abs_offsets, mask=mask, other=1.0, eviction_policy=\"evict_last\")\n mul_high = dequantize_fp4_tree(higher, absmax)\n mul_low = dequantize_fp4_tree(lower, absmax)\n out_dq = tl.interleave(mul_low, mul_high)\n return out_dq\n\n\n# Triton implementation of similar CUDA kernel to avoid loading code from csrc/kernels.cu::dDequantizeNF4\n@triton.jit\ndef dequantize_nf4_tree(val):\n # val: tl.tensor (uint8)\n cond0 = (val & 0b1000) == 0b1000\n cond1 = (val & 0b0100) == 0b0100\n cond2 = (val & 0b0010) == 0b0010\n cond3 = (val & 0b0001) == 0b0001\n\n # Positive branch (val & 0b1000) == 8\n branch_pos = tl.where(\n cond1,\n tl.where(\n cond2,\n tl.where(cond3, 1.0, 0.7229568362236023), # 1111, 1110\n tl.where(cond3, 0.5626170039176941, 0.44070982933044434), # 1101, 1100\n ),\n tl.where(\n cond2,\n tl.where(cond3, 0.33791524171829224, 0.24611230194568634), # 1011, 1010\n tl.where(cond3, 0.16093020141124725, 0.07958029955625534), # 1001, 1000\n ),\n )\n\n # Negative branch (val & 0b1000) == 0\n branch_neg = tl.where(\n cond1,\n tl.where(\n cond2,\n tl.where(cond3, 0.0, -0.09105003625154495), # 0111, 0110\n tl.where(cond3, -0.18477343022823334, -0.28444138169288635), # 0101, 0100\n ),\n tl.where(\n cond2,\n tl.where(cond3, -0.39491748809814453, -0.5250730514526367), # 0011, 0010\n tl.where(cond3, -0.6961928009986877, -1.0), # 0001, 0000\n ),\n )\n return tl.where(cond0, branch_pos, branch_neg)\n\n\n@triton.jit\ndef dequant_nf4_body_util(a, offsets, absmax_ptr, n_elems, QUANT_BLOCK: tl.constexpr):\n PAIRED_QUANT_BLOCK: tl.constexpr = QUANT_BLOCK // 2\n mask = offsets < n_elems\n higher = a & 0xF\n # lower 4bits\n lower = a >> 4\n\n abs_offsets = offsets // PAIRED_QUANT_BLOCK\n absmax = tl.load(absmax_ptr + abs_offsets, mask=mask, other=1.0, eviction_policy=\"evict_last\")\n mul_high = dequantize_nf4_tree(higher) * absmax\n mul_low = dequantize_nf4_tree(lower) * absmax\n out_dq = tl.interleave(mul_low, mul_high)\n return out_dq\n\n\n# All such kernels are similar, so maybe code can be generalised.\n# @triton.autotune(","source_hash":"5d8c183b47a790f1b2fc940c2f000f0259533d2a1e032aecb11931c52b62887a","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_4bit.dequant_nf4_body_util","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_4bit.dequant_nf4_body_util#L283-L295","kind":"function","name":"dequant_nf4_body_util","path":"bitsandbytes/backends/triton/kernels_4bit.py","language":"python","start_line":283,"end_line":295,"context_start_line":263,"context_end_line":315,"code":" )\n\n # Negative branch (val & 
0b1000) == 0\n branch_neg = tl.where(\n cond1,\n tl.where(\n cond2,\n tl.where(cond3, 0.0, -0.09105003625154495), # 0111, 0110\n tl.where(cond3, -0.18477343022823334, -0.28444138169288635), # 0101, 0100\n ),\n tl.where(\n cond2,\n tl.where(cond3, -0.39491748809814453, -0.5250730514526367), # 0011, 0010\n tl.where(cond3, -0.6961928009986877, -1.0), # 0001, 0000\n ),\n )\n return tl.where(cond0, branch_pos, branch_neg)\n\n\n@triton.jit\ndef dequant_nf4_body_util(a, offsets, absmax_ptr, n_elems, QUANT_BLOCK: tl.constexpr):\n PAIRED_QUANT_BLOCK: tl.constexpr = QUANT_BLOCK // 2\n mask = offsets < n_elems\n higher = a & 0xF\n # lower 4bits\n lower = a >> 4\n\n abs_offsets = offsets // PAIRED_QUANT_BLOCK\n absmax = tl.load(absmax_ptr + abs_offsets, mask=mask, other=1.0, eviction_policy=\"evict_last\")\n mul_high = dequantize_nf4_tree(higher) * absmax\n mul_low = dequantize_nf4_tree(lower) * absmax\n out_dq = tl.interleave(mul_low, mul_high)\n return out_dq\n\n\n# All such kernels are similar, so maybe code can be generalised.\n# @triton.autotune(\n# configs=[\n# # # triton.Config({'SPLIT_SIZE': 64}),\n# # # # triton.Config({'SPLIT_SIZE': 64, 'grf_mode': 'large'}, num_stages=2, num_warps=32),\n# # # # triton.Config({'SPLIT_SIZE': 64, 'grf_mode': 'auto'}, num_stages=2, num_warps=32),\n# # # # triton.Config({'SPLIT_SIZE': 64, 'grf_mode': 'large'}, num_stages=4, num_warps=32),\n# # # # triton.Config({'SPLIT_SIZE': 64, 'grf_mode': 'auto'}, num_stages=4, num_warps=32),\n# triton.Config({'SPLIT_SIZE': 128}),\n# triton.Config({'SPLIT_SIZE': 128}, num_warps = 32, num_stages = 2),\n# # # triton.Config({'SPLIT_SIZE': 128}, num_warps = 4, num_stages = 4),\n# # # # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'large'}, num_stages=2, num_warps=32),\n# # # # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'auto'}, num_stages=2, num_warps=32),\n# # # # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'large'}, num_stages=4, num_warps=32),\n# # # # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'auto'}, num_stages=4, num_warps=32),\n# triton.Config({'SPLIT_SIZE': 256}),\n# triton.Config({'SPLIT_SIZE': 256}, num_warps = 32, num_stages = 2),\n# # triton.Config({'SPLIT_SIZE': 256}, num_warps = 4, num_stages = 4),","source_hash":"5d8c183b47a790f1b2fc940c2f000f0259533d2a1e032aecb11931c52b62887a","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_4bit.dequant_4bit_kernel","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_4bit.dequant_4bit_kernel#L332-L354","kind":"function","name":"dequant_4bit_kernel","path":"bitsandbytes/backends/triton/kernels_4bit.py","language":"python","start_line":332,"end_line":354,"context_start_line":312,"context_end_line":374,"code":"# # # # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'auto'}, num_stages=4, num_warps=32),\n# triton.Config({'SPLIT_SIZE': 256}),\n# triton.Config({'SPLIT_SIZE': 256}, num_warps = 32, num_stages = 2),\n# # triton.Config({'SPLIT_SIZE': 256}, num_warps = 4, num_stages = 4),\n# triton.Config({'SPLIT_SIZE': 512}),\n# triton.Config({'SPLIT_SIZE': 512}, num_warps = 32, num_stages = 2),\n# # triton.Config({'SPLIT_SIZE': 512}, num_warps = 4, num_stages = 4),\n# # # # triton.Config({'SPLIT_SIZE': 512, 'grf_mode': 'large'}, num_stages=2, num_warps=32),\n# # # # triton.Config({'SPLIT_SIZE': 512, 'grf_mode': 'auto'}, num_stages=2, num_warps=32),\n# # # # triton.Config({'SPLIT_SIZE': 512, 'grf_mode': 'large'}, num_stages=4, num_warps=32),\n# # # # triton.Config({'SPLIT_SIZE': 512, 'grf_mode': 'auto'}, 
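In the *_body_util helpers above, each uint8 holds two consecutive 4-bit codes, and tl.interleave restores the original element order after both halves are decoded. Note the variable names are swapped relative to the bit positions: `higher = a & 0xF` is the low nibble and `lower = a >> 4` is the high nibble; since the quantize kernel below packs pairs as (first << 4) | second, it is `lower` (the high nibble) that carries element 2k and is emitted first. A NumPy sketch of the unpack:

import numpy as np

def unpack_4bit_pairs(packed: np.ndarray) -> np.ndarray:
    first = packed >> 4    # element 2k, stored in the high nibble
    second = packed & 0xF  # element 2k + 1, stored in the low nibble
    out = np.empty(packed.size * 2, dtype=packed.dtype)
    out[0::2] = first      # same ordering as tl.interleave(mul_low, mul_high)
    out[1::2] = second
    return out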
num_stages=4, num_warps=32),\n# # # triton.Config({'SPLIT_SIZE': 1024}),\n# # # # triton.Config({'SPLIT_SIZE': 2048}),\n# # # # triton.Config({'SPLIT_SIZE': 4096}),\n# # # # triton.Config({'SPLIT_SIZE': 8192}),\n# # # # triton.Config({'SPLIT_SIZE': 16384}),\n# ],\n# key=['num_paired_elements'],\n# )\n@triton.jit\ndef dequant_4bit_kernel(\n a_ptr, c_ptr, quant_ptr, absmax_ptr, num_paired_elements, QUANT_BLOCK: tl.constexpr, SPLIT_SIZE: tl.constexpr\n):\n pid = tl.program_id(axis=0) # We use a 1D launch grid so axis is 0.\n block_start = pid * SPLIT_SIZE\n offsets = block_start + tl.arange(0, SPLIT_SIZE)\n mask = offsets < num_paired_elements\n\n a = tl.load(a_ptr + offsets, mask, eviction_policy=\"evict_first\")\n\n out_dq = dequant_4bit_body_util(\n a=a,\n offsets=offsets,\n quant_ptr=quant_ptr,\n absmax_ptr=absmax_ptr,\n n_elems=num_paired_elements,\n QUANT_BLOCK=QUANT_BLOCK,\n )\n\n out_block_start = pid * SPLIT_SIZE * 2\n offs = out_block_start + tl.arange(0, SPLIT_SIZE * 2)\n mask = offs < num_paired_elements * 2\n tl.store(c_ptr + offs, out_dq, mask)\n\n\n# @triton.autotune(\n# configs=[\n# triton.Config({'SPLIT_SIZE': 128}, num_warps = 32, num_stages = 2),\n# triton.Config({'SPLIT_SIZE': 256}),\n# triton.Config({'SPLIT_SIZE': 256}, num_warps = 32, num_stages = 2),\n# triton.Config({'SPLIT_SIZE': 512}),\n# triton.Config({'SPLIT_SIZE': 512}, num_warps = 32, num_stages = 2),\n# triton.Config({'SPLIT_SIZE': 1024}, num_warps = 32, num_stages = 2),\n# ],\n# key=['num_paired_elements'],\n# )\n@triton.jit\ndef dequant_fp4_kernel(\n a_ptr, c_ptr, absmax_ptr, num_paired_elements, QUANT_BLOCK: tl.constexpr, SPLIT_SIZE: tl.constexpr\n):\n pid = tl.program_id(axis=0) # We use a 1D launch grid so axis is 0.\n block_start = pid * SPLIT_SIZE\n offsets = block_start + tl.arange(0, SPLIT_SIZE)","source_hash":"5d8c183b47a790f1b2fc940c2f000f0259533d2a1e032aecb11931c52b62887a","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_4bit.dequant_fp4_kernel","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_4bit.dequant_fp4_kernel#L369-L390","kind":"function","name":"dequant_fp4_kernel","path":"bitsandbytes/backends/triton/kernels_4bit.py","language":"python","start_line":369,"end_line":390,"context_start_line":349,"context_end_line":410,"code":" )\n\n out_block_start = pid * SPLIT_SIZE * 2\n offs = out_block_start + tl.arange(0, SPLIT_SIZE * 2)\n mask = offs < num_paired_elements * 2\n tl.store(c_ptr + offs, out_dq, mask)\n\n\n# @triton.autotune(\n# configs=[\n# triton.Config({'SPLIT_SIZE': 128}, num_warps = 32, num_stages = 2),\n# triton.Config({'SPLIT_SIZE': 256}),\n# triton.Config({'SPLIT_SIZE': 256}, num_warps = 32, num_stages = 2),\n# triton.Config({'SPLIT_SIZE': 512}),\n# triton.Config({'SPLIT_SIZE': 512}, num_warps = 32, num_stages = 2),\n# triton.Config({'SPLIT_SIZE': 1024}, num_warps = 32, num_stages = 2),\n# ],\n# key=['num_paired_elements'],\n# )\n@triton.jit\ndef dequant_fp4_kernel(\n a_ptr, c_ptr, absmax_ptr, num_paired_elements, QUANT_BLOCK: tl.constexpr, SPLIT_SIZE: tl.constexpr\n):\n pid = tl.program_id(axis=0) # We use a 1D launch grid so axis is 0.\n block_start = pid * SPLIT_SIZE\n offsets = block_start + tl.arange(0, SPLIT_SIZE)\n mask = offsets < num_paired_elements\n\n a = tl.load(a_ptr + offsets, mask, eviction_policy=\"evict_first\")\n\n out_dq = dequant_fp4_body_util(\n a=a,\n offsets=offsets,\n absmax_ptr=absmax_ptr,\n n_elems=num_paired_elements,\n QUANT_BLOCK=QUANT_BLOCK,\n )\n\n out_block_start = pid * 
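A note on the indexing in dequant_4bit_kernel above: each program reads SPLIT_SIZE packed bytes but writes 2 * SPLIT_SIZE dequantized values, so the input and output sides use different offset ranges and masks. The host-side arithmetic, with illustrative values:

import triton

num_paired_elements = 4096  # hypothetical: A.numel() for the packed uint8 tensor
SPLIT_SIZE = 256
grid = (triton.cdiv(num_paired_elements, SPLIT_SIZE),)
# program pid reads  input offsets [pid * SPLIT_SIZE,     (pid + 1) * SPLIT_SIZE)
# and writes output offsets        [pid * SPLIT_SIZE * 2, (pid + 1) * SPLIT_SIZE * 2),
# each masked against the packed and unpacked element counts respectively.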
SPLIT_SIZE * 2\n offs = out_block_start + tl.arange(0, SPLIT_SIZE * 2)\n mask = offs < num_paired_elements * 2\n tl.store(c_ptr + offs, out_dq, mask)\n\n\n# @triton.autotune(\n# configs=[\n# triton.Config({'SPLIT_SIZE': 128}, num_warps = 32, num_stages = 2),\n# triton.Config({'SPLIT_SIZE': 256}),\n# triton.Config({'SPLIT_SIZE': 256}, num_warps = 32, num_stages = 2),\n# triton.Config({'SPLIT_SIZE': 512}),\n# triton.Config({'SPLIT_SIZE': 512}, num_warps = 32, num_stages = 2),\n# triton.Config({'SPLIT_SIZE': 1024}, num_warps = 32, num_stages = 2),\n# ],\n# key=['num_paired_elements'],\n# )\n@triton.jit\ndef dequant_nf4_kernel(\n a_ptr, c_ptr, absmax_ptr, num_paired_elements, QUANT_BLOCK: tl.constexpr, SPLIT_SIZE: tl.constexpr\n):\n pid = tl.program_id(axis=0) # We use a 1D launch grid so axis is 0.\n block_start = pid * SPLIT_SIZE\n offsets = block_start + tl.arange(0, SPLIT_SIZE)","source_hash":"5d8c183b47a790f1b2fc940c2f000f0259533d2a1e032aecb11931c52b62887a","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_4bit.dequant_nf4_kernel","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_4bit.dequant_nf4_kernel#L405-L426","kind":"function","name":"dequant_nf4_kernel","path":"bitsandbytes/backends/triton/kernels_4bit.py","language":"python","start_line":405,"end_line":426,"context_start_line":385,"context_end_line":446,"code":" )\n\n out_block_start = pid * SPLIT_SIZE * 2\n offs = out_block_start + tl.arange(0, SPLIT_SIZE * 2)\n mask = offs < num_paired_elements * 2\n tl.store(c_ptr + offs, out_dq, mask)\n\n\n# @triton.autotune(\n# configs=[\n# triton.Config({'SPLIT_SIZE': 128}, num_warps = 32, num_stages = 2),\n# triton.Config({'SPLIT_SIZE': 256}),\n# triton.Config({'SPLIT_SIZE': 256}, num_warps = 32, num_stages = 2),\n# triton.Config({'SPLIT_SIZE': 512}),\n# triton.Config({'SPLIT_SIZE': 512}, num_warps = 32, num_stages = 2),\n# triton.Config({'SPLIT_SIZE': 1024}, num_warps = 32, num_stages = 2),\n# ],\n# key=['num_paired_elements'],\n# )\n@triton.jit\ndef dequant_nf4_kernel(\n a_ptr, c_ptr, absmax_ptr, num_paired_elements, QUANT_BLOCK: tl.constexpr, SPLIT_SIZE: tl.constexpr\n):\n pid = tl.program_id(axis=0) # We use a 1D launch grid so axis is 0.\n block_start = pid * SPLIT_SIZE\n offsets = block_start + tl.arange(0, SPLIT_SIZE)\n mask = offsets < num_paired_elements\n\n a = tl.load(a_ptr + offsets, mask, eviction_policy=\"evict_first\")\n\n out_dq = dequant_nf4_body_util(\n a=a,\n offsets=offsets,\n absmax_ptr=absmax_ptr,\n n_elems=num_paired_elements,\n QUANT_BLOCK=QUANT_BLOCK,\n )\n\n out_block_start = pid * SPLIT_SIZE * 2\n offs = out_block_start + tl.arange(0, SPLIT_SIZE * 2)\n mask = offs < num_paired_elements * 2\n tl.store(c_ptr + offs, out_dq, mask)\n\n\ndef dequantize_4bit_impl(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n # It's will be processed as an array, so\n # actual length is row * col\n # Elements are in uint8 format, so interleaved\n # so total amount of data is 2 * elem_count\n number_of_paired_elements = A.numel()\n # we assume that split_size > quant_blocksize\n\n SPLIT_SIZE = 256\n # grid = lambda META: (triton.cdiv(number_of_paired_elements, META['SPLIT_SIZE']), )\n grid = (triton.cdiv(number_of_paired_elements, SPLIT_SIZE),)","source_hash":"5d8c183b47a790f1b2fc940c2f000f0259533d2a1e032aecb11931c52b62887a","truncated":false} 
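The PAIRED_QUANT_BLOCK = QUANT_BLOCK // 2 constant in the body helpers exists because absmax is stored once per QUANT_BLOCK unpacked values, while kernel offsets count packed bytes, each of which covers two values. A worked example:

QUANT_BLOCK = 64                       # hypothetical blocksize
PAIRED_QUANT_BLOCK = QUANT_BLOCK // 2  # packed bytes per absmax entry
packed_offset = 100                    # byte index into the packed tensor
absmax_index = packed_offset // PAIRED_QUANT_BLOCK
assert absmax_index == 3               # bytes 96..127 all share absmax[3]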
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_4bit.dequantize_4bit_impl","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_4bit.dequantize_4bit_impl#L429-L450","kind":"function","name":"dequantize_4bit_impl","path":"bitsandbytes/backends/triton/kernels_4bit.py","language":"python","start_line":429,"end_line":450,"context_start_line":409,"context_end_line":470,"code":" block_start = pid * SPLIT_SIZE\n offsets = block_start + tl.arange(0, SPLIT_SIZE)\n mask = offsets < num_paired_elements\n\n a = tl.load(a_ptr + offsets, mask, eviction_policy=\"evict_first\")\n\n out_dq = dequant_nf4_body_util(\n a=a,\n offsets=offsets,\n absmax_ptr=absmax_ptr,\n n_elems=num_paired_elements,\n QUANT_BLOCK=QUANT_BLOCK,\n )\n\n out_block_start = pid * SPLIT_SIZE * 2\n offs = out_block_start + tl.arange(0, SPLIT_SIZE * 2)\n mask = offs < num_paired_elements * 2\n tl.store(c_ptr + offs, out_dq, mask)\n\n\ndef dequantize_4bit_impl(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n # It's will be processed as an array, so\n # actual length is row * col\n # Elements are in uint8 format, so interleaved\n # so total amount of data is 2 * elem_count\n number_of_paired_elements = A.numel()\n # we assume that split_size > quant_blocksize\n\n SPLIT_SIZE = 256\n # grid = lambda META: (triton.cdiv(number_of_paired_elements, META['SPLIT_SIZE']), )\n grid = (triton.cdiv(number_of_paired_elements, SPLIT_SIZE),)\n if quant_type == \"fp4\":\n dequant_fp4_kernel[grid](A, out, absmax, number_of_paired_elements, blocksize, SPLIT_SIZE)\n else:\n dequant_nf4_kernel[grid](A, out, absmax, number_of_paired_elements, blocksize, SPLIT_SIZE)\n\n\ndef dequantize_4bit_impl_passing_code(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n code: torch.Tensor,\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n number_of_paired_elements = A.numel()\n # we assume that split_size > quant_blocksize\n\n SPLIT_SIZE = 256\n # grid = lambda META: (triton.cdiv(number_of_paired_elements, META['SPLIT_SIZE']), )\n grid = (triton.cdiv(number_of_paired_elements, SPLIT_SIZE),)\n dequant_4bit_kernel[grid](A, out, code, absmax, number_of_paired_elements, blocksize, SPLIT_SIZE)\n\n\n######################### Fallback dequantization functions #########################","source_hash":"5d8c183b47a790f1b2fc940c2f000f0259533d2a1e032aecb11931c52b62887a","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_4bit.dequantize_4bit_impl_passing_code","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_4bit.dequantize_4bit_impl_passing_code#L453-L467","kind":"function","name":"dequantize_4bit_impl_passing_code","path":"bitsandbytes/backends/triton/kernels_4bit.py","language":"python","start_line":453,"end_line":467,"context_start_line":433,"context_end_line":487,"code":" quant_type: str,\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n # It's will be processed as an array, so\n # actual length is row * col\n # Elements are in uint8 format, so interleaved\n # so total amount of data is 2 * elem_count\n number_of_paired_elements = A.numel()\n # we assume that split_size > quant_blocksize\n\n SPLIT_SIZE = 256\n # grid = lambda META: (triton.cdiv(number_of_paired_elements, META['SPLIT_SIZE']), )\n grid = (triton.cdiv(number_of_paired_elements, SPLIT_SIZE),)\n if quant_type == \"fp4\":\n dequant_fp4_kernel[grid](A, out, absmax, 
number_of_paired_elements, blocksize, SPLIT_SIZE)\n else:\n dequant_nf4_kernel[grid](A, out, absmax, number_of_paired_elements, blocksize, SPLIT_SIZE)\n\n\ndef dequantize_4bit_impl_passing_code(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n code: torch.Tensor,\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n number_of_paired_elements = A.numel()\n # we assume that split_size > quant_blocksize\n\n SPLIT_SIZE = 256\n # grid = lambda META: (triton.cdiv(number_of_paired_elements, META['SPLIT_SIZE']), )\n grid = (triton.cdiv(number_of_paired_elements, SPLIT_SIZE),)\n dequant_4bit_kernel[grid](A, out, code, absmax, number_of_paired_elements, blocksize, SPLIT_SIZE)\n\n\n######################### Fallback dequantization functions #########################\n## for debug ##\n\n\n# @triton.autotune(\n# configs=[\n# # triton.Config({'SPLIT_NUM_BLOCKS': 1, 'grf_mode': 'large'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_NUM_BLOCKS': 1, 'grf_mode': 'auto'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_NUM_BLOCKS': 1, 'grf_mode': 'large'}, num_stages=4, num_warps=32),\n# # #\n# # triton.Config({\"SPLIT_NUM_BLOCKS\": 1, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# #\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 2}),\n# # triton.Config({\"SPLIT_NUM_BLOCKS\": 2, \"grf_mode\": \"large\"}, num_stages=2, num_warps=32),\n# # # triton.Config({'SPLIT_NUM_BLOCKS': 2, 'grf_mode': 'large'}, num_stages=4, num_warps=32),\n# # triton.Config({\"SPLIT_NUM_BLOCKS\": 2, \"grf_mode\": \"auto\"}, num_stages=2, num_warps=32),\n# # triton.Config({\"SPLIT_NUM_BLOCKS\": 2, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# # triton.Config({\"SPLIT_NUM_BLOCKS\": 4, \"grf_mode\": \"large\"}, num_stages=2, num_warps=32),","source_hash":"5d8c183b47a790f1b2fc940c2f000f0259533d2a1e032aecb11931c52b62887a","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_4bit.quantize_4bit_blockwise_kernel","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_4bit.quantize_4bit_blockwise_kernel#L494-L552","kind":"function","name":"quantize_4bit_blockwise_kernel","path":"bitsandbytes/backends/triton/kernels_4bit.py","language":"python","start_line":494,"end_line":552,"context_start_line":474,"context_end_line":552,"code":"# @triton.autotune(\n# configs=[\n# # triton.Config({'SPLIT_NUM_BLOCKS': 1, 'grf_mode': 'large'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_NUM_BLOCKS': 1, 'grf_mode': 'auto'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_NUM_BLOCKS': 1, 'grf_mode': 'large'}, num_stages=4, num_warps=32),\n# # #\n# # triton.Config({\"SPLIT_NUM_BLOCKS\": 1, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# #\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 2}),\n# # triton.Config({\"SPLIT_NUM_BLOCKS\": 2, \"grf_mode\": \"large\"}, num_stages=2, num_warps=32),\n# # # triton.Config({'SPLIT_NUM_BLOCKS': 2, 'grf_mode': 'large'}, num_stages=4, num_warps=32),\n# # triton.Config({\"SPLIT_NUM_BLOCKS\": 2, \"grf_mode\": \"auto\"}, num_stages=2, num_warps=32),\n# # triton.Config({\"SPLIT_NUM_BLOCKS\": 2, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# # triton.Config({\"SPLIT_NUM_BLOCKS\": 4, \"grf_mode\": \"large\"}, num_stages=2, num_warps=32),\n# # triton.Config({\"SPLIT_NUM_BLOCKS\": 4, \"grf_mode\": \"large\"}, num_stages=4, num_warps=32),\n# # triton.Config({'SPLIT_NUM_BLOCKS': 8, 'grf_mode': 'large'}, num_stages=2, num_warps=32),\n# ],\n# key=[\"n_elements\", \"BLOCK_SIZE\"],\n# 
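A hypothetical host-side call into dequantize_4bit_impl above (shapes, device, and dtype are illustrative; the packed tensor is assumed to come from the matching quantizer). With the hardcoded SPLIT_SIZE of 256, the stated assumption split_size > quant_blocksize holds for the common blocksizes of 64 and 128:

import torch

packed = torch.randint(0, 256, (2048, 1), dtype=torch.uint8, device="xpu")
blocksize = 64
absmax = torch.rand(2 * 2048 // blocksize, dtype=torch.float32, device="xpu")
out = torch.empty(4096, dtype=torch.float16, device="xpu")
dequantize_4bit_impl(packed, absmax, blocksize, "nf4", torch.float16, out=out)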
)\n@triton.jit\ndef quantize_4bit_blockwise_kernel(\n A_ptr,\n code_ptr,\n absmax_ptr,\n out_ptr,\n n_elements,\n BLOCK_SIZE: tl.constexpr,\n CODE_SIZE: tl.constexpr,\n SPLIT_NUM_BLOCKS: tl.constexpr,\n):\n PAIRED_SPLIT_NUM_BLOCKS: tl.constexpr = SPLIT_NUM_BLOCKS * 2\n block_start_idx = tl.program_id(0) * PAIRED_SPLIT_NUM_BLOCKS\n thread_idx = tl.arange(0, PAIRED_SPLIT_NUM_BLOCKS * BLOCK_SIZE)\n\n offsets = block_start_idx * BLOCK_SIZE + thread_idx\n mask = offsets < n_elements\n\n A = tl.load(A_ptr + offsets, mask=mask, other=0.0)\n\n # To be able process several blocks -> (PAIRED_SPLIT_NUM_BLOCKS, BLOCK_SIZE)\n A_reshaped = tl.reshape(A, (PAIRED_SPLIT_NUM_BLOCKS, BLOCK_SIZE))\n\n # Calculating absamax for each block\n absmax = tl.max(tl.abs(A_reshaped), axis=1)\n tl.store(absmax_ptr + block_start_idx + tl.arange(0, PAIRED_SPLIT_NUM_BLOCKS), absmax)\n\n A_normalized = A_reshaped / absmax[:, None]\n A_normalized = tl.clamp(A_normalized, -1.0, 1.0)\n\n lower_pivot = tl.zeros((PAIRED_SPLIT_NUM_BLOCKS, BLOCK_SIZE), dtype=tl.int32)\n upper_pivot = tl.full((PAIRED_SPLIT_NUM_BLOCKS, BLOCK_SIZE), CODE_SIZE - 1, dtype=tl.int32)\n\n for _ in range(4): # ceil(log2(code_size)) = 4, actually, in general case should be input parameter\n pivot = (lower_pivot + upper_pivot) // 2\n val = tl.load(code_ptr + pivot)\n is_higher = A_normalized > val # code[pivot]\n lower_pivot = tl.where(is_higher, pivot, lower_pivot)\n upper_pivot = tl.where(is_higher, upper_pivot, pivot)\n\n # Choose closest level\n lower_val = tl.load(code_ptr + lower_pivot)\n upper_val = tl.load(code_ptr + upper_pivot)\n lower_dist = tl.abs(A_normalized - lower_val)\n upper_dist = tl.abs(A_normalized - upper_val)\n quantized = tl.where(lower_dist <= upper_dist, lower_pivot, upper_pivot).to(tl.uint8)\n\n quantized = quantized.reshape((PAIRED_SPLIT_NUM_BLOCKS, BLOCK_SIZE // 2, 2))\n quantized = quantized.to(tl.uint8, bitcast=True)\n left, right = quantized.split()\n packed = left << 4 | (right & 0xF)\n\n # Reduce don't guarantee the order of the elements passed to unite_2_int4\n # packed = tl.reduce(quantized, axis=2, combine_fn=unite_2_int4)\n # packed = packed.to(tl.uint8, bitcast=True)\n\n packed_flat = tl.reshape(packed, (BLOCK_SIZE * SPLIT_NUM_BLOCKS,))\n out_offsets = block_start_idx * BLOCK_SIZE // 2 + tl.arange(0, SPLIT_NUM_BLOCKS * BLOCK_SIZE)\n out_mask = out_offsets < n_elements // 2\n tl.store(out_ptr + out_offsets, packed_flat, mask=out_mask)","source_hash":"5d8c183b47a790f1b2fc940c2f000f0259533d2a1e032aecb11931c52b62887a","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.ops","uri":"program://bitsandbytes/module/bitsandbytes.backends.triton.ops#L1-L297","kind":"module","name":"bitsandbytes.backends.triton.ops","path":"bitsandbytes/backends/triton/ops.py","language":"python","start_line":1,"end_line":297,"context_start_line":1,"context_end_line":297,"code":"from collections.abc import Sequence\nfrom typing import Optional\n\nimport torch\n\nfrom . 
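quantize_4bit_blockwise_kernel above finds the nearest codebook entry with a fixed four-step bisection (ceil(log2(CODE_SIZE)) = 4 for a 16-entry code, as its comment notes), then keeps whichever of the two surviving pivots is closer. A scalar sketch of the same search:

def nearest_code_index(x_normalized: float, code: list[float]) -> int:
    lo, hi = 0, len(code) - 1          # CODE_SIZE - 1, i.e. 15 for 4-bit codes
    for _ in range(4):                 # halve the candidate range four times
        mid = (lo + hi) // 2
        if x_normalized > code[mid]:
            lo = mid
        else:
            hi = mid
    # pick the closer of the two remaining levels, ties going to the lower one
    if abs(x_normalized - code[lo]) <= abs(x_normalized - code[hi]):
        return lo
    return hi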
import kernels_4bit, kernels_8bit_quant, kernels_optim\n\n# currently codes unused, kept for reference\n# Should be the same for quant/dequant\n# from bitsandbytes.functional import get_4bit_type\n# _FP4_QUANT_TABLE = get_4bit_type(\"fp4\", device=\"xpu\")\n# _NF4_QUANT_TABLE = get_4bit_type(\"nf4\", device=\"xpu\")\ndevice_type = torch.accelerator.current_accelerator().type if hasattr(torch, \"accelerator\") else \"cuda\"\ntorch_accelerator_module = getattr(torch, device_type, torch.cuda)\n\n\ndef quantize_blockwise(A: torch.Tensor, code: torch.Tensor, blocksize: int) -> tuple[torch.Tensor, torch.Tensor]:\n torch._check_is_size(blocksize)\n # torch._check(A.dtype == torch.float32, lambda: f\"A must be float32 on xpu, got {A.dtype}\")\n with torch_accelerator_module.device(A.device):\n out, absmax = kernels_8bit_quant.quantize_blockwise_triton(A, code, blocksize)\n return out, absmax.float()\n\n\ndef dequantize_blockwise(\n A: torch.Tensor, absmax: torch.Tensor, code: torch.Tensor, blocksize: int, dtype: torch.dtype\n) -> torch.Tensor:\n torch._check_is_size(blocksize)\n torch._check(A.dtype == torch.uint8, lambda: f\"A must be uint8, got {A.dtype}\")\n # torch._check(dtype == torch.float32, lambda: f\"dtype must be float32 on xpu, got {dtype}\")\n with torch_accelerator_module.device(A.device):\n out = kernels_8bit_quant.dequant_8bit_blockwise(\n A,\n absmax,\n code,\n blocksize,\n dtype=dtype,\n )\n return out\n\n\ndef dequantize_blockwise_inplace(\n A: torch.Tensor,\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n torch._check_is_size(blocksize)\n torch._check(A.dtype == torch.uint8, lambda: f\"A must be uint8, got {A.dtype}\")\n torch._check(out.shape == A.shape, lambda: f\"Expected out.shape == {A.shape}, got {out.shape}\")\n torch._check(out.device == A.device, lambda: f\"Expected out.device == {A.device}, got {out.device}\")\n torch._check(out.dtype == dtype, lambda: f\"Expected out.dtype == {dtype}, got {out.dtype}\")\n\n with torch_accelerator_module.device(A.device):\n kernels_8bit_quant.dequant_8bit_blockwise(\n A,\n absmax,\n code,\n blocksize,\n dtype=dtype,\n out=out,\n )\n\n\ndef quantize_4bit(\n A: torch.Tensor, blocksize: int, quant_type: str, quant_storage: torch.dtype\n) -> tuple[torch.Tensor, torch.Tensor]:\n torch._check_is_size(blocksize)\n # torch._check(quant_type == \"nf4\", lambda: f\"quant_type must be nf4 on CPU, got {quant_type}\")\n torch._check(\n A.dtype in [torch.bfloat16, torch.float16, torch.float32],\n lambda: f\"Blockwise 4bit quantization only supports 16/32-bit floats, but got {A.dtype}\",\n )\n\n n = A.numel()\n\n # TODO: Support when weight matrix is not divisible by blocksize\n # torch._check(n % blocksize == 0, lambda: f\"n must be divisible by blocksize, got {n} and {blocksize}\")\n\n blocks = -(n // -(blocksize * 2))\n\n absmax = torch.empty((blocks * 2,), device=A.device, dtype=A.dtype)\n out = torch.empty((n // 2, 1), device=A.device, dtype=torch.uint8)\n\n with torch_accelerator_module.device(A.device):\n kernels_4bit.quantize_4bit_blockwise_triton(\n A, blocksize, quant_type, blocks, absmax, num_elements=n, quantized_out=out\n )\n packed = out\n\n if quant_storage != torch.uint8:\n packed = out.squeeze().view(quant_storage).unsqueeze(1)\n\n return packed, absmax.float()\n\n\ndef dequantize_4bit(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n shape: Sequence[int],\n dtype: torch.dtype,\n) -> torch.Tensor:\n 
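A hypothetical round trip through the 8-bit blockwise ops defined above; create_dynamic_map from bitsandbytes.functional is assumed to supply the usual 256-entry dynamic codebook, and the device is illustrative:

import torch
from bitsandbytes.functional import create_dynamic_map

A = torch.randn(4096, dtype=torch.float32, device="xpu")
code = create_dynamic_map().to(A.device)  # 256 quantization levels in float32
packed, absmax = quantize_blockwise(A, code, blocksize=256)
A_hat = dequantize_blockwise(packed, absmax, code, blocksize=256, dtype=torch.float32)
# A_hat approximates A up to the 8-bit blockwise quantization error.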
torch._check_is_size(blocksize)\n # torch._check(quant_type == \"nf4\", lambda: f\"quant_type must be nf4 on XPU, got {quant_type}\")\n torch._check(\n dtype in [torch.bfloat16, torch.float16, torch.float32],\n lambda: f\"Blockwise 4bit dequantization only supports 16/32-bit floats, but got {dtype}\",\n )\n # torch._check(\n # A.dtype == torch.uint8,\n # lambda: f\"Blockwise 4bit dequantization on XPU only supports uint8 storage, got {A.dtype}\",\n # )\n # Check if this is fine and fast\n if A.dtype != torch.uint8:\n A = A.squeeze().view(torch.uint8).unsqueeze(1)\n\n out = torch.empty(shape, dtype=dtype, device=A.device)\n with torch_accelerator_module.device(A.device):\n kernels_4bit.dequantize_4bit_impl(A, absmax, blocksize, quant_type, dtype, out=out)\n\n return out\n\n\ndef dequantize_4bit_inplace(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n shape: Sequence[int],\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n torch._check(out.shape == shape, lambda: f\"Expected out.shape == {shape}, got {out.shape}\")\n torch._check(out.dtype == dtype, lambda: f\"Expected out.dtype == {dtype}, got {out.dtype}\")\n with torch_accelerator_module.device(A.device):\n kernels_4bit.dequantize_4bit_impl(A, absmax, blocksize, quant_type, dtype, out=out)\n\n\ndef gemv_4bit(\n A: torch.Tensor,\n B: torch.Tensor,\n shapeB: Sequence[int],\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n) -> torch.Tensor:\n if B.dtype != torch.uint8:\n B = B.squeeze().view(torch.uint8).unsqueeze(1)\n\n B_dq_triton = torch.empty(shapeB, dtype=A.dtype, device=A.device)\n\n with torch_accelerator_module.device(A.device):\n kernels_4bit.dequantize_4bit_impl_passing_code(\n B,\n absmax,\n blocksize,\n code,\n dtype=A.dtype,\n out=B_dq_triton,\n )\n\n return torch.nn.functional.linear(\n A,\n B_dq_triton,\n bias=None,\n )\n\n\n# optimizer_update_8bit_blockwise_impl = kernels_optim.optimizer_update_8bit_blockwise_pytorch\n# optimizer_update_8bit_blockwise_impl = torch.compile(kernels_optim.optimizer_update_8bit_blockwise_pytorch) # 60ms\n# optimizer_update_8bit_blockwise_impl = kernels_optim.optimizer_update_8bit_blockwise_triton_quant #2.8ms\n# optimizer_update_8bit_blockwise_impl = torch.compile(kernels_optim.optimizer_update_8bit_blockwise_triton_quant) # 2.3ms\noptimizer_update_8bit_blockwise_impl = kernels_optim.optimizer_update_8bit_blockwise_impl # ~0.95ms for adam\n\n\ndef optimizer_update_8bit_blockwise(\n optimizer_name: str,\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n step: int,\n lr: float,\n qmap1: torch.Tensor,\n qmap2: Optional[torch.Tensor],\n absmax1: torch.Tensor,\n absmax2: Optional[torch.Tensor],\n weight_decay: float = 0.0,\n gnorm_scale: float = 1.0,\n skip_zeros=False,\n) -> None:\n # torch._check(\n # g.numel() == p.numel(),\n # lambda: f\"g and p must have the same number of elements, got {g.numel()} and {p.numel()}\",\n # )\n # compute_dtypes = [torch.float16, torch.bfloat16, torch.float32]\n\n # torch._check(\n # g.dtype in compute_dtypes,\n # lambda: f\"g must be bfloat16, float16, or float32, got {g.dtype}\",\n # )\n # torch._check(\n # g.dtype == p.dtype,\n # lambda: f\"Expected all tensors to have the same dtype, got g.dtype={g.dtype}, p.dtype={p.dtype}\",\n # )\n # torch._check(\n # state1.dtype == torch.uint8,\n # lambda: f\"state1 must be uint8, got {state1.dtype}\",\n # )\n # torch._check(\n # qmap1.dtype == 
absmax1.dtype == torch.float32,\n # lambda: f\"Expected qmap1 and absmax1 to be float32, got qmap1.dtype={qmap1.dtype}, absmax1.dtype={absmax1.dtype}\",\n # )\n # if state2 is not None:\n # torch._check(\n # state2.dtype == torch.uint8,\n # lambda: f\"state2 must be uint8, got {state2.dtype}\",\n # )\n # torch._check(\n # qmap2.dtype == absmax2.dtype == torch.float32,\n # lambda: f\"Expected qmap2 and absmax2 to be float32, got qmap2.dtype={qmap2.dtype}, absmax2.dtype={absmax2.dtype}\",\n # )\n\n with torch_accelerator_module.device(state1.device):\n optimizer_update_8bit_blockwise_impl(\n optimizer_name=optimizer_name,\n g=g,\n p=p,\n state1=state1,\n state2=state2,\n beta1=beta1,\n beta2=beta2,\n beta3=beta3,\n alpha=alpha,\n eps=eps,\n step=step,\n lr=lr,\n qmap1=qmap1,\n qmap2=qmap2,\n absmax1=absmax1,\n absmax2=absmax2,\n weight_decay=weight_decay,\n gnorm_scale=gnorm_scale,\n skip_zeros=skip_zeros,\n )\n\n\ndef optimizer_update_32bit(\n optimizer_name: str,\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n unorm_vec: Optional[torch.Tensor],\n max_unorm: float,\n param_norm: float,\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n weight_decay: float,\n step: int,\n lr: float,\n gnorm_scale: float,\n skip_zeros=False,\n) -> None:\n with torch_accelerator_module.device(state1.device):\n kernels_optim.optimizer_update_32bit_impl(\n optimizer_name=optimizer_name,\n g=g,\n p=p,\n state1=state1,\n state2=state2,\n unorm_vec=unorm_vec,\n max_unorm=max_unorm,\n param_norm=param_norm,\n beta1=beta1,\n beta2=beta2,\n beta3=beta3,\n alpha=alpha,\n eps=eps,\n weight_decay=weight_decay,\n step=step,\n lr=lr,\n gnorm_scale=gnorm_scale,\n skip_zeros=skip_zeros,\n )","source_hash":"17b36ad387b85c667f35f21184e6944e0e5a0ed32046970e9be1899ca3120c17","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.ops.quantize_blockwise","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.ops.quantize_blockwise#L17-L22","kind":"function","name":"quantize_blockwise","path":"bitsandbytes/backends/triton/ops.py","language":"python","start_line":17,"end_line":22,"context_start_line":1,"context_end_line":42,"code":"from collections.abc import Sequence\nfrom typing import Optional\n\nimport torch\n\nfrom . 
import kernels_4bit, kernels_8bit_quant, kernels_optim\n\n# currently codes unused, kept for reference\n# Should be the same for quant/dequant\n# from bitsandbytes.functional import get_4bit_type\n# _FP4_QUANT_TABLE = get_4bit_type(\"fp4\", device=\"xpu\")\n# _NF4_QUANT_TABLE = get_4bit_type(\"nf4\", device=\"xpu\")\ndevice_type = torch.accelerator.current_accelerator().type if hasattr(torch, \"accelerator\") else \"cuda\"\ntorch_accelerator_module = getattr(torch, device_type, torch.cuda)\n\n\ndef quantize_blockwise(A: torch.Tensor, code: torch.Tensor, blocksize: int) -> tuple[torch.Tensor, torch.Tensor]:\n torch._check_is_size(blocksize)\n # torch._check(A.dtype == torch.float32, lambda: f\"A must be float32 on xpu, got {A.dtype}\")\n with torch_accelerator_module.device(A.device):\n out, absmax = kernels_8bit_quant.quantize_blockwise_triton(A, code, blocksize)\n return out, absmax.float()\n\n\ndef dequantize_blockwise(\n A: torch.Tensor, absmax: torch.Tensor, code: torch.Tensor, blocksize: int, dtype: torch.dtype\n) -> torch.Tensor:\n torch._check_is_size(blocksize)\n torch._check(A.dtype == torch.uint8, lambda: f\"A must be uint8, got {A.dtype}\")\n # torch._check(dtype == torch.float32, lambda: f\"dtype must be float32 on xpu, got {dtype}\")\n with torch_accelerator_module.device(A.device):\n out = kernels_8bit_quant.dequant_8bit_blockwise(\n A,\n absmax,\n code,\n blocksize,\n dtype=dtype,\n )\n return out\n\n\ndef dequantize_blockwise_inplace(","source_hash":"17b36ad387b85c667f35f21184e6944e0e5a0ed32046970e9be1899ca3120c17","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.ops.dequantize_blockwise","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.ops.dequantize_blockwise#L25-L39","kind":"function","name":"dequantize_blockwise","path":"bitsandbytes/backends/triton/ops.py","language":"python","start_line":25,"end_line":39,"context_start_line":5,"context_end_line":59,"code":"\nfrom . 
import kernels_4bit, kernels_8bit_quant, kernels_optim\n\n# currently codes unused, kept for reference\n# Should be the same for quant/dequant\n# from bitsandbytes.functional import get_4bit_type\n# _FP4_QUANT_TABLE = get_4bit_type(\"fp4\", device=\"xpu\")\n# _NF4_QUANT_TABLE = get_4bit_type(\"nf4\", device=\"xpu\")\ndevice_type = torch.accelerator.current_accelerator().type if hasattr(torch, \"accelerator\") else \"cuda\"\ntorch_accelerator_module = getattr(torch, device_type, torch.cuda)\n\n\ndef quantize_blockwise(A: torch.Tensor, code: torch.Tensor, blocksize: int) -> tuple[torch.Tensor, torch.Tensor]:\n torch._check_is_size(blocksize)\n # torch._check(A.dtype == torch.float32, lambda: f\"A must be float32 on xpu, got {A.dtype}\")\n with torch_accelerator_module.device(A.device):\n out, absmax = kernels_8bit_quant.quantize_blockwise_triton(A, code, blocksize)\n return out, absmax.float()\n\n\ndef dequantize_blockwise(\n A: torch.Tensor, absmax: torch.Tensor, code: torch.Tensor, blocksize: int, dtype: torch.dtype\n) -> torch.Tensor:\n torch._check_is_size(blocksize)\n torch._check(A.dtype == torch.uint8, lambda: f\"A must be uint8, got {A.dtype}\")\n # torch._check(dtype == torch.float32, lambda: f\"dtype must be float32 on xpu, got {dtype}\")\n with torch_accelerator_module.device(A.device):\n out = kernels_8bit_quant.dequant_8bit_blockwise(\n A,\n absmax,\n code,\n blocksize,\n dtype=dtype,\n )\n return out\n\n\ndef dequantize_blockwise_inplace(\n A: torch.Tensor,\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n torch._check_is_size(blocksize)\n torch._check(A.dtype == torch.uint8, lambda: f\"A must be uint8, got {A.dtype}\")\n torch._check(out.shape == A.shape, lambda: f\"Expected out.shape == {A.shape}, got {out.shape}\")\n torch._check(out.device == A.device, lambda: f\"Expected out.device == {A.device}, got {out.device}\")\n torch._check(out.dtype == dtype, lambda: f\"Expected out.dtype == {dtype}, got {out.dtype}\")\n\n with torch_accelerator_module.device(A.device):\n kernels_8bit_quant.dequant_8bit_blockwise(\n A,\n absmax,","source_hash":"17b36ad387b85c667f35f21184e6944e0e5a0ed32046970e9be1899ca3120c17","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.ops.dequantize_blockwise_inplace","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.ops.dequantize_blockwise_inplace#L42-L64","kind":"function","name":"dequantize_blockwise_inplace","path":"bitsandbytes/backends/triton/ops.py","language":"python","start_line":42,"end_line":64,"context_start_line":22,"context_end_line":84,"code":" return out, absmax.float()\n\n\ndef dequantize_blockwise(\n A: torch.Tensor, absmax: torch.Tensor, code: torch.Tensor, blocksize: int, dtype: torch.dtype\n) -> torch.Tensor:\n torch._check_is_size(blocksize)\n torch._check(A.dtype == torch.uint8, lambda: f\"A must be uint8, got {A.dtype}\")\n # torch._check(dtype == torch.float32, lambda: f\"dtype must be float32 on xpu, got {dtype}\")\n with torch_accelerator_module.device(A.device):\n out = kernels_8bit_quant.dequant_8bit_blockwise(\n A,\n absmax,\n code,\n blocksize,\n dtype=dtype,\n )\n return out\n\n\ndef dequantize_blockwise_inplace(\n A: torch.Tensor,\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n torch._check_is_size(blocksize)\n torch._check(A.dtype == torch.uint8, lambda: f\"A must be uint8, got {A.dtype}\")\n torch._check(out.shape == 
A.shape, lambda: f\"Expected out.shape == {A.shape}, got {out.shape}\")\n torch._check(out.device == A.device, lambda: f\"Expected out.device == {A.device}, got {out.device}\")\n torch._check(out.dtype == dtype, lambda: f\"Expected out.dtype == {dtype}, got {out.dtype}\")\n\n with torch_accelerator_module.device(A.device):\n kernels_8bit_quant.dequant_8bit_blockwise(\n A,\n absmax,\n code,\n blocksize,\n dtype=dtype,\n out=out,\n )\n\n\ndef quantize_4bit(\n A: torch.Tensor, blocksize: int, quant_type: str, quant_storage: torch.dtype\n) -> tuple[torch.Tensor, torch.Tensor]:\n torch._check_is_size(blocksize)\n # torch._check(quant_type == \"nf4\", lambda: f\"quant_type must be nf4 on CPU, got {quant_type}\")\n torch._check(\n A.dtype in [torch.bfloat16, torch.float16, torch.float32],\n lambda: f\"Blockwise 4bit quantization only supports 16/32-bit floats, but got {A.dtype}\",\n )\n\n n = A.numel()\n\n # TODO: Support when weight matrix is not divisible by blocksize\n # torch._check(n % blocksize == 0, lambda: f\"n must be divisible by blocksize, got {n} and {blocksize}\")\n\n blocks = -(n // -(blocksize * 2))\n\n absmax = torch.empty((blocks * 2,), device=A.device, dtype=A.dtype)","source_hash":"17b36ad387b85c667f35f21184e6944e0e5a0ed32046970e9be1899ca3120c17","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.ops.quantize_4bit","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.ops.quantize_4bit#L67-L96","kind":"function","name":"quantize_4bit","path":"bitsandbytes/backends/triton/ops.py","language":"python","start_line":67,"end_line":96,"context_start_line":47,"context_end_line":116,"code":" dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n torch._check_is_size(blocksize)\n torch._check(A.dtype == torch.uint8, lambda: f\"A must be uint8, got {A.dtype}\")\n torch._check(out.shape == A.shape, lambda: f\"Expected out.shape == {A.shape}, got {out.shape}\")\n torch._check(out.device == A.device, lambda: f\"Expected out.device == {A.device}, got {out.device}\")\n torch._check(out.dtype == dtype, lambda: f\"Expected out.dtype == {dtype}, got {out.dtype}\")\n\n with torch_accelerator_module.device(A.device):\n kernels_8bit_quant.dequant_8bit_blockwise(\n A,\n absmax,\n code,\n blocksize,\n dtype=dtype,\n out=out,\n )\n\n\ndef quantize_4bit(\n A: torch.Tensor, blocksize: int, quant_type: str, quant_storage: torch.dtype\n) -> tuple[torch.Tensor, torch.Tensor]:\n torch._check_is_size(blocksize)\n # torch._check(quant_type == \"nf4\", lambda: f\"quant_type must be nf4 on CPU, got {quant_type}\")\n torch._check(\n A.dtype in [torch.bfloat16, torch.float16, torch.float32],\n lambda: f\"Blockwise 4bit quantization only supports 16/32-bit floats, but got {A.dtype}\",\n )\n\n n = A.numel()\n\n # TODO: Support when weight matrix is not divisible by blocksize\n # torch._check(n % blocksize == 0, lambda: f\"n must be divisible by blocksize, got {n} and {blocksize}\")\n\n blocks = -(n // -(blocksize * 2))\n\n absmax = torch.empty((blocks * 2,), device=A.device, dtype=A.dtype)\n out = torch.empty((n // 2, 1), device=A.device, dtype=torch.uint8)\n\n with torch_accelerator_module.device(A.device):\n kernels_4bit.quantize_4bit_blockwise_triton(\n A, blocksize, quant_type, blocks, absmax, num_elements=n, quantized_out=out\n )\n packed = out\n\n if quant_storage != torch.uint8:\n packed = out.squeeze().view(quant_storage).unsqueeze(1)\n\n return packed, absmax.float()\n\n\ndef dequantize_4bit(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n 
quant_type: str,\n shape: Sequence[int],\n dtype: torch.dtype,\n) -> torch.Tensor:\n torch._check_is_size(blocksize)\n # torch._check(quant_type == \"nf4\", lambda: f\"quant_type must be nf4 on XPU, got {quant_type}\")\n torch._check(\n dtype in [torch.bfloat16, torch.float16, torch.float32],\n lambda: f\"Blockwise 4bit dequantization only supports 16/32-bit floats, but got {dtype}\",\n )\n # torch._check(\n # A.dtype == torch.uint8,\n # lambda: f\"Blockwise 4bit dequantization on XPU only supports uint8 storage, got {A.dtype}\",\n # )","source_hash":"17b36ad387b85c667f35f21184e6944e0e5a0ed32046970e9be1899ca3120c17","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.ops.dequantize_4bit","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.ops.dequantize_4bit#L99-L125","kind":"function","name":"dequantize_4bit","path":"bitsandbytes/backends/triton/ops.py","language":"python","start_line":99,"end_line":125,"context_start_line":79,"context_end_line":145,"code":" # TODO: Support when weight matrix is not divisible by blocksize\n # torch._check(n % blocksize == 0, lambda: f\"n must be divisible by blocksize, got {n} and {blocksize}\")\n\n blocks = -(n // -(blocksize * 2))\n\n absmax = torch.empty((blocks * 2,), device=A.device, dtype=A.dtype)\n out = torch.empty((n // 2, 1), device=A.device, dtype=torch.uint8)\n\n with torch_accelerator_module.device(A.device):\n kernels_4bit.quantize_4bit_blockwise_triton(\n A, blocksize, quant_type, blocks, absmax, num_elements=n, quantized_out=out\n )\n packed = out\n\n if quant_storage != torch.uint8:\n packed = out.squeeze().view(quant_storage).unsqueeze(1)\n\n return packed, absmax.float()\n\n\ndef dequantize_4bit(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n shape: Sequence[int],\n dtype: torch.dtype,\n) -> torch.Tensor:\n torch._check_is_size(blocksize)\n # torch._check(quant_type == \"nf4\", lambda: f\"quant_type must be nf4 on XPU, got {quant_type}\")\n torch._check(\n dtype in [torch.bfloat16, torch.float16, torch.float32],\n lambda: f\"Blockwise 4bit dequantization only supports 16/32-bit floats, but got {dtype}\",\n )\n # torch._check(\n # A.dtype == torch.uint8,\n # lambda: f\"Blockwise 4bit dequantization on XPU only supports uint8 storage, got {A.dtype}\",\n # )\n # Check if this is fine and fast\n if A.dtype != torch.uint8:\n A = A.squeeze().view(torch.uint8).unsqueeze(1)\n\n out = torch.empty(shape, dtype=dtype, device=A.device)\n with torch_accelerator_module.device(A.device):\n kernels_4bit.dequantize_4bit_impl(A, absmax, blocksize, quant_type, dtype, out=out)\n\n return out\n\n\ndef dequantize_4bit_inplace(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n shape: Sequence[int],\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n torch._check(out.shape == shape, lambda: f\"Expected out.shape == {shape}, got {out.shape}\")\n torch._check(out.dtype == dtype, lambda: f\"Expected out.dtype == {dtype}, got {out.dtype}\")\n with torch_accelerator_module.device(A.device):\n kernels_4bit.dequantize_4bit_impl(A, absmax, blocksize, quant_type, dtype, out=out)\n\n\ndef gemv_4bit(\n A: torch.Tensor,\n B: torch.Tensor,","source_hash":"17b36ad387b85c667f35f21184e6944e0e5a0ed32046970e9be1899ca3120c17","truncated":false} 
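Two idioms in quantize_4bit above deserve a gloss: blocks = -(n // -(blocksize * 2)) is ceil division in pure integer arithmetic (the kernel consumes blocks in pairs, hence the * 2 there and the blocks * 2 absmax allocation), and the squeeze/view/unsqueeze sequence reinterprets the packed bytes when quant_storage is a wider dtype. A sketch of both:

import math
import torch

n, blocksize = 10_000, 64
d = blocksize * 2
assert -(n // -d) == math.ceil(n / d)  # ceil division without floats

packed_fp16 = torch.zeros(8, 1, dtype=torch.float16)  # hypothetical non-uint8 storage
packed_u8 = packed_fp16.squeeze().view(torch.uint8).unsqueeze(1)
assert packed_u8.shape == (16, 1)      # same bytes, viewed as twice as many uint8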
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.ops.dequantize_4bit_inplace","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.ops.dequantize_4bit_inplace#L128-L140","kind":"function","name":"dequantize_4bit_inplace","path":"bitsandbytes/backends/triton/ops.py","language":"python","start_line":128,"end_line":140,"context_start_line":108,"context_end_line":160,"code":" # torch._check(quant_type == \"nf4\", lambda: f\"quant_type must be nf4 on XPU, got {quant_type}\")\n torch._check(\n dtype in [torch.bfloat16, torch.float16, torch.float32],\n lambda: f\"Blockwise 4bit dequantization only supports 16/32-bit floats, but got {dtype}\",\n )\n # torch._check(\n # A.dtype == torch.uint8,\n # lambda: f\"Blockwise 4bit dequantization on XPU only supports uint8 storage, got {A.dtype}\",\n # )\n # Check if this is fine and fast\n if A.dtype != torch.uint8:\n A = A.squeeze().view(torch.uint8).unsqueeze(1)\n\n out = torch.empty(shape, dtype=dtype, device=A.device)\n with torch_accelerator_module.device(A.device):\n kernels_4bit.dequantize_4bit_impl(A, absmax, blocksize, quant_type, dtype, out=out)\n\n return out\n\n\ndef dequantize_4bit_inplace(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n shape: Sequence[int],\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n torch._check(out.shape == shape, lambda: f\"Expected out.shape == {shape}, got {out.shape}\")\n torch._check(out.dtype == dtype, lambda: f\"Expected out.dtype == {dtype}, got {out.dtype}\")\n with torch_accelerator_module.device(A.device):\n kernels_4bit.dequantize_4bit_impl(A, absmax, blocksize, quant_type, dtype, out=out)\n\n\ndef gemv_4bit(\n A: torch.Tensor,\n B: torch.Tensor,\n shapeB: Sequence[int],\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n) -> torch.Tensor:\n if B.dtype != torch.uint8:\n B = B.squeeze().view(torch.uint8).unsqueeze(1)\n\n B_dq_triton = torch.empty(shapeB, dtype=A.dtype, device=A.device)\n\n with torch_accelerator_module.device(A.device):\n kernels_4bit.dequantize_4bit_impl_passing_code(\n B,\n absmax,\n blocksize,","source_hash":"17b36ad387b85c667f35f21184e6944e0e5a0ed32046970e9be1899ca3120c17","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.ops.gemv_4bit","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.ops.gemv_4bit#L143-L170","kind":"function","name":"gemv_4bit","path":"bitsandbytes/backends/triton/ops.py","language":"python","start_line":143,"end_line":170,"context_start_line":123,"context_end_line":190,"code":" kernels_4bit.dequantize_4bit_impl(A, absmax, blocksize, quant_type, dtype, out=out)\n\n return out\n\n\ndef dequantize_4bit_inplace(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n shape: Sequence[int],\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n torch._check(out.shape == shape, lambda: f\"Expected out.shape == {shape}, got {out.shape}\")\n torch._check(out.dtype == dtype, lambda: f\"Expected out.dtype == {dtype}, got {out.dtype}\")\n with torch_accelerator_module.device(A.device):\n kernels_4bit.dequantize_4bit_impl(A, absmax, blocksize, quant_type, dtype, out=out)\n\n\ndef gemv_4bit(\n A: torch.Tensor,\n B: torch.Tensor,\n shapeB: Sequence[int],\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n) -> torch.Tensor:\n if B.dtype != torch.uint8:\n B = B.squeeze().view(torch.uint8).unsqueeze(1)\n\n B_dq_triton = torch.empty(shapeB, dtype=A.dtype, device=A.device)\n\n with 
torch_accelerator_module.device(A.device):\n kernels_4bit.dequantize_4bit_impl_passing_code(\n B,\n absmax,\n blocksize,\n code,\n dtype=A.dtype,\n out=B_dq_triton,\n )\n\n return torch.nn.functional.linear(\n A,\n B_dq_triton,\n bias=None,\n )\n\n\n# optimizer_update_8bit_blockwise_impl = kernels_optim.optimizer_update_8bit_blockwise_pytorch\n# optimizer_update_8bit_blockwise_impl = torch.compile(kernels_optim.optimizer_update_8bit_blockwise_pytorch) # 60ms\n# optimizer_update_8bit_blockwise_impl = kernels_optim.optimizer_update_8bit_blockwise_triton_quant #2.8ms\n# optimizer_update_8bit_blockwise_impl = torch.compile(kernels_optim.optimizer_update_8bit_blockwise_triton_quant) # 2.3ms\noptimizer_update_8bit_blockwise_impl = kernels_optim.optimizer_update_8bit_blockwise_impl # ~0.95ms for adam\n\n\ndef optimizer_update_8bit_blockwise(\n optimizer_name: str,\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,","source_hash":"17b36ad387b85c667f35f21184e6944e0e5a0ed32046970e9be1899ca3120c17","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.ops.optimizer_update_8bit_blockwise","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.ops.optimizer_update_8bit_blockwise#L180-L254","kind":"function","name":"optimizer_update_8bit_blockwise","path":"bitsandbytes/backends/triton/ops.py","language":"python","start_line":180,"end_line":254,"context_start_line":160,"context_end_line":274,"code":" blocksize,\n code,\n dtype=A.dtype,\n out=B_dq_triton,\n )\n\n return torch.nn.functional.linear(\n A,\n B_dq_triton,\n bias=None,\n )\n\n\n# optimizer_update_8bit_blockwise_impl = kernels_optim.optimizer_update_8bit_blockwise_pytorch\n# optimizer_update_8bit_blockwise_impl = torch.compile(kernels_optim.optimizer_update_8bit_blockwise_pytorch) # 60ms\n# optimizer_update_8bit_blockwise_impl = kernels_optim.optimizer_update_8bit_blockwise_triton_quant #2.8ms\n# optimizer_update_8bit_blockwise_impl = torch.compile(kernels_optim.optimizer_update_8bit_blockwise_triton_quant) # 2.3ms\noptimizer_update_8bit_blockwise_impl = kernels_optim.optimizer_update_8bit_blockwise_impl # ~0.95ms for adam\n\n\ndef optimizer_update_8bit_blockwise(\n optimizer_name: str,\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n step: int,\n lr: float,\n qmap1: torch.Tensor,\n qmap2: Optional[torch.Tensor],\n absmax1: torch.Tensor,\n absmax2: Optional[torch.Tensor],\n weight_decay: float = 0.0,\n gnorm_scale: float = 1.0,\n skip_zeros=False,\n) -> None:\n # torch._check(\n # g.numel() == p.numel(),\n # lambda: f\"g and p must have the same number of elements, got {g.numel()} and {p.numel()}\",\n # )\n # compute_dtypes = [torch.float16, torch.bfloat16, torch.float32]\n\n # torch._check(\n # g.dtype in compute_dtypes,\n # lambda: f\"g must be bfloat16, float16, or float32, got {g.dtype}\",\n # )\n # torch._check(\n # g.dtype == p.dtype,\n # lambda: f\"Expected all tensors to have the same dtype, got g.dtype={g.dtype}, p.dtype={p.dtype}\",\n # )\n # torch._check(\n # state1.dtype == torch.uint8,\n # lambda: f\"state1 must be uint8, got {state1.dtype}\",\n # )\n # torch._check(\n # qmap1.dtype == absmax1.dtype == torch.float32,\n # lambda: f\"Expected qmap1 and absmax1 to be float32, got qmap1.dtype={qmap1.dtype}, absmax1.dtype={absmax1.dtype}\",\n # 
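Note the design of gemv_4bit above: rather than a fused kernel, it dequantizes B through the generic code-table path and defers the matmul to torch.nn.functional.linear, trading extra memory traffic for a single well-optimized GEMM. The commented-out assignments above it preserve measured step times for the 8-bit optimizer variants (pure PyTorch ~60 ms, compiled Triton-quant ~2.3 ms, fused Triton ~0.95 ms for Adam), which is presumably why the fused implementation is the one left enabled. Semantically the gemv reduces to (shapes illustrative):

import torch

A = torch.randn(2, 4096, dtype=torch.float16)        # activations
B_dq = torch.randn(1024, 4096, dtype=torch.float16)  # stands in for the dequantized weight
y = torch.nn.functional.linear(A, B_dq, bias=None)   # == A @ B_dq.T, what gemv_4bit returns
assert y.shape == (2, 1024)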
)\n # if state2 is not None:\n # torch._check(\n # state2.dtype == torch.uint8,\n # lambda: f\"state2 must be uint8, got {state2.dtype}\",\n # )\n # torch._check(\n # qmap2.dtype == absmax2.dtype == torch.float32,\n # lambda: f\"Expected qmap2 and absmax2 to be float32, got qmap2.dtype={qmap2.dtype}, absmax2.dtype={absmax2.dtype}\",\n # )\n\n with torch_accelerator_module.device(state1.device):\n optimizer_update_8bit_blockwise_impl(\n optimizer_name=optimizer_name,\n g=g,\n p=p,\n state1=state1,\n state2=state2,\n beta1=beta1,\n beta2=beta2,\n beta3=beta3,\n alpha=alpha,\n eps=eps,\n step=step,\n lr=lr,\n qmap1=qmap1,\n qmap2=qmap2,\n absmax1=absmax1,\n absmax2=absmax2,\n weight_decay=weight_decay,\n gnorm_scale=gnorm_scale,\n skip_zeros=skip_zeros,\n )\n\n\ndef optimizer_update_32bit(\n optimizer_name: str,\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n unorm_vec: Optional[torch.Tensor],\n max_unorm: float,\n param_norm: float,\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n weight_decay: float,\n step: int,\n lr: float,\n gnorm_scale: float,","source_hash":"17b36ad387b85c667f35f21184e6944e0e5a0ed32046970e9be1899ca3120c17","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.ops.optimizer_update_32bit","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.ops.optimizer_update_32bit#L257-L297","kind":"function","name":"optimizer_update_32bit","path":"bitsandbytes/backends/triton/ops.py","language":"python","start_line":257,"end_line":297,"context_start_line":237,"context_end_line":297,"code":" p=p,\n state1=state1,\n state2=state2,\n beta1=beta1,\n beta2=beta2,\n beta3=beta3,\n alpha=alpha,\n eps=eps,\n step=step,\n lr=lr,\n qmap1=qmap1,\n qmap2=qmap2,\n absmax1=absmax1,\n absmax2=absmax2,\n weight_decay=weight_decay,\n gnorm_scale=gnorm_scale,\n skip_zeros=skip_zeros,\n )\n\n\ndef optimizer_update_32bit(\n optimizer_name: str,\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n unorm_vec: Optional[torch.Tensor],\n max_unorm: float,\n param_norm: float,\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n weight_decay: float,\n step: int,\n lr: float,\n gnorm_scale: float,\n skip_zeros=False,\n) -> None:\n with torch_accelerator_module.device(state1.device):\n kernels_optim.optimizer_update_32bit_impl(\n optimizer_name=optimizer_name,\n g=g,\n p=p,\n state1=state1,\n state2=state2,\n unorm_vec=unorm_vec,\n max_unorm=max_unorm,\n param_norm=param_norm,\n beta1=beta1,\n beta2=beta2,\n beta3=beta3,\n alpha=alpha,\n eps=eps,\n weight_decay=weight_decay,\n step=step,\n lr=lr,\n gnorm_scale=gnorm_scale,\n skip_zeros=skip_zeros,\n )","source_hash":"17b36ad387b85c667f35f21184e6944e0e5a0ed32046970e9be1899ca3120c17","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_optim","uri":"program://bitsandbytes/module/bitsandbytes.backends.triton.kernels_optim#L1-L1154","kind":"module","name":"bitsandbytes.backends.triton.kernels_optim","path":"bitsandbytes/backends/triton/kernels_optim.py","language":"python","start_line":1,"end_line":1154,"context_start_line":1,"context_end_line":1154,"code":"import math\nfrom typing import Optional\n\nimport torch\n\nimport triton\nimport triton.language as tl\n\n# from triton.language.extra import libdevice\nfrom .kernels_8bit_quant import (\n dequant_8bit_blockwise,\n dequant_8bit_blockwise_kernel_util,\n 
quantize_8bit_blockwise_kernel_util,\n quantize_blockwise_triton,\n)\n\nMOMENTUM = 0\nRMSPROP = 1\nADAGRAD = 2\nADAM = 3\n# LION should be larger than MOMENTUM, RMSPROP, ADAGRAD due to comparison in kernels\nLION = 4\nADEMAMIX = 5\n\nname2optimizer_id = {\n \"momentum\": MOMENTUM,\n \"rmsprop\": RMSPROP,\n \"adagrad\": ADAGRAD,\n \"adam\": ADAM,\n \"lion\": LION,\n \"ademamix\": ADEMAMIX,\n}\n\n\n@triton.jit\ndef _optimizer_precondition_2state_32bit(\n g_ptr,\n p_ptr,\n state1_ptr,\n state2_ptr,\n unorm_ptr,\n beta1: tl.constexpr,\n beta2: tl.constexpr,\n eps: tl.constexpr,\n weight_decay: tl.constexpr,\n step,\n beta1_step,\n beta2_step,\n lr,\n gnorm_scale: tl.constexpr,\n n_elements,\n OPTIMIZER_ID: tl.constexpr,\n BLOCK_SIZE: tl.constexpr,\n N_PER_TH: tl.constexpr,\n):\n \"\"\"Preprocessing optimizer, computing update norm (2-state optimizer)\"\"\"\n pid = tl.program_id(axis=0)\n block_start_idx = pid * N_PER_TH\n offsets = block_start_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE * N_PER_TH)\n mask = offsets < n_elements\n\n g_vals = tl.load(g_ptr + offsets, mask=mask, other=0.0)\n s1_vals = tl.load(state1_ptr + offsets, mask=mask, other=0.0)\n s2_vals = tl.load(state2_ptr + offsets, mask=mask, other=0.0)\n\n g_vals = gnorm_scale * g_vals\n\n correction1 = 1.0 / (1.0 - beta1_step)\n correction2 = 1.0 / (1.0 - beta2_step)\n\n if OPTIMIZER_ID == 3: # ADAM\n s1_vals = s1_vals * beta1 + (1.0 - beta1) * g_vals\n s2_vals = s2_vals * beta2 + (1.0 - beta2) * g_vals * g_vals\n\n s1_vals = s1_vals * correction1\n s2_vals = s2_vals * correction2\n\n update_vals = s1_vals / (tl.sqrt(s2_vals) + eps)\n\n update_norm = update_vals * update_vals\n\n elif OPTIMIZER_ID == 5: # ADEMAMIX\n update_norm = s1_vals\n\n total_norm = tl.sum(tl.where(mask, update_norm, 0.0))\n\n tl.atomic_add(unorm_ptr, total_norm)\n\n\n@triton.jit\ndef _optimizer_precondition_1state_32bit(\n g_ptr,\n p_ptr,\n state1_ptr,\n state2_ptr,\n unorm_ptr,\n beta1: tl.constexpr,\n beta2: tl.constexpr,\n eps: tl.constexpr,\n weight_decay,\n step,\n beta1_step,\n beta2_step,\n lr,\n gnorm_scale: tl.constexpr,\n n_elements,\n OPTIMIZER_ID: tl.constexpr,\n BLOCK_SIZE: tl.constexpr,\n N_PER_TH: tl.constexpr,\n):\n \"\"\"Preprocessing optimizer, computing update norm (1-state optimizer)\"\"\"\n pid = tl.program_id(axis=0)\n block_start_idx = pid * N_PER_TH\n offsets = block_start_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE * N_PER_TH)\n mask = offsets < n_elements\n\n g_vals = tl.load(g_ptr + offsets, mask=mask, other=0.0)\n s1_vals = tl.load(state1_ptr + offsets, mask=mask, other=0.0)\n\n g_vals = gnorm_scale * g_vals\n\n if OPTIMIZER_ID == 0: # MOMENTUM\n if step == 1:\n s1_vals = g_vals\n else:\n s1_vals = s1_vals * beta1 + g_vals\n update_norm = s1_vals * s1_vals\n\n elif OPTIMIZER_ID == 4: # LION\n s1_vals = s1_vals * beta2 + (1.0 - beta2) * g_vals\n update_norm = s1_vals\n\n elif OPTIMIZER_ID == 1: # RMSPROP\n s1_vals = s1_vals * beta1 + (1.0 - beta1) * g_vals * g_vals\n update_vals = g_vals / (tl.sqrt(s1_vals) + eps)\n update_norm = update_vals * update_vals\n\n elif OPTIMIZER_ID == 2: # ADAGRAD\n s1_vals = s1_vals + g_vals * g_vals\n update_vals = g_vals / (tl.sqrt(s1_vals) + eps)\n update_norm = update_vals * update_vals\n\n total_norm = tl.sum(tl.where(mask, update_norm, 0.0))\n\n tl.atomic_add(unorm_ptr, total_norm)\n\n\n@triton.jit\ndef _optimizer_update_2state_32bit_triton_kernel(\n g_ptr,\n p_ptr,\n state1_ptr,\n state2_ptr,\n unorm_ptr,\n max_unorm: tl.constexpr,\n param_norm,\n beta1: tl.constexpr,\n beta2: tl.constexpr,\n 
beta3,\n alpha,\n eps: tl.constexpr,\n weight_decay: tl.constexpr,\n step,\n beta1_step,\n beta2_step,\n lr,\n gnorm_scale: tl.constexpr,\n skip_zeros,\n n_elements,\n OPTIMIZER_ID: tl.constexpr,\n BLOCK_SIZE: tl.constexpr,\n N_PER_TH: tl.constexpr,\n):\n \"\"\"2-state optimizer kernel\"\"\"\n pid = tl.program_id(axis=0)\n block_start_idx = pid * N_PER_TH\n offsets = block_start_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE * N_PER_TH)\n mask = offsets < n_elements\n\n g_vals = tl.load(g_ptr + offsets, mask=mask, other=0.0).to(tl.float32)\n p_vals = tl.load(p_ptr + offsets, mask=mask, other=0.0).to(tl.float32)\n s1_vals = tl.load(state1_ptr + offsets, mask=mask, other=0.0)\n s2_vals = tl.load(state2_ptr + offsets, mask=mask, other=0.0)\n\n if OPTIMIZER_ID == 5: # ADEMAMIX\n s3_vals = tl.load(state1_ptr + n_elements + offsets, mask=mask, other=0.0)\n\n g_vals = gnorm_scale * g_vals\n\n update_scale = 1.0\n if max_unorm > 0.0:\n current_unorm = tl.sqrt(tl.load(unorm_ptr))\n if current_unorm > max_unorm * param_norm:\n update_scale = (max_unorm * param_norm) / current_unorm\n\n if OPTIMIZER_ID == 3: # ADAM\n s1_vals = s1_vals * beta1 + (1.0 - beta1) * g_vals\n s2_vals = s2_vals * beta2 + (1.0 - beta2) * g_vals * g_vals\n\n correction1 = 1.0 - beta1_step\n correction2 = tl.sqrt(1.0 - beta2_step)\n step_size = -lr * correction2 / correction1\n\n if weight_decay > 0.0:\n p_vals = p_vals * (1.0 - lr * weight_decay)\n\n update_val = update_scale * step_size * (s1_vals / (tl.sqrt(s2_vals) + eps * correction2))\n p_vals = p_vals + update_val\n\n elif OPTIMIZER_ID == 5: # ADEMAMIX\n s1_vals = s1_vals * beta1 + (1.0 - beta1) * g_vals # m1\n s3_vals = s3_vals * beta3 + (1.0 - beta3) * g_vals # m2\n s2_vals = s2_vals * beta2 + (1.0 - beta2) * g_vals * g_vals # nu\n\n correction1 = 1.0 - beta1_step\n correction2 = tl.sqrt(1.0 - beta2_step)\n\n if weight_decay > 0.0:\n p_vals = p_vals * (1.0 - lr * weight_decay)\n\n mixed_momentum = (s1_vals / correction1) + (alpha * s3_vals)\n adaptive_term = (tl.sqrt(s2_vals) / correction2) + eps\n p_vals = p_vals - lr * (mixed_momentum / adaptive_term)\n\n tl.store(p_ptr + offsets, p_vals, mask=mask)\n tl.store(state1_ptr + offsets, s1_vals, mask=mask)\n tl.store(state2_ptr + offsets, s2_vals, mask=mask)\n\n if OPTIMIZER_ID == 5: # ADEMAMIX\n tl.store(state1_ptr + n_elements + offsets, s3_vals, mask=mask)\n\n\n@triton.jit\ndef _optimizer_update_1state_32bit_triton_kernel(\n g_ptr,\n p_ptr,\n state1_ptr,\n state2_ptr,\n unorm_ptr,\n max_unorm: tl.constexpr,\n param_norm,\n beta1: tl.constexpr,\n beta2: tl.constexpr,\n beta3,\n alpha,\n eps: tl.constexpr,\n weight_decay: tl.constexpr,\n step,\n beta1_step,\n beta2_step,\n lr,\n gnorm_scale: tl.constexpr,\n skip_zeros,\n n_elements,\n OPTIMIZER_ID: tl.constexpr,\n BLOCK_SIZE: tl.constexpr,\n N_PER_TH: tl.constexpr,\n):\n \"\"\"1-state optimizer kernel\"\"\"\n pid = tl.program_id(axis=0)\n block_start_idx = pid * N_PER_TH\n offsets = block_start_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE * N_PER_TH)\n mask = offsets < n_elements\n\n g_vals = tl.load(g_ptr + offsets, mask=mask, other=0.0).to(tl.float32)\n p_vals = tl.load(p_ptr + offsets, mask=mask, other=0.0).to(tl.float32)\n s1_vals = tl.load(state1_ptr + offsets, mask=mask, other=0.0)\n\n g_vals = gnorm_scale * g_vals\n if weight_decay > 0.0:\n g_vals = g_vals + p_vals * weight_decay\n\n update_scale = 1.0\n if max_unorm > 0.0:\n current_unorm = tl.sqrt(tl.load(unorm_ptr))\n if current_unorm > max_unorm * param_norm + eps:\n update_scale = (max_unorm * param_norm + eps) / 
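A scalar sketch of the ADAM branch above, with the same algebra: eps is scaled by the sqrt bias-correction term before entering the denominator, weight decay is decoupled, and the bias-correction powers are precomputed on the host (beta1_step, beta2_step), as the comment in optimizer_update_32bit_impl below explains:

import math

def adam_step(p, g, m, v, *, lr, beta1, beta2, eps, step, weight_decay=0.0):
    m = beta1 * m + (1.0 - beta1) * g
    v = beta2 * v + (1.0 - beta2) * g * g
    c1 = 1.0 - beta1**step                 # beta1_step, precomputed host-side
    c2 = math.sqrt(1.0 - beta2**step)      # sqrt(1 - beta2_step)
    if weight_decay > 0.0:
        p = p * (1.0 - lr * weight_decay)  # decoupled (AdamW-style) decay
    p = p + (-lr * c2 / c1) * (m / (math.sqrt(v) + eps * c2))
    return p, m, v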
current_unorm\n\n if OPTIMIZER_ID == 0: # MOMENTUM\n if step == 1:\n s1_vals = g_vals\n else:\n s1_vals = s1_vals * beta1 + g_vals\n\n update_val = update_scale * (-lr * s1_vals)\n p_vals = p_vals + update_val\n\n elif OPTIMIZER_ID == 4: # LION\n momentum_update = s1_vals * beta1 + (1.0 - beta1) * g_vals\n update_val = update_scale * lr * tl.where(momentum_update > 0, 1.0, tl.where(momentum_update < 0, -1.0, 0.0))\n p_vals = p_vals - update_val\n\n s1_vals = s1_vals * beta2 + (1.0 - beta2) * g_vals\n\n elif OPTIMIZER_ID == 1: # RMSPROP\n s1_vals = s1_vals * beta1 + (1.0 - beta1) * g_vals * g_vals\n\n update_val = update_scale * lr * g_vals / (tl.sqrt(s1_vals) + eps)\n p_vals = p_vals - update_val\n\n elif OPTIMIZER_ID == 2: # ADAGRAD\n s1_vals = s1_vals + g_vals * g_vals\n\n update_val = lr * g_vals / (tl.sqrt(s1_vals) + eps)\n p_vals = p_vals - update_val\n\n tl.store(p_ptr + offsets, p_vals, mask=mask)\n tl.store(state1_ptr + offsets, s1_vals, mask=mask)\n\n\nname2optimizer_32bit_fn = {\n \"adam\": {\n \"preprocess\": _optimizer_precondition_2state_32bit,\n \"update\": _optimizer_update_2state_32bit_triton_kernel,\n },\n \"ademamix\": {\n \"preprocess\": _optimizer_precondition_2state_32bit,\n \"update\": _optimizer_update_2state_32bit_triton_kernel,\n },\n \"momentum\": {\n \"preprocess\": _optimizer_precondition_1state_32bit,\n \"update\": _optimizer_update_1state_32bit_triton_kernel,\n },\n \"rmsprop\": {\n \"preprocess\": _optimizer_precondition_1state_32bit,\n \"update\": _optimizer_update_1state_32bit_triton_kernel,\n },\n \"adagrad\": {\n \"preprocess\": _optimizer_precondition_1state_32bit,\n \"update\": _optimizer_update_1state_32bit_triton_kernel,\n },\n \"lion\": {\n \"preprocess\": _optimizer_precondition_1state_32bit,\n \"update\": _optimizer_update_1state_32bit_triton_kernel,\n },\n}\n\n\ndef optimizer_update_32bit_impl(\n optimizer_name: str,\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n unorm_vec: Optional[torch.Tensor],\n max_unorm: float,\n param_norm: float,\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n weight_decay: float,\n step: int,\n lr: float,\n gnorm_scale: float = 1.0,\n skip_zeros=False,\n) -> None:\n \"\"\"\n 32-bit optimizer implemented by Triton\n \"\"\"\n if skip_zeros:\n raise NotImplementedError(\"skip_zeros is not supported on XPU yet\")\n\n BLOCK_SIZE = 256\n N_PER_TH = 1 # Number of blocks processed per thread.\n grid = (triton.cdiv(p.numel(), BLOCK_SIZE * N_PER_TH),)\n optimizer_id = name2optimizer_id[optimizer_name]\n fn_preprocess = name2optimizer_32bit_fn[optimizer_name][\"preprocess\"]\n fn_update = name2optimizer_32bit_fn[optimizer_name][\"update\"]\n\n # In torch=2.7 on XPU there is an issue with libdevice.pow, leading to an error.\n # For backwards compatibility we precompute the bias correction factors.\n beta1_step = beta1**step\n beta2_step = beta2**step\n\n if optimizer_name == \"lion\":\n fn_update[grid](\n g,\n p,\n state1,\n state2,\n unorm_vec,\n max_unorm,\n param_norm,\n beta1,\n beta2,\n beta3,\n alpha,\n eps,\n weight_decay,\n step,\n beta1_step,\n beta2_step,\n lr,\n gnorm_scale,\n skip_zeros,\n p.numel(),\n optimizer_id,\n BLOCK_SIZE,\n N_PER_TH,\n num_warps=2,\n )\n\n if max_unorm > 0.0:\n unorm_vec.zero_()\n fn_preprocess[grid](\n g,\n p,\n state1,\n state2,\n unorm_vec,\n beta1,\n beta2,\n eps,\n weight_decay,\n step,\n beta1_step,\n beta2_step,\n lr,\n gnorm_scale,\n p.numel(),\n optimizer_id,\n BLOCK_SIZE,\n N_PER_TH,\n num_warps=2,\n )\n\n 
else:\n if max_unorm > 0.0:\n unorm_vec.zero_()\n fn_preprocess[grid](\n g,\n p,\n state1,\n state2,\n unorm_vec,\n beta1,\n beta2,\n eps,\n weight_decay,\n step,\n beta1_step,\n beta2_step,\n lr,\n gnorm_scale,\n p.numel(),\n optimizer_id,\n BLOCK_SIZE,\n N_PER_TH,\n num_warps=2,\n )\n\n fn_update[grid](\n g,\n p,\n state1,\n state2,\n unorm_vec,\n max_unorm,\n param_norm,\n beta1,\n beta2,\n beta3,\n alpha,\n eps,\n weight_decay,\n step,\n beta1_step,\n beta2_step,\n lr,\n gnorm_scale,\n skip_zeros,\n p.numel(),\n optimizer_id,\n BLOCK_SIZE,\n N_PER_TH,\n num_warps=2,\n )\n\n\n###########################################\n# Pure torch implementation for reference #\n###########################################\n\n\n@torch.compile\ndef _dequantize_blockwise_pytorch(\n A: torch.Tensor,\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n dtype: torch.dtype,\n) -> torch.Tensor:\n \"\"\"\n Pure PyTorch reference implementation for block-wise dequantization.\n \"\"\"\n if A.numel() == 0:\n return torch.empty_like(A, dtype=dtype)\n\n A_flat = A.flatten()\n num_elements = A_flat.numel()\n\n dequantized_flat = code.to(A.device)[A_flat.long()].to(dtype)\n\n num_blocks = math.ceil(num_elements / blocksize)\n pad_len = num_blocks * blocksize - num_elements\n if pad_len > 0:\n dequantized_flat = torch.nn.functional.pad(dequantized_flat, (0, pad_len))\n\n dequantized_blocks = dequantized_flat.reshape(num_blocks, blocksize)\n\n rescaled_blocks = dequantized_blocks * absmax.unsqueeze(1).to(dtype)\n\n rescaled_flat = rescaled_blocks.flatten()\n if pad_len > 0:\n rescaled_flat = rescaled_flat[:-pad_len]\n\n return rescaled_flat.reshape(A.shape)\n\n\n@torch.compile\ndef _quantize_blockwise_pytorch(\n A: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Pure PyTorch reference implementation for block-wise quantization.\n \"\"\"\n if A.numel() == 0:\n return torch.empty_like(A, dtype=torch.uint8), torch.empty(0, dtype=torch.float32, device=A.device)\n\n A_flat = A.flatten()\n num_elements = A_flat.numel()\n\n num_blocks = math.ceil(num_elements / blocksize)\n\n pad_len = num_blocks * blocksize - num_elements\n if pad_len > 0:\n A_flat = torch.nn.functional.pad(A_flat, (0, pad_len))\n\n A_blocks = A_flat.reshape(num_blocks, blocksize)\n\n absmax = torch.max(torch.abs(A_blocks), dim=1, keepdim=True)[0]\n absmax[absmax == 0] = 1.0\n\n scaled_blocks = A_blocks / absmax\n\n # Inefficient but straightforward quantization, takes a lot of memory\n diff = torch.abs(scaled_blocks.unsqueeze(2) - code.to(A.device))\n quantized_indices = torch.argmin(diff, dim=2).to(torch.uint8)\n\n quantized_flat = quantized_indices.flatten()\n if pad_len > 0:\n quantized_flat = quantized_flat[:-pad_len]\n\n return quantized_flat.reshape(A.shape), absmax.flatten()\n\n\n# Main updated function\ndef optimizer_update_8bit_blockwise_pytorch(\n p: torch.Tensor,\n g: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n beta1: float,\n beta2: float,\n beta3: float, # ADEMIX\n alpha: float, # ADEMIX\n eps: float,\n step: int,\n lr: float,\n qmap1: torch.Tensor,\n qmap2: Optional[torch.Tensor],\n absmax1: torch.Tensor,\n absmax2: Optional[torch.Tensor],\n weight_decay: float,\n gnorm_scale: float,\n skip_zeros: bool,\n # ADEMIX\n *,\n optimizer_name: str,\n) -> None:\n \"\"\"\n Pure PyTorch implementation of the 8-bit block-wise optimizer update step.\n This version ensures high-precision updates for float16 parameters.\n \"\"\"\n if skip_zeros:\n raise 
ValueError(\"skip_zeros is not supported on XPU yet.\")\n\n blocksize = 256\n\n with torch.no_grad():\n # Dequantize states to perform updates in 32-bit precision\n if optimizer_name == \"ademamix\" and absmax1.ndim == 2:\n # For AdEMAMix, state1 holds two EMAs, so absmax1 is stacked.\n s1_1_fp32 = _dequantize_blockwise_pytorch(state1[0], absmax1[0], qmap1, blocksize, torch.float32)\n s1_2_fp32 = _dequantize_blockwise_pytorch(state1[1], absmax1[1], qmap1, blocksize, torch.float32)\n state1_fp32 = torch.stack([s1_1_fp32, s1_2_fp32])\n else:\n state1_fp32 = _dequantize_blockwise_pytorch(state1, absmax1, qmap1, blocksize, torch.float32)\n\n state2_fp32 = None\n if state2 is not None:\n state2_fp32 = _dequantize_blockwise_pytorch(state2, absmax2, qmap2, blocksize, torch.float32)\n\n grad = g.float() * gnorm_scale\n\n # Create a 32-bit copy of the parameter for high-precision updates\n p_fp32 = p.data.float()\n\n if optimizer_name == \"adam\":\n state1_fp32.mul_(beta1).add_(grad, alpha=1.0 - beta1)\n state2_fp32.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)\n\n bias_correction1 = 1.0 - beta1**step\n bias_correction2 = 1.0 - beta2**step\n\n denom = (state2_fp32.sqrt() / math.sqrt(bias_correction2)).add_(eps)\n\n if weight_decay > 0.0:\n p_fp32.mul_(1.0 - lr * weight_decay)\n p_fp32.addcdiv_(state1_fp32, denom, value=-lr / bias_correction1)\n\n elif optimizer_name == \"ademamix\":\n m1_fp32, m2_fp32 = state1_fp32[0], state1_fp32[1]\n nu_fp32 = state2_fp32\n\n m1_fp32.mul_(beta1).add_(grad, alpha=1.0 - beta1)\n m2_fp32.mul_(beta3).add_(grad, alpha=1.0 - beta3)\n nu_fp32.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)\n\n bias_correction1 = 1.0 - beta1**step\n bias_correction2 = math.sqrt(1.0 - beta2**step)\n\n update = (m1_fp32 / bias_correction1 + alpha * m2_fp32) / (nu_fp32.sqrt() / bias_correction2 + eps)\n\n if weight_decay > 0.0:\n p_fp32.mul_(1.0 - lr * weight_decay)\n\n p_fp32.add_(update, alpha=-lr)\n state1_fp32 = torch.stack([m1_fp32, m2_fp32])\n\n elif optimizer_name == \"momentum\":\n grad.add_(p_fp32, alpha=weight_decay)\n if step == 1:\n state1_fp32.copy_(grad)\n else:\n state1_fp32.mul_(beta1).add_(grad)\n p_fp32.add_(state1_fp32, alpha=-lr)\n\n elif optimizer_name == \"rmsprop\":\n grad.add_(p_fp32, alpha=weight_decay)\n state1_fp32.mul_(beta1).addcmul_(grad, grad, value=1.0 - beta1)\n p_fp32.addcdiv_(grad, state1_fp32.sqrt().add_(eps), value=-lr)\n\n elif optimizer_name == \"lion\":\n if weight_decay > 0.0:\n p_fp32.mul_(1.0 - lr * weight_decay)\n\n update_dir = torch.sign(state1_fp32.mul(beta1) + grad.mul(1.0 - beta1))\n p_fp32.add_(update_dir, alpha=-lr)\n\n state1_fp32.mul_(beta2).add_(grad, alpha=1.0 - beta2)\n\n elif optimizer_name == \"adagrad\":\n grad.add_(p_fp32, alpha=weight_decay)\n state1_fp32.addcmul_(grad, grad, value=1.0)\n p_fp32.addcdiv_(grad, state1_fp32.sqrt().add_(eps), value=-lr)\n\n else:\n raise NotImplementedError(\n f\"Pure PyTorch implementation for optimizer '{optimizer_name}' is not available.\"\n )\n\n # Copy the updated 32-bi\n# ... 
truncated ...","source_hash":"5929c0239b7327ada8f7185668fa6a7727b0c83759483613bd0b541d7b040ab5","truncated":true} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_optim._optimizer_precondition_2state_32bit","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_optim._optimizer_precondition_2state_32bit#L36-L87","kind":"function","name":"_optimizer_precondition_2state_32bit","path":"bitsandbytes/backends/triton/kernels_optim.py","language":"python","start_line":36,"end_line":87,"context_start_line":16,"context_end_line":107,"code":"\nMOMENTUM = 0\nRMSPROP = 1\nADAGRAD = 2\nADAM = 3\n# LION should be larger than MOMENTUM, RMSPROP, ADAGRAD due to comparison in kernels\nLION = 4\nADEMAMIX = 5\n\nname2optimizer_id = {\n \"momentum\": MOMENTUM,\n \"rmsprop\": RMSPROP,\n \"adagrad\": ADAGRAD,\n \"adam\": ADAM,\n \"lion\": LION,\n \"ademamix\": ADEMAMIX,\n}\n\n\n@triton.jit\ndef _optimizer_precondition_2state_32bit(\n g_ptr,\n p_ptr,\n state1_ptr,\n state2_ptr,\n unorm_ptr,\n beta1: tl.constexpr,\n beta2: tl.constexpr,\n eps: tl.constexpr,\n weight_decay: tl.constexpr,\n step,\n beta1_step,\n beta2_step,\n lr,\n gnorm_scale: tl.constexpr,\n n_elements,\n OPTIMIZER_ID: tl.constexpr,\n BLOCK_SIZE: tl.constexpr,\n N_PER_TH: tl.constexpr,\n):\n \"\"\"Preprocessing optimizer, computing update norm (2-state optimizer)\"\"\"\n pid = tl.program_id(axis=0)\n block_start_idx = pid * N_PER_TH\n offsets = block_start_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE * N_PER_TH)\n mask = offsets < n_elements\n\n g_vals = tl.load(g_ptr + offsets, mask=mask, other=0.0)\n s1_vals = tl.load(state1_ptr + offsets, mask=mask, other=0.0)\n s2_vals = tl.load(state2_ptr + offsets, mask=mask, other=0.0)\n\n g_vals = gnorm_scale * g_vals\n\n correction1 = 1.0 / (1.0 - beta1_step)\n correction2 = 1.0 / (1.0 - beta2_step)\n\n if OPTIMIZER_ID == 3: # ADAM\n s1_vals = s1_vals * beta1 + (1.0 - beta1) * g_vals\n s2_vals = s2_vals * beta2 + (1.0 - beta2) * g_vals * g_vals\n\n s1_vals = s1_vals * correction1\n s2_vals = s2_vals * correction2\n\n update_vals = s1_vals / (tl.sqrt(s2_vals) + eps)\n\n update_norm = update_vals * update_vals\n\n elif OPTIMIZER_ID == 5: # ADEMAMIX\n update_norm = s1_vals\n\n total_norm = tl.sum(tl.where(mask, update_norm, 0.0))\n\n tl.atomic_add(unorm_ptr, total_norm)\n\n\n@triton.jit\ndef _optimizer_precondition_1state_32bit(\n g_ptr,\n p_ptr,\n state1_ptr,\n state2_ptr,\n unorm_ptr,\n beta1: tl.constexpr,\n beta2: tl.constexpr,\n eps: tl.constexpr,\n weight_decay,\n step,\n beta1_step,\n beta2_step,\n lr,\n gnorm_scale: tl.constexpr,\n n_elements,\n OPTIMIZER_ID: tl.constexpr,","source_hash":"5929c0239b7327ada8f7185668fa6a7727b0c83759483613bd0b541d7b040ab5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_optim._optimizer_precondition_1state_32bit","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_optim._optimizer_precondition_1state_32bit#L91-L145","kind":"function","name":"_optimizer_precondition_1state_32bit","path":"bitsandbytes/backends/triton/kernels_optim.py","language":"python","start_line":91,"end_line":145,"context_start_line":71,"context_end_line":165,"code":" if OPTIMIZER_ID == 3: # ADAM\n s1_vals = s1_vals * beta1 + (1.0 - beta1) * g_vals\n s2_vals = s2_vals * beta2 + (1.0 - beta2) * g_vals * g_vals\n\n s1_vals = s1_vals * correction1\n s2_vals = s2_vals * correction2\n\n update_vals = s1_vals / (tl.sqrt(s2_vals) + eps)\n\n update_norm = update_vals * update_vals\n\n 
elif OPTIMIZER_ID == 5: # ADEMAMIX\n update_norm = s1_vals\n\n total_norm = tl.sum(tl.where(mask, update_norm, 0.0))\n\n tl.atomic_add(unorm_ptr, total_norm)\n\n\n@triton.jit\ndef _optimizer_precondition_1state_32bit(\n g_ptr,\n p_ptr,\n state1_ptr,\n state2_ptr,\n unorm_ptr,\n beta1: tl.constexpr,\n beta2: tl.constexpr,\n eps: tl.constexpr,\n weight_decay,\n step,\n beta1_step,\n beta2_step,\n lr,\n gnorm_scale: tl.constexpr,\n n_elements,\n OPTIMIZER_ID: tl.constexpr,\n BLOCK_SIZE: tl.constexpr,\n N_PER_TH: tl.constexpr,\n):\n \"\"\"Preprocessing optimizer, computing update norm (1-state optimizer)\"\"\"\n pid = tl.program_id(axis=0)\n block_start_idx = pid * N_PER_TH\n offsets = block_start_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE * N_PER_TH)\n mask = offsets < n_elements\n\n g_vals = tl.load(g_ptr + offsets, mask=mask, other=0.0)\n s1_vals = tl.load(state1_ptr + offsets, mask=mask, other=0.0)\n\n g_vals = gnorm_scale * g_vals\n\n if OPTIMIZER_ID == 0: # MOMENTUM\n if step == 1:\n s1_vals = g_vals\n else:\n s1_vals = s1_vals * beta1 + g_vals\n update_norm = s1_vals * s1_vals\n\n elif OPTIMIZER_ID == 4: # LION\n s1_vals = s1_vals * beta2 + (1.0 - beta2) * g_vals\n update_norm = s1_vals\n\n elif OPTIMIZER_ID == 1: # RMSPROP\n s1_vals = s1_vals * beta1 + (1.0 - beta1) * g_vals * g_vals\n update_vals = g_vals / (tl.sqrt(s1_vals) + eps)\n update_norm = update_vals * update_vals\n\n elif OPTIMIZER_ID == 2: # ADAGRAD\n s1_vals = s1_vals + g_vals * g_vals\n update_vals = g_vals / (tl.sqrt(s1_vals) + eps)\n update_norm = update_vals * update_vals\n\n total_norm = tl.sum(tl.where(mask, update_norm, 0.0))\n\n tl.atomic_add(unorm_ptr, total_norm)\n\n\n@triton.jit\ndef _optimizer_update_2state_32bit_triton_kernel(\n g_ptr,\n p_ptr,\n state1_ptr,\n state2_ptr,\n unorm_ptr,\n max_unorm: tl.constexpr,\n param_norm,\n beta1: tl.constexpr,\n beta2: tl.constexpr,\n beta3,\n alpha,\n eps: tl.constexpr,\n weight_decay: tl.constexpr,\n step,\n beta1_step,\n beta2_step,","source_hash":"5929c0239b7327ada8f7185668fa6a7727b0c83759483613bd0b541d7b040ab5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_optim._optimizer_update_2state_32bit_triton_kernel","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_optim._optimizer_update_2state_32bit_triton_kernel#L149-L230","kind":"function","name":"_optimizer_update_2state_32bit_triton_kernel","path":"bitsandbytes/backends/triton/kernels_optim.py","language":"python","start_line":149,"end_line":230,"context_start_line":129,"context_end_line":250,"code":" elif OPTIMIZER_ID == 4: # LION\n s1_vals = s1_vals * beta2 + (1.0 - beta2) * g_vals\n update_norm = s1_vals\n\n elif OPTIMIZER_ID == 1: # RMSPROP\n s1_vals = s1_vals * beta1 + (1.0 - beta1) * g_vals * g_vals\n update_vals = g_vals / (tl.sqrt(s1_vals) + eps)\n update_norm = update_vals * update_vals\n\n elif OPTIMIZER_ID == 2: # ADAGRAD\n s1_vals = s1_vals + g_vals * g_vals\n update_vals = g_vals / (tl.sqrt(s1_vals) + eps)\n update_norm = update_vals * update_vals\n\n total_norm = tl.sum(tl.where(mask, update_norm, 0.0))\n\n tl.atomic_add(unorm_ptr, total_norm)\n\n\n@triton.jit\ndef _optimizer_update_2state_32bit_triton_kernel(\n g_ptr,\n p_ptr,\n state1_ptr,\n state2_ptr,\n unorm_ptr,\n max_unorm: tl.constexpr,\n param_norm,\n beta1: tl.constexpr,\n beta2: tl.constexpr,\n beta3,\n alpha,\n eps: tl.constexpr,\n weight_decay: tl.constexpr,\n step,\n beta1_step,\n beta2_step,\n lr,\n gnorm_scale: tl.constexpr,\n skip_zeros,\n n_elements,\n 
OPTIMIZER_ID: tl.constexpr,\n BLOCK_SIZE: tl.constexpr,\n N_PER_TH: tl.constexpr,\n):\n \"\"\"2-state optimizer kernel\"\"\"\n pid = tl.program_id(axis=0)\n block_start_idx = pid * N_PER_TH\n offsets = block_start_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE * N_PER_TH)\n mask = offsets < n_elements\n\n g_vals = tl.load(g_ptr + offsets, mask=mask, other=0.0).to(tl.float32)\n p_vals = tl.load(p_ptr + offsets, mask=mask, other=0.0).to(tl.float32)\n s1_vals = tl.load(state1_ptr + offsets, mask=mask, other=0.0)\n s2_vals = tl.load(state2_ptr + offsets, mask=mask, other=0.0)\n\n if OPTIMIZER_ID == 5: # ADEMAMIX\n s3_vals = tl.load(state1_ptr + n_elements + offsets, mask=mask, other=0.0)\n\n g_vals = gnorm_scale * g_vals\n\n update_scale = 1.0\n if max_unorm > 0.0:\n current_unorm = tl.sqrt(tl.load(unorm_ptr))\n if current_unorm > max_unorm * param_norm:\n update_scale = (max_unorm * param_norm) / current_unorm\n\n if OPTIMIZER_ID == 3: # ADAM\n s1_vals = s1_vals * beta1 + (1.0 - beta1) * g_vals\n s2_vals = s2_vals * beta2 + (1.0 - beta2) * g_vals * g_vals\n\n correction1 = 1.0 - beta1_step\n correction2 = tl.sqrt(1.0 - beta2_step)\n step_size = -lr * correction2 / correction1\n\n if weight_decay > 0.0:\n p_vals = p_vals * (1.0 - lr * weight_decay)\n\n update_val = update_scale * step_size * (s1_vals / (tl.sqrt(s2_vals) + eps * correction2))\n p_vals = p_vals + update_val\n\n elif OPTIMIZER_ID == 5: # ADEMAMIX\n s1_vals = s1_vals * beta1 + (1.0 - beta1) * g_vals # m1\n s3_vals = s3_vals * beta3 + (1.0 - beta3) * g_vals # m2\n s2_vals = s2_vals * beta2 + (1.0 - beta2) * g_vals * g_vals # nu\n\n correction1 = 1.0 - beta1_step\n correction2 = tl.sqrt(1.0 - beta2_step)\n\n if weight_decay > 0.0:\n p_vals = p_vals * (1.0 - lr * weight_decay)\n\n mixed_momentum = (s1_vals / correction1) + (alpha * s3_vals)\n adaptive_term = (tl.sqrt(s2_vals) / correction2) + eps\n p_vals = p_vals - lr * (mixed_momentum / adaptive_term)\n\n tl.store(p_ptr + offsets, p_vals, mask=mask)\n tl.store(state1_ptr + offsets, s1_vals, mask=mask)\n tl.store(state2_ptr + offsets, s2_vals, mask=mask)\n\n if OPTIMIZER_ID == 5: # ADEMAMIX\n tl.store(state1_ptr + n_elements + offsets, s3_vals, mask=mask)\n\n\n@triton.jit\ndef _optimizer_update_1state_32bit_triton_kernel(\n g_ptr,\n p_ptr,\n state1_ptr,\n state2_ptr,\n unorm_ptr,\n max_unorm: tl.constexpr,\n param_norm,\n beta1: tl.constexpr,\n beta2: tl.constexpr,\n beta3,\n alpha,\n eps: tl.constexpr,\n weight_decay: tl.constexpr,\n step,\n beta1_step,\n beta2_step,","source_hash":"5929c0239b7327ada8f7185668fa6a7727b0c83759483613bd0b541d7b040ab5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_optim._optimizer_update_1state_32bit_triton_kernel","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_optim._optimizer_update_1state_32bit_triton_kernel#L234-L308","kind":"function","name":"_optimizer_update_1state_32bit_triton_kernel","path":"bitsandbytes/backends/triton/kernels_optim.py","language":"python","start_line":234,"end_line":308,"context_start_line":214,"context_end_line":328,"code":"\n correction1 = 1.0 - beta1_step\n correction2 = tl.sqrt(1.0 - beta2_step)\n\n if weight_decay > 0.0:\n p_vals = p_vals * (1.0 - lr * weight_decay)\n\n mixed_momentum = (s1_vals / correction1) + (alpha * s3_vals)\n adaptive_term = (tl.sqrt(s2_vals) / correction2) + eps\n p_vals = p_vals - lr * (mixed_momentum / adaptive_term)\n\n tl.store(p_ptr + offsets, p_vals, mask=mask)\n tl.store(state1_ptr + offsets, s1_vals, mask=mask)\n 
tl.store(state2_ptr + offsets, s2_vals, mask=mask)\n\n if OPTIMIZER_ID == 5: # ADEMAMIX\n tl.store(state1_ptr + n_elements + offsets, s3_vals, mask=mask)\n\n\n@triton.jit\ndef _optimizer_update_1state_32bit_triton_kernel(\n g_ptr,\n p_ptr,\n state1_ptr,\n state2_ptr,\n unorm_ptr,\n max_unorm: tl.constexpr,\n param_norm,\n beta1: tl.constexpr,\n beta2: tl.constexpr,\n beta3,\n alpha,\n eps: tl.constexpr,\n weight_decay: tl.constexpr,\n step,\n beta1_step,\n beta2_step,\n lr,\n gnorm_scale: tl.constexpr,\n skip_zeros,\n n_elements,\n OPTIMIZER_ID: tl.constexpr,\n BLOCK_SIZE: tl.constexpr,\n N_PER_TH: tl.constexpr,\n):\n \"\"\"1-state optimizer kernel\"\"\"\n pid = tl.program_id(axis=0)\n block_start_idx = pid * N_PER_TH\n offsets = block_start_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE * N_PER_TH)\n mask = offsets < n_elements\n\n g_vals = tl.load(g_ptr + offsets, mask=mask, other=0.0).to(tl.float32)\n p_vals = tl.load(p_ptr + offsets, mask=mask, other=0.0).to(tl.float32)\n s1_vals = tl.load(state1_ptr + offsets, mask=mask, other=0.0)\n\n g_vals = gnorm_scale * g_vals\n if weight_decay > 0.0:\n g_vals = g_vals + p_vals * weight_decay\n\n update_scale = 1.0\n if max_unorm > 0.0:\n current_unorm = tl.sqrt(tl.load(unorm_ptr))\n if current_unorm > max_unorm * param_norm + eps:\n update_scale = (max_unorm * param_norm + eps) / current_unorm\n\n if OPTIMIZER_ID == 0: # MOMENTUM\n if step == 1:\n s1_vals = g_vals\n else:\n s1_vals = s1_vals * beta1 + g_vals\n\n update_val = update_scale * (-lr * s1_vals)\n p_vals = p_vals + update_val\n\n elif OPTIMIZER_ID == 4: # LION\n momentum_update = s1_vals * beta1 + (1.0 - beta1) * g_vals\n update_val = update_scale * lr * tl.where(momentum_update > 0, 1.0, tl.where(momentum_update < 0, -1.0, 0.0))\n p_vals = p_vals - update_val\n\n s1_vals = s1_vals * beta2 + (1.0 - beta2) * g_vals\n\n elif OPTIMIZER_ID == 1: # RMSPROP\n s1_vals = s1_vals * beta1 + (1.0 - beta1) * g_vals * g_vals\n\n update_val = update_scale * lr * g_vals / (tl.sqrt(s1_vals) + eps)\n p_vals = p_vals - update_val\n\n elif OPTIMIZER_ID == 2: # ADAGRAD\n s1_vals = s1_vals + g_vals * g_vals\n\n update_val = lr * g_vals / (tl.sqrt(s1_vals) + eps)\n p_vals = p_vals - update_val\n\n tl.store(p_ptr + offsets, p_vals, mask=mask)\n tl.store(state1_ptr + offsets, s1_vals, mask=mask)\n\n\nname2optimizer_32bit_fn = {\n \"adam\": {\n \"preprocess\": _optimizer_precondition_2state_32bit,\n \"update\": _optimizer_update_2state_32bit_triton_kernel,\n },\n \"ademamix\": {\n \"preprocess\": _optimizer_precondition_2state_32bit,\n \"update\": _optimizer_update_2state_32bit_triton_kernel,\n },\n \"momentum\": {\n \"preprocess\": _optimizer_precondition_1state_32bit,\n \"update\": _optimizer_update_1state_32bit_triton_kernel,\n },\n \"rmsprop\": {\n \"preprocess\": _optimizer_precondition_1state_32bit,\n \"update\": _optimizer_update_1state_32bit_triton_kernel,\n },\n \"adagrad\": {","source_hash":"5929c0239b7327ada8f7185668fa6a7727b0c83759483613bd0b541d7b040ab5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_optim.optimizer_update_32bit_impl","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_optim.optimizer_update_32bit_impl#L339-L479","kind":"function","name":"optimizer_update_32bit_impl","path":"bitsandbytes/backends/triton/kernels_optim.py","language":"python","start_line":339,"end_line":479,"context_start_line":319,"context_end_line":499,"code":" },\n \"momentum\": {\n \"preprocess\": _optimizer_precondition_1state_32bit,\n 
\"update\": _optimizer_update_1state_32bit_triton_kernel,\n },\n \"rmsprop\": {\n \"preprocess\": _optimizer_precondition_1state_32bit,\n \"update\": _optimizer_update_1state_32bit_triton_kernel,\n },\n \"adagrad\": {\n \"preprocess\": _optimizer_precondition_1state_32bit,\n \"update\": _optimizer_update_1state_32bit_triton_kernel,\n },\n \"lion\": {\n \"preprocess\": _optimizer_precondition_1state_32bit,\n \"update\": _optimizer_update_1state_32bit_triton_kernel,\n },\n}\n\n\ndef optimizer_update_32bit_impl(\n optimizer_name: str,\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n unorm_vec: Optional[torch.Tensor],\n max_unorm: float,\n param_norm: float,\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n weight_decay: float,\n step: int,\n lr: float,\n gnorm_scale: float = 1.0,\n skip_zeros=False,\n) -> None:\n \"\"\"\n 32-bit optimizer implemented by Triton\n \"\"\"\n if skip_zeros:\n raise NotImplementedError(\"skip_zeros is not supported on XPU yet\")\n\n BLOCK_SIZE = 256\n N_PER_TH = 1 # Number of blocks processed per thread.\n grid = (triton.cdiv(p.numel(), BLOCK_SIZE * N_PER_TH),)\n optimizer_id = name2optimizer_id[optimizer_name]\n fn_preprocess = name2optimizer_32bit_fn[optimizer_name][\"preprocess\"]\n fn_update = name2optimizer_32bit_fn[optimizer_name][\"update\"]\n\n # In torch=2.7 on XPU there is an issue with libdevice.pow, leading to an error.\n # For backwards compatibility we precompute the bias correction factors.\n beta1_step = beta1**step\n beta2_step = beta2**step\n\n if optimizer_name == \"lion\":\n fn_update[grid](\n g,\n p,\n state1,\n state2,\n unorm_vec,\n max_unorm,\n param_norm,\n beta1,\n beta2,\n beta3,\n alpha,\n eps,\n weight_decay,\n step,\n beta1_step,\n beta2_step,\n lr,\n gnorm_scale,\n skip_zeros,\n p.numel(),\n optimizer_id,\n BLOCK_SIZE,\n N_PER_TH,\n num_warps=2,\n )\n\n if max_unorm > 0.0:\n unorm_vec.zero_()\n fn_preprocess[grid](\n g,\n p,\n state1,\n state2,\n unorm_vec,\n beta1,\n beta2,\n eps,\n weight_decay,\n step,\n beta1_step,\n beta2_step,\n lr,\n gnorm_scale,\n p.numel(),\n optimizer_id,\n BLOCK_SIZE,\n N_PER_TH,\n num_warps=2,\n )\n\n else:\n if max_unorm > 0.0:\n unorm_vec.zero_()\n fn_preprocess[grid](\n g,\n p,\n state1,\n state2,\n unorm_vec,\n beta1,\n beta2,\n eps,\n weight_decay,\n step,\n beta1_step,\n beta2_step,\n lr,\n gnorm_scale,\n p.numel(),\n optimizer_id,\n BLOCK_SIZE,\n N_PER_TH,\n num_warps=2,\n )\n\n fn_update[grid](\n g,\n p,\n state1,\n state2,\n unorm_vec,\n max_unorm,\n param_norm,\n beta1,\n beta2,\n beta3,\n alpha,\n eps,\n weight_decay,\n step,\n beta1_step,\n beta2_step,\n lr,\n gnorm_scale,\n skip_zeros,\n p.numel(),\n optimizer_id,\n BLOCK_SIZE,\n N_PER_TH,\n num_warps=2,\n )\n\n\n###########################################\n# Pure torch implementation for reference #\n###########################################\n\n\n@torch.compile\ndef _dequantize_blockwise_pytorch(\n A: torch.Tensor,\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n dtype: torch.dtype,\n) -> torch.Tensor:\n \"\"\"\n Pure PyTorch reference implementation for block-wise dequantization.\n \"\"\"\n if A.numel() == 0:\n return torch.empty_like(A, dtype=dtype)","source_hash":"5929c0239b7327ada8f7185668fa6a7727b0c83759483613bd0b541d7b040ab5","truncated":false} 
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_optim._dequantize_blockwise_pytorch","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_optim._dequantize_blockwise_pytorch#L488-L519","kind":"function","name":"_dequantize_blockwise_pytorch","path":"bitsandbytes/backends/triton/kernels_optim.py","language":"python","start_line":488,"end_line":519,"context_start_line":468,"context_end_line":539,"code":" step,\n beta1_step,\n beta2_step,\n lr,\n gnorm_scale,\n skip_zeros,\n p.numel(),\n optimizer_id,\n BLOCK_SIZE,\n N_PER_TH,\n num_warps=2,\n )\n\n\n###########################################\n# Pure torch implementation for reference #\n###########################################\n\n\n@torch.compile\ndef _dequantize_blockwise_pytorch(\n A: torch.Tensor,\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n dtype: torch.dtype,\n) -> torch.Tensor:\n \"\"\"\n Pure PyTorch reference implementation for block-wise dequantization.\n \"\"\"\n if A.numel() == 0:\n return torch.empty_like(A, dtype=dtype)\n\n A_flat = A.flatten()\n num_elements = A_flat.numel()\n\n dequantized_flat = code.to(A.device)[A_flat.long()].to(dtype)\n\n num_blocks = math.ceil(num_elements / blocksize)\n pad_len = num_blocks * blocksize - num_elements\n if pad_len > 0:\n dequantized_flat = torch.nn.functional.pad(dequantized_flat, (0, pad_len))\n\n dequantized_blocks = dequantized_flat.reshape(num_blocks, blocksize)\n\n rescaled_blocks = dequantized_blocks * absmax.unsqueeze(1).to(dtype)\n\n rescaled_flat = rescaled_blocks.flatten()\n if pad_len > 0:\n rescaled_flat = rescaled_flat[:-pad_len]\n\n return rescaled_flat.reshape(A.shape)\n\n\n@torch.compile\ndef _quantize_blockwise_pytorch(\n A: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Pure PyTorch reference implementation for block-wise quantization.\n \"\"\"\n if A.numel() == 0:\n return torch.empty_like(A, dtype=torch.uint8), torch.empty(0, dtype=torch.float32, device=A.device)\n\n A_flat = A.flatten()\n num_elements = A_flat.numel()\n\n num_blocks = math.ceil(num_elements / blocksize)\n\n pad_len = num_blocks * blocksize - num_elements","source_hash":"5929c0239b7327ada8f7185668fa6a7727b0c83759483613bd0b541d7b040ab5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_optim._quantize_blockwise_pytorch","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_optim._quantize_blockwise_pytorch#L523-L558","kind":"function","name":"_quantize_blockwise_pytorch","path":"bitsandbytes/backends/triton/kernels_optim.py","language":"python","start_line":523,"end_line":558,"context_start_line":503,"context_end_line":578,"code":"\n dequantized_flat = code.to(A.device)[A_flat.long()].to(dtype)\n\n num_blocks = math.ceil(num_elements / blocksize)\n pad_len = num_blocks * blocksize - num_elements\n if pad_len > 0:\n dequantized_flat = torch.nn.functional.pad(dequantized_flat, (0, pad_len))\n\n dequantized_blocks = dequantized_flat.reshape(num_blocks, blocksize)\n\n rescaled_blocks = dequantized_blocks * absmax.unsqueeze(1).to(dtype)\n\n rescaled_flat = rescaled_blocks.flatten()\n if pad_len > 0:\n rescaled_flat = rescaled_flat[:-pad_len]\n\n return rescaled_flat.reshape(A.shape)\n\n\n@torch.compile\ndef _quantize_blockwise_pytorch(\n A: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n) -> tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Pure PyTorch reference implementation for block-wise 
quantization.\n \"\"\"\n if A.numel() == 0:\n return torch.empty_like(A, dtype=torch.uint8), torch.empty(0, dtype=torch.float32, device=A.device)\n\n A_flat = A.flatten()\n num_elements = A_flat.numel()\n\n num_blocks = math.ceil(num_elements / blocksize)\n\n pad_len = num_blocks * blocksize - num_elements\n if pad_len > 0:\n A_flat = torch.nn.functional.pad(A_flat, (0, pad_len))\n\n A_blocks = A_flat.reshape(num_blocks, blocksize)\n\n absmax = torch.max(torch.abs(A_blocks), dim=1, keepdim=True)[0]\n absmax[absmax == 0] = 1.0\n\n scaled_blocks = A_blocks / absmax\n\n # Inefficient but straightforward quantization, takes a lot of memory\n diff = torch.abs(scaled_blocks.unsqueeze(2) - code.to(A.device))\n quantized_indices = torch.argmin(diff, dim=2).to(torch.uint8)\n\n quantized_flat = quantized_indices.flatten()\n if pad_len > 0:\n quantized_flat = quantized_flat[:-pad_len]\n\n return quantized_flat.reshape(A.shape), absmax.flatten()\n\n\n# Main updated function\ndef optimizer_update_8bit_blockwise_pytorch(\n p: torch.Tensor,\n g: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n beta1: float,\n beta2: float,\n beta3: float, # ADEMIX\n alpha: float, # ADEMIX\n eps: float,\n step: int,\n lr: float,\n qmap1: torch.Tensor,\n qmap2: Optional[torch.Tensor],\n absmax1: torch.Tensor,\n absmax2: Optional[torch.Tensor],\n weight_decay: float,","source_hash":"5929c0239b7327ada8f7185668fa6a7727b0c83759483613bd0b541d7b040ab5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_optim.optimizer_update_8bit_blockwise_pytorch","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_optim.optimizer_update_8bit_blockwise_pytorch#L562-L700","kind":"function","name":"optimizer_update_8bit_blockwise_pytorch","path":"bitsandbytes/backends/triton/kernels_optim.py","language":"python","start_line":562,"end_line":700,"context_start_line":542,"context_end_line":720,"code":"\n A_blocks = A_flat.reshape(num_blocks, blocksize)\n\n absmax = torch.max(torch.abs(A_blocks), dim=1, keepdim=True)[0]\n absmax[absmax == 0] = 1.0\n\n scaled_blocks = A_blocks / absmax\n\n # Inefficient but straightforward quantization, takes a lot of memory\n diff = torch.abs(scaled_blocks.unsqueeze(2) - code.to(A.device))\n quantized_indices = torch.argmin(diff, dim=2).to(torch.uint8)\n\n quantized_flat = quantized_indices.flatten()\n if pad_len > 0:\n quantized_flat = quantized_flat[:-pad_len]\n\n return quantized_flat.reshape(A.shape), absmax.flatten()\n\n\n# Main updated function\ndef optimizer_update_8bit_blockwise_pytorch(\n p: torch.Tensor,\n g: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n beta1: float,\n beta2: float,\n beta3: float, # ADEMIX\n alpha: float, # ADEMIX\n eps: float,\n step: int,\n lr: float,\n qmap1: torch.Tensor,\n qmap2: Optional[torch.Tensor],\n absmax1: torch.Tensor,\n absmax2: Optional[torch.Tensor],\n weight_decay: float,\n gnorm_scale: float,\n skip_zeros: bool,\n # ADEMIX\n *,\n optimizer_name: str,\n) -> None:\n \"\"\"\n Pure PyTorch implementation of the 8-bit block-wise optimizer update step.\n This version ensures high-precision updates for float16 parameters.\n \"\"\"\n if skip_zeros:\n raise ValueError(\"skip_zeros is not supported on XPU yet.\")\n\n blocksize = 256\n\n with torch.no_grad():\n # Dequantize states to perform updates in 32-bit precision\n if optimizer_name == \"ademamix\" and absmax1.ndim == 2:\n # For AdEMAMix, state1 holds two EMAs, so absmax1 is stacked.\n s1_1_fp32 = 
_dequantize_blockwise_pytorch(state1[0], absmax1[0], qmap1, blocksize, torch.float32)\n s1_2_fp32 = _dequantize_blockwise_pytorch(state1[1], absmax1[1], qmap1, blocksize, torch.float32)\n state1_fp32 = torch.stack([s1_1_fp32, s1_2_fp32])\n else:\n state1_fp32 = _dequantize_blockwise_pytorch(state1, absmax1, qmap1, blocksize, torch.float32)\n\n state2_fp32 = None\n if state2 is not None:\n state2_fp32 = _dequantize_blockwise_pytorch(state2, absmax2, qmap2, blocksize, torch.float32)\n\n grad = g.float() * gnorm_scale\n\n # Create a 32-bit copy of the parameter for high-precision updates\n p_fp32 = p.data.float()\n\n if optimizer_name == \"adam\":\n state1_fp32.mul_(beta1).add_(grad, alpha=1.0 - beta1)\n state2_fp32.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)\n\n bias_correction1 = 1.0 - beta1**step\n bias_correction2 = 1.0 - beta2**step\n\n denom = (state2_fp32.sqrt() / math.sqrt(bias_correction2)).add_(eps)\n\n if weight_decay > 0.0:\n p_fp32.mul_(1.0 - lr * weight_decay)\n p_fp32.addcdiv_(state1_fp32, denom, value=-lr / bias_correction1)\n\n elif optimizer_name == \"ademamix\":\n m1_fp32, m2_fp32 = state1_fp32[0], state1_fp32[1]\n nu_fp32 = state2_fp32\n\n m1_fp32.mul_(beta1).add_(grad, alpha=1.0 - beta1)\n m2_fp32.mul_(beta3).add_(grad, alpha=1.0 - beta3)\n nu_fp32.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)\n\n bias_correction1 = 1.0 - beta1**step\n bias_correction2 = math.sqrt(1.0 - beta2**step)\n\n update = (m1_fp32 / bias_correction1 + alpha * m2_fp32) / (nu_fp32.sqrt() / bias_correction2 + eps)\n\n if weight_decay > 0.0:\n p_fp32.mul_(1.0 - lr * weight_decay)\n\n p_fp32.add_(update, alpha=-lr)\n state1_fp32 = torch.stack([m1_fp32, m2_fp32])\n\n elif optimizer_name == \"momentum\":\n grad.add_(p_fp32, alpha=weight_decay)\n if step == 1:\n state1_fp32.copy_(grad)\n else:\n state1_fp32.mul_(beta1).add_(grad)\n p_fp32.add_(state1_fp32, alpha=-lr)\n\n elif optimizer_name == \"rmsprop\":\n grad.add_(p_fp32, alpha=weight_decay)\n state1_fp32.mul_(beta1).addcmul_(grad, grad, value=1.0 - beta1)\n p_fp32.addcdiv_(grad, state1_fp32.sqrt().add_(eps), value=-lr)\n\n elif optimizer_name == \"lion\":\n if weight_decay > 0.0:\n p_fp32.mul_(1.0 - lr * weight_decay)\n\n update_dir = torch.sign(state1_fp32.mul(beta1) + grad.mul(1.0 - beta1))\n p_fp32.add_(update_dir, alpha=-lr)\n\n state1_fp32.mul_(beta2).add_(grad, alpha=1.0 - beta2)\n\n elif optimizer_name == \"adagrad\":\n grad.add_(p_fp32, alpha=weight_decay)\n state1_fp32.addcmul_(grad, grad, value=1.0)\n p_fp32.addcdiv_(grad, state1_fp32.sqrt().add_(eps), value=-lr)\n\n else:\n raise NotImplementedError(\n f\"Pure PyTorch implementation for optimizer '{optimizer_name}' is not available.\"\n )\n\n # Copy the updated 32-bit parameter back to the original tensor\n p.data.copy_(p_fp32)\n\n # Re-quantize states and update state tensors in-place\n if optimizer_name == \"ademamix\":\n new_m1_8bit, new_absmax_m1 = _quantize_blockwise_pytorch(state1_fp32[0], qmap1, blocksize)\n new_m2_8bit, new_absmax_m2 = _quantize_blockwise_pytorch(state1_fp32[1], qmap1, blocksize)\n state1[0].copy_(new_m1_8bit)\n state1[1].copy_(new_m2_8bit)\n absmax1[0].copy_(new_absmax_m1)\n absmax1[1].copy_(new_absmax_m2)\n\n new_state2_8bit, new_absmax2 = _quantize_blockwise_pytorch(state2_fp32, qmap2, blocksize)\n state2.copy_(new_state2_8bit)\n absmax2.copy_(new_absmax2)\n else:\n new_state1_8bit, new_absmax1 = _quantize_blockwise_pytorch(state1_fp32, qmap1, blocksize)\n state1.copy_(new_state1_8bit)\n absmax1.copy_(new_absmax1)\n\n if state2_fp32 is not None:\n 
new_state2_8bit, new_absmax2 = _quantize_blockwise_pytorch(state2_fp32, qmap2, blocksize)\n state2.copy_(new_state2_8bit)\n absmax2.copy_(new_absmax2)\n\n\n#######################################\n# Mixed torch + triton implementation #\n#######################################\n\n\n# Much more memory efficient than the pure PyTorch reference due to using Triton for quantization/dequantization\ndef optimizer_update_8bit_blockwise_triton_quant(\n p: torch.Tensor,\n g: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n beta1: float,\n beta2: float,\n beta3: float, # ADEMAMIX\n alpha: float, # ADEMAMIX\n eps: float,\n step: int,\n lr: float,","source_hash":"5929c0239b7327ada8f7185668fa6a7727b0c83759483613bd0b541d7b040ab5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_optim.optimizer_update_8bit_blockwise_triton_quant","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_optim.optimizer_update_8bit_blockwise_triton_quant#L709-L847","kind":"function","name":"optimizer_update_8bit_blockwise_triton_quant","path":"bitsandbytes/backends/triton/kernels_optim.py","language":"python","start_line":709,"end_line":847,"context_start_line":689,"context_end_line":867,"code":" new_state2_8bit, new_absmax2 = quantize_blockwise_triton(state2_fp32, qmap2, blocksize)\n state2.copy_(new_state2_8bit)\n absmax2.copy_(new_absmax2)\n else:\n new_state1_8bit, new_absmax1 = quantize_blockwise_triton(state1_fp32, qmap1, blocksize)\n state1.copy_(new_state1_8bit)\n absmax1.copy_(new_absmax1)\n\n if state2_fp32 is not None:\n new_state2_8bit, new_absmax2 = quantize_blockwise_triton(state2_fp32, qmap2, blocksize)\n state2.copy_(new_state2_8bit)\n absmax2.copy_(new_absmax2)\n\n\n#######################################\n# Mixed torch + triton implementation #\n#######################################\n\n\n# Much more memory efficient than the pure PyTorch reference due to using Triton for quantization/dequantization\ndef optimizer_update_8bit_blockwise_triton_quant(\n p: torch.Tensor,\n g: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n beta1: float,\n beta2: float,\n beta3: float, # ADEMAMIX\n alpha: float, # ADEMAMIX\n eps: float,\n step: int,\n lr: float,\n qmap1: torch.Tensor,\n qmap2: Optional[torch.Tensor],\n absmax1: torch.Tensor,\n absmax2: Optional[torch.Tensor],\n weight_decay: float,\n gnorm_scale: float,\n skip_zeros: bool,\n # ADEMAMIX\n *,\n optimizer_name: str,\n) -> None:\n \"\"\"\n Mixed PyTorch + Triton implementation of the 8-bit block-wise optimizer update step:\n the update math runs in PyTorch, while state quantization/dequantization runs in Triton.\n This version ensures high-precision updates for float16 parameters.\n \"\"\"\n if skip_zeros and not torch.any(g):\n return\n\n blocksize = 256\n grad = g.float() * gnorm_scale\n\n with torch.no_grad():\n # Create a 32-bit copy of the parameter for high-precision updates\n p_fp32 = p.data.float()\n\n # Dequantize states to perform updates in 32-bit precision\n if optimizer_name == \"ademamix\" and absmax1.ndim == 2:\n # For AdEMAMix, state1 holds two EMAs, so absmax1 is stacked.\n s1_1_fp32 = dequant_8bit_blockwise(state1[0], absmax1[0], qmap1, blocksize, dtype=torch.float32)\n s1_2_fp32 = dequant_8bit_blockwise(state1[1], absmax1[1], qmap1, blocksize, dtype=torch.float32)\n state1_fp32 = torch.stack([s1_1_fp32, s1_2_fp32])\n else:\n state1_fp32 = dequant_8bit_blockwise(state1, absmax1, qmap1, blocksize, dtype=torch.float32)\n\n state2_fp32 = None\n if state2 is not None:\n state2_fp32 = dequant_8bit_blockwise(state2, absmax2, qmap2, blocksize, dtype=torch.float32)\n\n # Apply optimizer-specific 
update logic\n if optimizer_name == \"adam\":\n if weight_decay > 0.0:\n p_fp32.mul_(1.0 - lr * weight_decay)\n\n state1_fp32.mul_(beta1).add_(grad, alpha=1.0 - beta1)\n state2_fp32.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)\n\n bias_correction1 = 1.0 - beta1**step\n bias_correction2 = 1.0 - beta2**step\n\n denom = (state2_fp32.sqrt() / math.sqrt(bias_correction2)).add_(eps)\n p_fp32.addcdiv_(state1_fp32, denom, value=-lr / bias_correction1)\n\n elif optimizer_name == \"ademamix\":\n m1_fp32, m2_fp32 = state1_fp32[0], state1_fp32[1]\n nu_fp32 = state2_fp32\n\n m1_fp32.mul_(beta1).add_(grad, alpha=1.0 - beta1)\n m2_fp32.mul_(beta3).add_(grad, alpha=1.0 - beta3)\n nu_fp32.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)\n\n bias_correction1 = 1.0 - beta1**step\n bias_correction2 = math.sqrt(1.0 - beta2**step)\n\n update = (m1_fp32 / bias_correction1 + alpha * m2_fp32) / (nu_fp32.sqrt() / bias_correction2 + eps)\n\n if weight_decay > 0.0:\n p_fp32.mul_(1.0 - lr * weight_decay)\n\n p_fp32.add_(update, alpha=-lr)\n state1_fp32 = torch.stack([m1_fp32, m2_fp32])\n\n elif optimizer_name == \"momentum\":\n grad.add_(p_fp32, alpha=weight_decay)\n if step == 1:\n state1_fp32.copy_(grad)\n else:\n state1_fp32.mul_(beta1).add_(grad)\n p_fp32.add_(state1_fp32, alpha=-lr)\n\n elif optimizer_name == \"rmsprop\":\n grad.add_(p_fp32, alpha=weight_decay)\n state1_fp32.mul_(beta1).addcmul_(grad, grad, value=1.0 - beta1)\n p_fp32.addcdiv_(grad, state1_fp32.sqrt().add_(eps), value=-lr)\n\n elif optimizer_name == \"lion\":\n if weight_decay > 0.0:\n p_fp32.mul_(1.0 - lr * weight_decay)\n\n update_dir = torch.sign(state1_fp32.mul(beta1) + grad.mul(1.0 - beta1))\n p_fp32.add_(update_dir, alpha=-lr)\n\n state1_fp32.mul_(beta2).add_(grad, alpha=1.0 - beta2)\n\n elif optimizer_name == \"adagrad\":\n grad.add_(p_fp32, alpha=weight_decay)\n state1_fp32.addcmul_(grad, grad, value=1.0)\n p_fp32.addcdiv_(grad, state1_fp32.sqrt().add_(eps), value=-lr)\n\n else:\n raise NotImplementedError(\n f\"Pure PyTorch implementation for optimizer '{optimizer_name}' is not available.\"\n )\n\n # Copy the updated 32-bit parameter back to the original tensor\n p.data.copy_(p_fp32)\n\n # Re-quantize states and update state tensors in-place\n if optimizer_name == \"ademamix\":\n new_m1_8bit, new_absmax_m1 = quantize_blockwise_triton(state1_fp32[0], qmap1, blocksize)\n new_m2_8bit, new_absmax_m2 = quantize_blockwise_triton(state1_fp32[1], qmap1, blocksize)\n state1[0].copy_(new_m1_8bit)\n state1[1].copy_(new_m2_8bit)\n absmax1[0].copy_(new_absmax_m1)\n absmax1[1].copy_(new_absmax_m2)\n\n new_state2_8bit, new_absmax2 = quantize_blockwise_triton(state2_fp32, qmap2, blocksize)\n state2.copy_(new_state2_8bit)\n absmax2.copy_(new_absmax2)\n else:\n new_state1_8bit, new_absmax1 = quantize_blockwise_triton(state1_fp32, qmap1, blocksize)\n state1.copy_(new_state1_8bit)\n absmax1.copy_(new_absmax1)\n\n if state2_fp32 is not None:\n new_state2_8bit, new_absmax2 = quantize_blockwise_triton(state2_fp32, qmap2, blocksize)\n state2.copy_(new_state2_8bit)\n absmax2.copy_(new_absmax2)\n\n\n#########################\n# Triton implementation #\n#########################\n\n\n@triton.jit\ndef _optimizer_update_1state_8bit_blockwise_triton_kernel(\n # Tensors\n p_ptr,\n g_ptr,\n state1_ptr,\n state2_ptr,\n beta1: tl.constexpr,\n beta2: tl.constexpr,\n beta3,\n alpha,\n eps: tl.constexpr,\n step,","source_hash":"5929c0239b7327ada8f7185668fa6a7727b0c83759483613bd0b541d7b040ab5","truncated":false} 
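Before the fully fused kernels below, it may help to see the block-wise 8-bit scheme in isolation. This is a small round-trip sketch (not from the repo) built on the pure-PyTorch reference helpers defined earlier; the uniform code table is an illustrative assumption, whereas the real qmaps come from bitsandbytes' dynamic 8-bit map.

```python
import torch

from bitsandbytes.backends.triton.kernels_optim import (
    _dequantize_blockwise_pytorch,
    _quantize_blockwise_pytorch,
)

code = torch.linspace(-1.0, 1.0, 256)  # hypothetical uniform 8-bit code table
x = torch.randn(1000)                  # stand-in for an optimizer state tensor

# Quantize: each 256-element block is scaled by its own absmax, then mapped to the
# nearest code index; dequantize reverses the lookup and per-block rescaling.
q, absmax = _quantize_blockwise_pytorch(x, code, 256)
x_hat = _dequantize_blockwise_pytorch(q, absmax, code, 256, torch.float32)

# With a uniform code of spacing 2/255, the per-element round-trip error is
# bounded by absmax_of_block * (2 / 255) / 2.
print((x - x_hat).abs().max())
```

The fused Triton kernels that follow perform exactly this dequantize-update-requantize cycle per 256-element block, but inside a single kernel launch, so only the uint8 codes and per-block absmax values are ever written back to global memory.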
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_optim._optimizer_update_1state_8bit_blockwise_triton_kernel","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_optim._optimizer_update_1state_8bit_blockwise_triton_kernel#L856-L935","kind":"function","name":"_optimizer_update_1state_8bit_blockwise_triton_kernel","path":"bitsandbytes/backends/triton/kernels_optim.py","language":"python","start_line":856,"end_line":935,"context_start_line":836,"context_end_line":955,"code":" new_state2_8bit, new_absmax2 = quantize_blockwise_triton(state2_fp32, qmap2, blocksize)\n state2.copy_(new_state2_8bit)\n absmax2.copy_(new_absmax2)\n else:\n new_state1_8bit, new_absmax1 = quantize_blockwise_triton(state1_fp32, qmap1, blocksize)\n state1.copy_(new_state1_8bit)\n absmax1.copy_(new_absmax1)\n\n if state2_fp32 is not None:\n new_state2_8bit, new_absmax2 = quantize_blockwise_triton(state2_fp32, qmap2, blocksize)\n state2.copy_(new_state2_8bit)\n absmax2.copy_(new_absmax2)\n\n\n#########################\n# Triton implementation #\n#########################\n\n\n@triton.jit\ndef _optimizer_update_1state_8bit_blockwise_triton_kernel(\n # Tensors\n p_ptr,\n g_ptr,\n state1_ptr,\n state2_ptr,\n beta1: tl.constexpr,\n beta2: tl.constexpr,\n beta3,\n alpha,\n eps: tl.constexpr,\n step,\n beta1_step,\n beta2_step,\n lr,\n qmap1_ptr,\n qmap2_ptr,\n absmax1_ptr,\n absmax2_ptr,\n weight_decay,\n gnorm_scale,\n # Meta-parameters\n n_elements,\n BLOCK_SIZE_N: tl.constexpr,\n N_PER_TH: tl.constexpr,\n OPTIMIZER_ID: tl.constexpr,\n):\n \"\"\"\n Triton kernel for 8-bit optimizers that use one momentum state.\n Supports: Momentum, RMSprop, Adagrad, Lion.\n \"\"\"\n # 1. Boilerplate: pid, offsets, mask\n pid = tl.program_id(axis=0)\n block_start_idx = pid * N_PER_TH\n offsets = block_start_idx * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N * N_PER_TH)\n mask = offsets < n_elements\n\n # 2. Load and dequantize tensors\n g = tl.load(g_ptr + offsets, mask=mask, other=0.0).to(tl.float32) * gnorm_scale\n p = tl.load(p_ptr + offsets, mask=mask, other=0.0).to(tl.float32)\n s1 = dequant_8bit_blockwise_kernel_util(state1_ptr, offsets, qmap1_ptr, absmax1_ptr, mask, BLOCK_SIZE_N)\n\n # 3. Optimizer-specific updates\n # LION\n if weight_decay > 0.0 and OPTIMIZER_ID == 2:\n p *= 1.0 - lr * weight_decay\n # Apply weight decay for momentum, rmsprop, adagrad\n elif weight_decay > 0.0:\n g += p * weight_decay\n\n # Momentum update\n if OPTIMIZER_ID == 0: # MOMENTUM\n if step == 1:\n s1 = g\n else:\n s1 = s1 * beta1 + g\n p -= lr * s1\n\n # RMSprop update\n elif OPTIMIZER_ID == 1: # RMSPROP\n s1 = s1 * beta1 + (1.0 - beta1) * g * g\n p -= lr * (g / (tl.sqrt(s1) + eps))\n\n # Adagrad update\n elif OPTIMIZER_ID == 2: # ADAGRAD\n s1 += g * g\n p -= lr * (g / (tl.sqrt(s1) + eps))\n\n # Lion update\n elif OPTIMIZER_ID == 4: # LION\n val = s1 * beta1 + (1.0 - beta1) * g\n update = tl.where(val > 0.0, 1.0, tl.where(val < 0.0, -1.0, 0.0))\n p -= lr * update\n s1 = s1 * beta2 + (1.0 - beta2) * g\n\n # 4. 
Store updated parameter and requantized state\n tl.store(p_ptr + offsets, p.to(p_ptr.dtype.element_ty), mask=mask)\n s1_codes, new_absmax1 = quantize_8bit_blockwise_kernel_util(s1, qmap1_ptr, 256, BLOCK_SIZE_N, N_PER_TH)\n tl.store(state1_ptr + offsets, s1_codes, mask=mask)\n tl.store(absmax1_ptr + block_start_idx + tl.arange(0, N_PER_TH), new_absmax1)\n\n\n@triton.jit\ndef _optimizer_update_2state_8bit_blockwise_triton_kernel(\n # Tensors\n p_ptr,\n g_ptr,\n state1_ptr,\n state2_ptr,\n beta1: tl.constexpr,\n beta2: tl.constexpr,\n # ademamix changes alpha and beta3\n beta3,\n # ademamix changes alpha and beta3\n alpha,\n eps: tl.constexpr,\n step,\n beta1_step,\n beta2_step,\n lr,","source_hash":"5929c0239b7327ada8f7185668fa6a7727b0c83759483613bd0b541d7b040ab5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_optim._optimizer_update_2state_8bit_blockwise_triton_kernel","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_optim._optimizer_update_2state_8bit_blockwise_triton_kernel#L939-L1063","kind":"function","name":"_optimizer_update_2state_8bit_blockwise_triton_kernel","path":"bitsandbytes/backends/triton/kernels_optim.py","language":"python","start_line":939,"end_line":1063,"context_start_line":919,"context_end_line":1083,"code":" # Adagrad update\n elif OPTIMIZER_ID == 2: # ADAGRAD\n s1 += g * g\n p -= lr * (g / (tl.sqrt(s1) + eps))\n\n # Lion update\n elif OPTIMIZER_ID == 4: # LION\n val = s1 * beta1 + (1.0 - beta1) * g\n update = tl.where(val > 0.0, 1.0, tl.where(val < 0.0, -1.0, 0.0))\n p -= lr * update\n s1 = s1 * beta2 + (1.0 - beta2) * g\n\n # 4. Store updated parameter and requantized state\n tl.store(p_ptr + offsets, p.to(p_ptr.dtype.element_ty), mask=mask)\n s1_codes, new_absmax1 = quantize_8bit_blockwise_kernel_util(s1, qmap1_ptr, 256, BLOCK_SIZE_N, N_PER_TH)\n tl.store(state1_ptr + offsets, s1_codes, mask=mask)\n tl.store(absmax1_ptr + block_start_idx + tl.arange(0, N_PER_TH), new_absmax1)\n\n\n@triton.jit\ndef _optimizer_update_2state_8bit_blockwise_triton_kernel(\n # Tensors\n p_ptr,\n g_ptr,\n state1_ptr,\n state2_ptr,\n beta1: tl.constexpr,\n beta2: tl.constexpr,\n # ademamix changes alpha and beta3\n beta3,\n # ademamix changes alpha and beta3\n alpha,\n eps: tl.constexpr,\n step,\n beta1_step,\n beta2_step,\n lr,\n qmap1_ptr,\n qmap2_ptr,\n absmax1_ptr,\n absmax2_ptr,\n weight_decay: tl.constexpr,\n gnorm_scale: tl.constexpr,\n # Meta-parameters\n n_elements,\n BLOCK_SIZE_N: tl.constexpr,\n N_PER_TH: tl.constexpr,\n OPTIMIZER_ID: tl.constexpr,\n):\n \"\"\"\n Triton kernel for 8-bit optimizers that use two momentum states.\n Supports: Adam, AdEMAMix.\n \"\"\"\n # 1. Boilerplate: pid, offsets, mask\n pid = tl.program_id(axis=0)\n block_start_idx = pid * N_PER_TH\n offsets = block_start_idx * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N * N_PER_TH)\n mask = offsets < n_elements\n\n # 2. Load and dequantize tensors\n g = tl.load(g_ptr + offsets, mask=mask, other=0.0).to(tl.float32) * gnorm_scale\n p = tl.load(p_ptr + offsets, mask=mask, other=0.0).to(tl.float32)\n\n # 3. 
Optimizer-specific updates\n if OPTIMIZER_ID == 3: # ADAM\n s1 = dequant_8bit_blockwise_kernel_util(state1_ptr, offsets, qmap1_ptr, absmax1_ptr, mask, BLOCK_SIZE_N)\n s2 = dequant_8bit_blockwise_kernel_util(state2_ptr, offsets, qmap2_ptr, absmax2_ptr, mask, BLOCK_SIZE_N)\n\n s1 = s1 * beta1 + (1.0 - beta1) * g\n s2 = s2 * beta2 + (1.0 - beta2) * g * g\n\n # In torch=2.7 on XPU there is an issue with libdevice.pow, leading to an error.\n # For backwards compatibility we precompute the bias correction factors.\n # bias_correction1 = 1.0 - libdevice.pow(beta1, step)\n # bias_correction2 = 1.0 - libdevice.pow(beta2, step)\n bias_correction1 = 1.0 - beta1_step\n bias_correction2 = 1.0 - beta2_step\n\n if weight_decay > 0.0:\n p *= 1.0 - lr * weight_decay\n\n denom = tl.sqrt(s2) / tl.sqrt(bias_correction2) + eps\n p -= (lr / bias_correction1) * (s1 / denom)\n\n # Store updated parameter\n tl.store(p_ptr + offsets, p.to(p_ptr.dtype.element_ty), mask=mask)\n\n # Requantize and store states\n s1_codes, new_absmax1 = quantize_8bit_blockwise_kernel_util(s1, qmap1_ptr, 256, BLOCK_SIZE_N, N_PER_TH)\n tl.store(state1_ptr + offsets, s1_codes, mask=mask)\n tl.store(absmax1_ptr + block_start_idx + tl.arange(0, N_PER_TH), new_absmax1)\n\n s2_codes, new_absmax2 = quantize_8bit_blockwise_kernel_util(s2, qmap2_ptr, 256, BLOCK_SIZE_N, N_PER_TH)\n tl.store(state2_ptr + offsets, s2_codes, mask=mask)\n tl.store(absmax2_ptr + block_start_idx + tl.arange(0, N_PER_TH), new_absmax2)\n\n elif OPTIMIZER_ID == 5: # ADEMAMIX\n # AdEMAMix has a stacked state1 (m1, m2) and state2 (nu)\n m1 = dequant_8bit_blockwise_kernel_util(state1_ptr, offsets, qmap1_ptr, absmax1_ptr, mask, BLOCK_SIZE_N)\n m2 = dequant_8bit_blockwise_kernel_util(\n state1_ptr + n_elements,\n offsets,\n qmap1_ptr,\n absmax1_ptr + n_elements // BLOCK_SIZE_N,\n mask,\n BLOCK_SIZE_N,\n )\n nu = dequant_8bit_blockwise_kernel_util(state2_ptr, offsets, qmap2_ptr, absmax2_ptr, mask, BLOCK_SIZE_N)\n\n m1 = m1 * beta1 + (1.0 - beta1) * g\n m2 = m2 * beta3 + (1.0 - beta3) * g\n nu = nu * beta2 + (1.0 - beta2) * g * g\n\n # In torch=2.7 on XPU there is an issue with libdevice.pow, leading to an error.\n # For backwards compatibility we precompute the bias correction factors.\n # bias_correction1 = 1.0 - libdevice.pow(beta1, step)\n # bias_correction2 = tl.sqrt(1.0 - libdevice.pow(beta2, step))\n bias_correction1 = 1.0 - beta1_step\n bias_correction2 = tl.sqrt(1.0 - beta2_step)\n\n update = (m1 / bias_correction1 + alpha * m2) / (tl.sqrt(nu) / bias_correction2 + eps)\n\n if weight_decay > 0.0:\n p *= 1.0 - lr * weight_decay\n\n p -= lr * update\n\n # Store updated parameter\n tl.store(p_ptr + offsets, p.to(p_ptr.dtype.element_ty), mask=mask)\n\n # Requantize and store all three states\n m1_codes, new_absmax_m1 = quantize_8bit_blockwise_kernel_util(m1, qmap1_ptr, 256, BLOCK_SIZE_N, N_PER_TH)\n tl.store(state1_ptr + offsets, m1_codes, mask=mask)\n tl.store(absmax1_ptr + block_start_idx + tl.arange(0, N_PER_TH), new_absmax_m1)\n\n m2_codes, new_absmax_m2 = quantize_8bit_blockwise_kernel_util(m2, qmap1_ptr, 256, BLOCK_SIZE_N, N_PER_TH)\n tl.store(state1_ptr + n_elements + offsets, m2_codes, mask=mask)\n tl.store(\n absmax1_ptr + block_start_idx + tl.arange(0, N_PER_TH) + n_elements // BLOCK_SIZE_N,\n new_absmax_m2,\n )\n\n nu_codes, new_absmax_nu = quantize_8bit_blockwise_kernel_util(nu, qmap2_ptr, 256, BLOCK_SIZE_N, N_PER_TH)\n tl.store(state2_ptr + offsets, nu_codes, mask=mask)\n tl.store(absmax2_ptr + block_start_idx + tl.arange(0, N_PER_TH), 
new_absmax_nu)\n\n\nname2optimizer_fn = {\n \"momentum\": _optimizer_update_1state_8bit_blockwise_triton_kernel,\n \"rmsprop\": _optimizer_update_1state_8bit_blockwise_triton_kernel,\n \"adagrad\": _optimizer_update_1state_8bit_blockwise_triton_kernel,\n \"adam\": _optimizer_update_2state_8bit_blockwise_triton_kernel,\n \"lion\": _optimizer_update_1state_8bit_blockwise_triton_kernel,\n \"ademamix\": _optimizer_update_2state_8bit_blockwise_triton_kernel,\n}\n\n\ndef optimizer_update_8bit_blockwise_impl(\n optimizer_name: str,\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n beta1: float,\n beta2: float,","source_hash":"5929c0239b7327ada8f7185668fa6a7727b0c83759483613bd0b541d7b040ab5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_optim.optimizer_update_8bit_blockwise_impl","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_optim.optimizer_update_8bit_blockwise_impl#L1076-L1147","kind":"function","name":"optimizer_update_8bit_blockwise_impl","path":"bitsandbytes/backends/triton/kernels_optim.py","language":"python","start_line":1076,"end_line":1147,"context_start_line":1056,"context_end_line":1154,"code":" tl.store(\n absmax1_ptr + block_start_idx + tl.arange(0, N_PER_TH) + n_elements // BLOCK_SIZE_N,\n new_absmax_m2,\n )\n\n nu_codes, new_absmax_nu = quantize_8bit_blockwise_kernel_util(nu, qmap2_ptr, 256, BLOCK_SIZE_N, N_PER_TH)\n tl.store(state2_ptr + offsets, nu_codes, mask=mask)\n tl.store(absmax2_ptr + block_start_idx + tl.arange(0, N_PER_TH), new_absmax_nu)\n\n\nname2optimizer_fn = {\n \"momentum\": _optimizer_update_1state_8bit_blockwise_triton_kernel,\n \"rmsprop\": _optimizer_update_1state_8bit_blockwise_triton_kernel,\n \"adagrad\": _optimizer_update_1state_8bit_blockwise_triton_kernel,\n \"adam\": _optimizer_update_2state_8bit_blockwise_triton_kernel,\n \"lion\": _optimizer_update_1state_8bit_blockwise_triton_kernel,\n \"ademamix\": _optimizer_update_2state_8bit_blockwise_triton_kernel,\n}\n\n\ndef optimizer_update_8bit_blockwise_impl(\n optimizer_name: str,\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n step: int,\n lr: float,\n qmap1: torch.Tensor,\n qmap2: Optional[torch.Tensor],\n absmax1: torch.Tensor,\n absmax2: Optional[torch.Tensor],\n weight_decay: float = 0.0,\n gnorm_scale: float = 1.0,\n skip_zeros=False,\n) -> None:\n if skip_zeros:\n raise NotImplementedError(\"skip_zeros is not supported on XPU yet\")\n\n if optimizer_name == \"ademamix\":\n # Handle AdEMAMIX's stacked state tensors\n if state1.dim() < 2 or state1.shape[0] != 2:\n raise ValueError(\n f\"For ademamix, state1 must be a stacked tensor of shape (2, ...), but got {state1.shape}\"\n )\n if absmax1.dim() < 2 or absmax1.shape[0] != 2:\n raise ValueError(\n f\"For ademamix, absmax1 must be a stacked tensor of shape (2, ...), but got {absmax1.shape}\"\n )\n\n BLOCK_SIZE = 256\n N_PER_TH = 1 # Number of blocks processed per thread.\n grid = (triton.cdiv(p.numel(), BLOCK_SIZE * N_PER_TH),)\n fn = name2optimizer_fn[optimizer_name]\n optimizer_id = name2optimizer_id[optimizer_name]\n\n # In torch=2.7 on XPU there is an issue with libdevice.pow, leading to an error.\n # For backwards compatibility we precompute the bias correction factors.\n beta1_step = beta1**step\n beta2_step = beta2**step\n\n fn[grid](\n p,\n g,\n state1,\n state2,\n beta1,\n beta2,\n beta3,\n 
alpha,\n eps,\n step,\n beta1_step,\n beta2_step,\n lr,\n qmap1,\n qmap2,\n absmax1,\n absmax2,\n weight_decay,\n gnorm_scale,\n p.numel(),\n BLOCK_SIZE_N=BLOCK_SIZE,\n N_PER_TH=N_PER_TH,\n OPTIMIZER_ID=optimizer_id,\n num_warps=2,\n )\n\n\n# optimizer_update_8bit_blockwise_impl = optimizer_update_8bit_blockwise_pytorch\n# optimizer_update_8bit_blockwise_impl = torch.compile(optimizer_update_8bit_blockwise_pytorch_impl)\n# optimizer_update_8bit_blockwise_impl = optimizer_update_8bit_blockwise_triton_quant\n# optimizer_update_8bit_blockwise_impl = torch.compile(optimizer_update_8bit_blockwise_triton_quant)\noptimizer_update_8bit_blockwise_impl = optimizer_update_8bit_blockwise_impl","source_hash":"5929c0239b7327ada8f7185668fa6a7727b0c83759483613bd0b541d7b040ab5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_8bit_quant","uri":"program://bitsandbytes/module/bitsandbytes.backends.triton.kernels_8bit_quant#L1-L195","kind":"module","name":"bitsandbytes.backends.triton.kernels_8bit_quant","path":"bitsandbytes/backends/triton/kernels_8bit_quant.py","language":"python","start_line":1,"end_line":195,"context_start_line":1,"context_end_line":195,"code":"import torch\n\nimport triton\nimport triton.language as tl\n\n\n# @triton.autotune(\n# configs=[\n# # triton.Config({'SPLIT_SIZE': 64}),\n# # triton.Config({'SPLIT_SIZE': 64, 'grf_mode': 'large'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 64, 'grf_mode': 'auto'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 64, 'grf_mode': 'large'}, num_stages=4, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 64, 'grf_mode': 'auto'}, num_stages=4, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 128}),\n# # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'large'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'auto'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'large'}, num_stages=4, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'auto'}, num_stages=4, num_warps=32),\n# triton.Config({\"SPLIT_SIZE\": 256}),\n# # triton.Config({'SPLIT_SIZE': 256, 'grf_mode': 'large'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 256, 'grf_mode': 'auto'}, num_stages=2, num_warps=32),\n# triton.Config({\"SPLIT_SIZE\": 512}),\n# # triton.Config({'SPLIT_SIZE': 1024}),\n# ],\n# key=[\"num_paired_elements\", \"QUANT_BLOCK\"],\n# )\n@triton.jit\ndef dequant_8bit_kernel(\n a_ptr,\n out_ptr,\n code_ptr,\n absmax_ptr,\n n,\n QUANT_BLOCK: tl.constexpr,\n SPLIT_SIZE: tl.constexpr,\n):\n pid = tl.program_id(axis=0)\n block_start = pid * SPLIT_SIZE\n offsets = block_start + tl.arange(0, SPLIT_SIZE)\n mask = offsets < n\n out_dq = dequant_8bit_blockwise_kernel_util(a_ptr, offsets, code_ptr, absmax_ptr, mask, QUANT_BLOCK)\n tl.store(out_ptr + offsets, out_dq, mask)\n\n\ndef dequant_8bit_blockwise(\n a: torch.Tensor,\n absmax: torch.Tensor,\n quant_state_code: torch.Tensor,\n quant_blocksize: int = 64,\n dtype: torch.dtype = None,\n out: torch.Tensor = None,\n):\n n = a.numel()\n if out is None:\n if dtype is None:\n raise ValueError(\"If out is None, dtype must be specified\")\n out = torch.empty_like(a, dtype=dtype, device=a.device)\n\n SPLIT_SIZE = 256\n # grid = lambda META: (triton.cdiv(number_of_paired_elements, META[\"SPLIT_SIZE\"]),)\n grid = (triton.cdiv(n, SPLIT_SIZE),)\n dequant_8bit_kernel[grid](\n a,\n out,\n quant_state_code,\n absmax,\n n,\n quant_blocksize,\n SPLIT_SIZE,\n )\n return 
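To make the launch bookkeeping in `optimizer_update_8bit_blockwise_impl` above concrete: each Triton program covers `BLOCK_SIZE * N_PER_TH` elements, the one- vs. two-state kernel choice follows `name2optimizer_fn`, and the `beta**step` bias-correction factors are computed on the host because of the XPU `libdevice.pow` issue noted in the code. A minimal sketch of that host-side logic (the `launch_plan` helper and `TWO_STATE` set are illustrative names of mine, not part of the module):

```python
import math

BLOCK_SIZE = 256  # matches the constant hard-coded in the impl above
N_PER_TH = 1      # blocks processed per program

TWO_STATE = {"adam", "ademamix"}  # routed to the 2-state kernel; the rest are 1-state

def launch_plan(optimizer_name: str, numel: int, beta1: float, beta2: float, step: int):
    # grid size: one program per BLOCK_SIZE * N_PER_TH elements (triton.cdiv equivalent)
    grid = (math.ceil(numel / (BLOCK_SIZE * N_PER_TH)),)
    # bias-correction factors precomputed on the host, mirroring beta1_step/beta2_step above
    beta1_step, beta2_step = beta1**step, beta2**step
    return grid, optimizer_name in TWO_STATE, beta1_step, beta2_step

print(launch_plan("adam", 10_000, 0.9, 0.999, 100))  # ((40,), True, ...)
```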
out\n\n\n# @triton.autotune(\n# configs=[\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 1, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 2, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 1}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 2}),\n# ],\n# key=[\"n_elements\"],\n# )\n@triton.jit\ndef quantize_8bit_blockwise_kernel(\n A_ptr,\n code_ptr,\n absmax_ptr,\n out_ptr,\n n_elements,\n BLOCK_SIZE: tl.constexpr,\n CODE_SIZE: tl.constexpr,\n SPLIT_NUM_BLOCKS: tl.constexpr,\n):\n block_start_idx = tl.program_id(0) * SPLIT_NUM_BLOCKS\n thread_idx = tl.arange(0, SPLIT_NUM_BLOCKS * BLOCK_SIZE)\n\n offsets = block_start_idx * BLOCK_SIZE + thread_idx\n mask = offsets < n_elements\n\n A = tl.load(A_ptr + offsets, mask=mask, other=0.0)\n\n quantized, absmax = quantize_8bit_blockwise_kernel_util(A, code_ptr, CODE_SIZE, BLOCK_SIZE, SPLIT_NUM_BLOCKS)\n tl.store(absmax_ptr + block_start_idx + tl.arange(0, SPLIT_NUM_BLOCKS), absmax)\n tl.store(out_ptr + offsets, quantized, mask=mask)\n\n\ndef quantize_blockwise_triton(A, code, blocksize, absmax=None, out=None):\n n = A.numel()\n blocks = -(n // -blocksize)\n\n if absmax is None:\n absmax = torch.empty((blocks,), device=A.device, dtype=A.dtype)\n if out is None:\n out = torch.empty_like(A.flatten(), dtype=torch.uint8)\n\n split_num_blocks = 1\n grid = (triton.cdiv(blocks, split_num_blocks),)\n # grid = lambda META: (triton.cdiv(blocks, META[\"SPLIT_NUM_BLOCKS\"]),)\n quantize_8bit_blockwise_kernel[grid](\n A_ptr=A,\n code_ptr=code,\n absmax_ptr=absmax,\n out_ptr=out,\n n_elements=n,\n BLOCK_SIZE=blocksize,\n CODE_SIZE=code.numel(),\n SPLIT_NUM_BLOCKS=split_num_blocks,\n # num_warps=1,\n # num_stages=2,\n )\n out = out.reshape(A.shape)\n\n return out, absmax\n\n\n@triton.jit\ndef quantize_8bit_blockwise_kernel_util(\n a,\n code_ptr,\n CODE_SIZE: tl.constexpr,\n BLOCK_SIZE: tl.constexpr,\n N_PER_TH: tl.constexpr,\n):\n # To be able to process several blocks -> (BLOCK_SIZE, SPLIT_NUM_BLOCKS)\n a_reshaped = tl.reshape(a, (N_PER_TH, BLOCK_SIZE))\n\n # Calculating absmax for each block\n absmax = tl.max(tl.abs(a_reshaped), axis=1)\n\n a_normalized = a_reshaped / absmax[:, None]\n a_normalized = tl.clamp(a_normalized, -1.0, 1.0)\n\n lower_pivot = tl.zeros((N_PER_TH, BLOCK_SIZE), dtype=tl.int32)\n upper_pivot = tl.full((N_PER_TH, BLOCK_SIZE), CODE_SIZE - 1, dtype=tl.int32)\n\n # ceil(log2(code_size)) = 8; in the general case this should be an input parameter\n for _ in range(8):\n pivot = (lower_pivot + upper_pivot) // 2\n val = tl.load(code_ptr + pivot)\n is_higher = a_normalized > val # code[pivot]\n lower_pivot = tl.where(is_higher, pivot, lower_pivot)\n upper_pivot = tl.where(is_higher, upper_pivot, pivot)\n\n # Choose closest level\n lower_val = tl.load(code_ptr + lower_pivot)\n upper_val = tl.load(code_ptr + upper_pivot)\n lower_dist = tl.abs(a_normalized - lower_val)\n upper_dist = tl.abs(a_normalized - upper_val)\n quantized = tl.where(lower_dist <= upper_dist, lower_pivot, upper_pivot).to(tl.uint8)\n\n # too slow approach\n # diff = tl.abs(A_normalized[:, :, None] - code[None, None, :])\n # quantized = tl.argmin(diff, axis=2).to(tl.uint8)\n\n quantized_flat = tl.reshape(quantized, (BLOCK_SIZE * N_PER_TH,))\n return quantized_flat, absmax\n\n\n@triton.jit\ndef dequant_8bit_blockwise_kernel_util(\n a_ptr,\n offsets,\n code_ptr,\n absmax_ptr,\n mask,\n BLOCK_SIZE: tl.constexpr,\n):\n a = tl.load(a_ptr + offsets, mask, other=0).to(tl.uint8)\n scaled_int8 = tl.load(code_ptr + a, 
mask)\n # Load scales\n absmax_offsets = offsets // BLOCK_SIZE\n absmax = tl.load(absmax_ptr + absmax_offsets, mask=mask, other=0.0, eviction_policy=\"evict_last\")\n # Apply scales\n out_dq = scaled_int8 * absmax\n return out_dq","source_hash":"92774d59c8f4ffe143b52997529c9ac5fc4f7c73831aafd6cf9d425de9a3adfc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_8bit_quant.dequant_8bit_kernel","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_8bit_quant.dequant_8bit_kernel#L28-L42","kind":"function","name":"dequant_8bit_kernel","path":"bitsandbytes/backends/triton/kernels_8bit_quant.py","language":"python","start_line":28,"end_line":42,"context_start_line":8,"context_end_line":62,"code":"# configs=[\n# # triton.Config({'SPLIT_SIZE': 64}),\n# # triton.Config({'SPLIT_SIZE': 64, 'grf_mode': 'large'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 64, 'grf_mode': 'auto'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 64, 'grf_mode': 'large'}, num_stages=4, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 64, 'grf_mode': 'auto'}, num_stages=4, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 128}),\n# # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'large'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'auto'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'large'}, num_stages=4, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'auto'}, num_stages=4, num_warps=32),\n# triton.Config({\"SPLIT_SIZE\": 256}),\n# # triton.Config({'SPLIT_SIZE': 256, 'grf_mode': 'large'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 256, 'grf_mode': 'auto'}, num_stages=2, num_warps=32),\n# triton.Config({\"SPLIT_SIZE\": 512}),\n# # triton.Config({'SPLIT_SIZE': 1024}),\n# ],\n# key=[\"num_paired_elements\", \"QUANT_BLOCK\"],\n# )\n@triton.jit\ndef dequant_8bit_kernel(\n a_ptr,\n out_ptr,\n code_ptr,\n absmax_ptr,\n n,\n QUANT_BLOCK: tl.constexpr,\n SPLIT_SIZE: tl.constexpr,\n):\n pid = tl.program_id(axis=0)\n block_start = pid * SPLIT_SIZE\n offsets = block_start + tl.arange(0, SPLIT_SIZE)\n mask = offsets < n\n out_dq = dequant_8bit_blockwise_kernel_util(a_ptr, offsets, code_ptr, absmax_ptr, mask, QUANT_BLOCK)\n tl.store(out_ptr + offsets, out_dq, mask)\n\n\ndef dequant_8bit_blockwise(\n a: torch.Tensor,\n absmax: torch.Tensor,\n quant_state_code: torch.Tensor,\n quant_blocksize: int = 64,\n dtype: torch.dtype = None,\n out: torch.Tensor = None,\n):\n n = a.numel()\n if out is None:\n if dtype is None:\n raise ValueError(\"If out is None, dtype must be specified\")\n out = torch.empty_like(a, dtype=dtype, device=a.device)\n\n SPLIT_SIZE = 256\n # grid = lambda META: (triton.cdiv(number_of_paired_elements, META[\"SPLIT_SIZE\"]),)\n grid = (triton.cdiv(n, SPLIT_SIZE),)\n dequant_8bit_kernel[grid](","source_hash":"92774d59c8f4ffe143b52997529c9ac5fc4f7c73831aafd6cf9d425de9a3adfc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_8bit_quant.dequant_8bit_blockwise","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_8bit_quant.dequant_8bit_blockwise#L45-L71","kind":"function","name":"dequant_8bit_blockwise","path":"bitsandbytes/backends/triton/kernels_8bit_quant.py","language":"python","start_line":45,"end_line":71,"context_start_line":25,"context_end_line":91,"code":"# key=[\"num_paired_elements\", \"QUANT_BLOCK\"],\n# )\n@triton.jit\ndef 
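For intuition, here is a small pure-PyTorch reference of the same blockwise scheme the module above implements in Triton: per-block absmax scaling, nearest-code lookup (which the Triton util does with a fixed 8-step binary search), and the inverse `code[q] * absmax` on the way back. This is a sketch for illustration only (the `_ref` names and the linear stand-in code table are mine, not the library API):

```python
import torch

def quantize_8bit_blockwise_ref(A: torch.Tensor, code: torch.Tensor, blocksize: int = 256):
    flat = A.reshape(-1).float()
    n = flat.numel()
    pad = -n % blocksize  # pad the tail so every block is full
    blocks = torch.nn.functional.pad(flat, (0, pad)).reshape(-1, blocksize)
    absmax = blocks.abs().amax(dim=1)  # one scale per block
    normalized = (blocks / absmax.clamp(min=1e-12)[:, None]).clamp(-1, 1)
    # nearest code level via brute-force argmin (the "too slow approach" in the kernel)
    q = (normalized[:, :, None] - code[None, None, :]).abs().argmin(dim=-1)
    return q.reshape(-1)[:n].to(torch.uint8), absmax

def dequantize_8bit_blockwise_ref(q, absmax, code, blocksize: int = 256):
    # code lookup, then re-apply the per-block scale
    return code[q.long()] * absmax.repeat_interleave(blocksize)[: q.numel()]

code = torch.linspace(-1, 1, 256)  # stand-in for the real quantile code table
A = torch.randn(1000)
q, absmax = quantize_8bit_blockwise_ref(A, code)
A_hat = dequantize_8bit_blockwise_ref(q, absmax, code)
assert (A - A_hat).abs().max() < 0.05  # round-trip error bounded by half a code step
```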
dequant_8bit_kernel(\n a_ptr,\n out_ptr,\n code_ptr,\n absmax_ptr,\n n,\n QUANT_BLOCK: tl.constexpr,\n SPLIT_SIZE: tl.constexpr,\n):\n pid = tl.program_id(axis=0)\n block_start = pid * SPLIT_SIZE\n offsets = block_start + tl.arange(0, SPLIT_SIZE)\n mask = offsets < n\n out_dq = dequant_8bit_blockwise_kernel_util(a_ptr, offsets, code_ptr, absmax_ptr, mask, QUANT_BLOCK)\n tl.store(out_ptr + offsets, out_dq, mask)\n\n\ndef dequant_8bit_blockwise(\n a: torch.Tensor,\n absmax: torch.Tensor,\n quant_state_code: torch.Tensor,\n quant_blocksize: int = 64,\n dtype: torch.dtype = None,\n out: torch.Tensor = None,\n):\n n = a.numel()\n if out is None:\n if dtype is None:\n raise ValueError(\"If out is None, dtype must be specified\")\n out = torch.empty_like(a, dtype=dtype, device=a.device)\n\n SPLIT_SIZE = 256\n # grid = lambda META: (triton.cdiv(number_of_paired_elements, META[\"SPLIT_SIZE\"]),)\n grid = (triton.cdiv(n, SPLIT_SIZE),)\n dequant_8bit_kernel[grid](\n a,\n out,\n quant_state_code,\n absmax,\n n,\n quant_blocksize,\n SPLIT_SIZE,\n )\n return out\n\n\n# @triton.autotune(\n# configs=[\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 1, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 2, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 1}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 2}),\n# ],\n# key=[\"n_elements\"],\n# )\n@triton.jit\ndef quantize_8bit_blockwise_kernel(\n A_ptr,\n code_ptr,\n absmax_ptr,\n out_ptr,\n n_elements,\n BLOCK_SIZE: tl.constexpr,\n CODE_SIZE: tl.constexpr,","source_hash":"92774d59c8f4ffe143b52997529c9ac5fc4f7c73831aafd6cf9d425de9a3adfc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_8bit_quant.quantize_8bit_blockwise_kernel","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_8bit_quant.quantize_8bit_blockwise_kernel#L84-L104","kind":"function","name":"quantize_8bit_blockwise_kernel","path":"bitsandbytes/backends/triton/kernels_8bit_quant.py","language":"python","start_line":84,"end_line":104,"context_start_line":64,"context_end_line":124,"code":" out,\n quant_state_code,\n absmax,\n n,\n quant_blocksize,\n SPLIT_SIZE,\n )\n return out\n\n\n# @triton.autotune(\n# configs=[\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 1, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 2, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 1}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 2}),\n# ],\n# key=[\"n_elements\"],\n# )\n@triton.jit\ndef quantize_8bit_blockwise_kernel(\n A_ptr,\n code_ptr,\n absmax_ptr,\n out_ptr,\n n_elements,\n BLOCK_SIZE: tl.constexpr,\n CODE_SIZE: tl.constexpr,\n SPLIT_NUM_BLOCKS: tl.constexpr,\n):\n block_start_idx = tl.program_id(0) * SPLIT_NUM_BLOCKS\n thread_idx = tl.arange(0, SPLIT_NUM_BLOCKS * BLOCK_SIZE)\n\n offsets = block_start_idx * BLOCK_SIZE + thread_idx\n mask = offsets < n_elements\n\n A = tl.load(A_ptr + offsets, mask=mask, other=0.0)\n\n quantized, absmax = quantize_8bit_blockwise_kernel_util(A, code_ptr, CODE_SIZE, BLOCK_SIZE, SPLIT_NUM_BLOCKS)\n tl.store(absmax_ptr + block_start_idx + tl.arange(0, SPLIT_NUM_BLOCKS), absmax)\n tl.store(out_ptr + offsets, quantized, mask=mask)\n\n\ndef quantize_blockwise_triton(A, code, blocksize, absmax=None, out=None):\n n = A.numel()\n blocks = -(n // -blocksize)\n\n if absmax is None:\n absmax = torch.empty((blocks,), device=A.device, dtype=A.dtype)\n if out is None:\n 
out = torch.empty_like(A.flatten(), dtype=torch.uint8)\n\n split_num_blocks = 1\n grid = (triton.cdiv(blocks, split_num_blocks),)\n # grid = lambda META: (triton.cdiv(blocks, META[\"SPLIT_NUM_BLOCKS\"]),)\n quantize_8bit_blockwise_kernel[grid](\n A_ptr=A,\n code_ptr=code,\n absmax_ptr=absmax,\n out_ptr=out,\n n_elements=n,","source_hash":"92774d59c8f4ffe143b52997529c9ac5fc4f7c73831aafd6cf9d425de9a3adfc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_8bit_quant.quantize_blockwise_triton","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_8bit_quant.quantize_blockwise_triton#L107-L133","kind":"function","name":"quantize_blockwise_triton","path":"bitsandbytes/backends/triton/kernels_8bit_quant.py","language":"python","start_line":107,"end_line":133,"context_start_line":87,"context_end_line":153,"code":" absmax_ptr,\n out_ptr,\n n_elements,\n BLOCK_SIZE: tl.constexpr,\n CODE_SIZE: tl.constexpr,\n SPLIT_NUM_BLOCKS: tl.constexpr,\n):\n block_start_idx = tl.program_id(0) * SPLIT_NUM_BLOCKS\n thread_idx = tl.arange(0, SPLIT_NUM_BLOCKS * BLOCK_SIZE)\n\n offsets = block_start_idx * BLOCK_SIZE + thread_idx\n mask = offsets < n_elements\n\n A = tl.load(A_ptr + offsets, mask=mask, other=0.0)\n\n quantized, absmax = quantize_8bit_blockwise_kernel_util(A, code_ptr, CODE_SIZE, BLOCK_SIZE, SPLIT_NUM_BLOCKS)\n tl.store(absmax_ptr + block_start_idx + tl.arange(0, SPLIT_NUM_BLOCKS), absmax)\n tl.store(out_ptr + offsets, quantized, mask=mask)\n\n\ndef quantize_blockwise_triton(A, code, blocksize, absmax=None, out=None):\n n = A.numel()\n blocks = -(n // -blocksize)\n\n if absmax is None:\n absmax = torch.empty((blocks,), device=A.device, dtype=A.dtype)\n if out is None:\n out = torch.empty_like(A.flatten(), dtype=torch.uint8)\n\n split_num_blocks = 1\n grid = (triton.cdiv(blocks, split_num_blocks),)\n # grid = lambda META: (triton.cdiv(blocks, META[\"SPLIT_NUM_BLOCKS\"]),)\n quantize_8bit_blockwise_kernel[grid](\n A_ptr=A,\n code_ptr=code,\n absmax_ptr=absmax,\n out_ptr=out,\n n_elements=n,\n BLOCK_SIZE=blocksize,\n CODE_SIZE=code.numel(),\n SPLIT_NUM_BLOCKS=split_num_blocks,\n # num_warps=1,\n # num_stages=2,\n )\n out = out.reshape(A.shape)\n\n return out, absmax\n\n\n@triton.jit\ndef quantize_8bit_blockwise_kernel_util(\n a,\n code_ptr,\n CODE_SIZE: tl.constexpr,\n BLOCK_SIZE: tl.constexpr,\n N_PER_TH: tl.constexpr,\n):\n # To be able process several blocks -> (BLOCK_SIZE, SPLIT_NUM_BLOCKS)\n a_reshaped = tl.reshape(a, (N_PER_TH, BLOCK_SIZE))\n\n # Calculating absmax for each block\n absmax = tl.max(tl.abs(a_reshaped), axis=1)\n\n a_normalized = a_reshaped / absmax[:, None]\n a_normalized = tl.clamp(a_normalized, -1.0, 1.0)\n\n lower_pivot = tl.zeros((N_PER_TH, BLOCK_SIZE), dtype=tl.int32)","source_hash":"92774d59c8f4ffe143b52997529c9ac5fc4f7c73831aafd6cf9d425de9a3adfc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_8bit_quant.quantize_8bit_blockwise_kernel_util","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_8bit_quant.quantize_8bit_blockwise_kernel_util#L137-L176","kind":"function","name":"quantize_8bit_blockwise_kernel_util","path":"bitsandbytes/backends/triton/kernels_8bit_quant.py","language":"python","start_line":137,"end_line":176,"context_start_line":117,"context_end_line":195,"code":" grid = (triton.cdiv(blocks, split_num_blocks),)\n # grid = lambda META: (triton.cdiv(blocks, META[\"SPLIT_NUM_BLOCKS\"]),)\n 
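One detail worth calling out in `quantize_blockwise_triton` above is the ceil-division idiom `blocks = -(n // -blocksize)`, which computes the number of quantization blocks without any floating-point division. A short self-check:

```python
# -(n // -b) is ceil(n / b) using only integer floor division:
# floor-dividing a negated numerator rounds toward -inf, so negating back rounds up.
for n, b in [(1000, 256), (1024, 256), (1, 256)]:
    assert -(n // -b) == (n + b - 1) // b  # agrees with the classic idiom
    print(n, b, -(n // -b))  # -> 4, 4, 1
```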
quantize_8bit_blockwise_kernel[grid](\n A_ptr=A,\n code_ptr=code,\n absmax_ptr=absmax,\n out_ptr=out,\n n_elements=n,\n BLOCK_SIZE=blocksize,\n CODE_SIZE=code.numel(),\n SPLIT_NUM_BLOCKS=split_num_blocks,\n # num_warps=1,\n # num_stages=2,\n )\n out = out.reshape(A.shape)\n\n return out, absmax\n\n\n@triton.jit\ndef quantize_8bit_blockwise_kernel_util(\n a,\n code_ptr,\n CODE_SIZE: tl.constexpr,\n BLOCK_SIZE: tl.constexpr,\n N_PER_TH: tl.constexpr,\n):\n # To be able to process several blocks -> (BLOCK_SIZE, SPLIT_NUM_BLOCKS)\n a_reshaped = tl.reshape(a, (N_PER_TH, BLOCK_SIZE))\n\n # Calculating absmax for each block\n absmax = tl.max(tl.abs(a_reshaped), axis=1)\n\n a_normalized = a_reshaped / absmax[:, None]\n a_normalized = tl.clamp(a_normalized, -1.0, 1.0)\n\n lower_pivot = tl.zeros((N_PER_TH, BLOCK_SIZE), dtype=tl.int32)\n upper_pivot = tl.full((N_PER_TH, BLOCK_SIZE), CODE_SIZE - 1, dtype=tl.int32)\n\n # ceil(log2(code_size)) = 8; in the general case this should be an input parameter\n for _ in range(8):\n pivot = (lower_pivot + upper_pivot) // 2\n val = tl.load(code_ptr + pivot)\n is_higher = a_normalized > val # code[pivot]\n lower_pivot = tl.where(is_higher, pivot, lower_pivot)\n upper_pivot = tl.where(is_higher, upper_pivot, pivot)\n\n # Choose closest level\n lower_val = tl.load(code_ptr + lower_pivot)\n upper_val = tl.load(code_ptr + upper_pivot)\n lower_dist = tl.abs(a_normalized - lower_val)\n upper_dist = tl.abs(a_normalized - upper_val)\n quantized = tl.where(lower_dist <= upper_dist, lower_pivot, upper_pivot).to(tl.uint8)\n\n # too slow approach\n # diff = tl.abs(A_normalized[:, :, None] - code[None, None, :])\n # quantized = tl.argmin(diff, axis=2).to(tl.uint8)\n\n quantized_flat = tl.reshape(quantized, (BLOCK_SIZE * N_PER_TH,))\n return quantized_flat, absmax\n\n\n@triton.jit\ndef dequant_8bit_blockwise_kernel_util(\n a_ptr,\n offsets,\n code_ptr,\n absmax_ptr,\n mask,\n BLOCK_SIZE: tl.constexpr,\n):\n a = tl.load(a_ptr + offsets, mask, other=0).to(tl.uint8)\n scaled_int8 = tl.load(code_ptr + a, mask)\n # Load scales\n absmax_offsets = offsets // BLOCK_SIZE\n absmax = tl.load(absmax_ptr + absmax_offsets, mask=mask, other=0.0, eviction_policy=\"evict_last\")\n # Apply scales\n out_dq = scaled_int8 * absmax\n return out_dq","source_hash":"92774d59c8f4ffe143b52997529c9ac5fc4f7c73831aafd6cf9d425de9a3adfc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.triton.kernels_8bit_quant.dequant_8bit_blockwise_kernel_util","uri":"program://bitsandbytes/function/bitsandbytes.backends.triton.kernels_8bit_quant.dequant_8bit_blockwise_kernel_util#L180-L195","kind":"function","name":"dequant_8bit_blockwise_kernel_util","path":"bitsandbytes/backends/triton/kernels_8bit_quant.py","language":"python","start_line":180,"end_line":195,"context_start_line":160,"context_end_line":195,"code":" is_higher = a_normalized > val # code[pivot]\n lower_pivot = tl.where(is_higher, pivot, lower_pivot)\n upper_pivot = tl.where(is_higher, upper_pivot, pivot)\n\n # Choose closest level\n lower_val = tl.load(code_ptr + lower_pivot)\n upper_val = tl.load(code_ptr + upper_pivot)\n lower_dist = tl.abs(a_normalized - lower_val)\n upper_dist = tl.abs(a_normalized - upper_val)\n quantized = tl.where(lower_dist <= upper_dist, lower_pivot, upper_pivot).to(tl.uint8)\n\n # too slow approach\n # diff = tl.abs(A_normalized[:, :, None] - code[None, None, :])\n # quantized = tl.argmin(diff, axis=2).to(tl.uint8)\n\n quantized_flat = tl.reshape(quantized, (BLOCK_SIZE * N_PER_TH,))\n return 
quantized_flat, absmax\n\n\n@triton.jit\ndef dequant_8bit_blockwise_kernel_util(\n a_ptr,\n offsets,\n code_ptr,\n absmax_ptr,\n mask,\n BLOCK_SIZE: tl.constexpr,\n):\n a = tl.load(a_ptr + offsets, mask, other=0).to(tl.uint8)\n scaled_int8 = tl.load(code_ptr + a, mask)\n # Load scales\n absmax_offsets = offsets // BLOCK_SIZE\n absmax = tl.load(absmax_ptr + absmax_offsets, mask=mask, other=0.0, eviction_policy=\"evict_last\")\n # Apply scales\n out_dq = scaled_int8 * absmax\n return out_dq","source_hash":"92774d59c8f4ffe143b52997529c9ac5fc4f7c73831aafd6cf9d425de9a3adfc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.cpu.ops","uri":"program://bitsandbytes/module/bitsandbytes.backends.cpu.ops#L1-L101","kind":"module","name":"bitsandbytes.backends.cpu.ops","path":"bitsandbytes/backends/cpu/ops.py","language":"python","start_line":1,"end_line":101,"context_start_line":1,"context_end_line":101,"code":"import ctypes as ct\nimport logging\n\nimport torch\n\nfrom bitsandbytes.functional import get_ptr\n\nfrom ..._ops import register_kernel\nfrom ...cextension import ErrorHandlerMockBNBNativeLibrary, lib\n\nlogger = logging.getLogger(__name__)\n\n# torch._int_mm for s8@s8->s32 is supported on CPU from torch 2.4+.\n# However, we can overflow if we use this without AVX512_VNNI support.\n# This is fixed in torch 2.6+, so we set this as the minimum to be safe.\n# For more information: https://github.com/pytorch/pytorch/pull/136942\n# TODO(matthewdouglas): aarch64?\nif torch.__version__ >= (2, 6):\n\n @register_kernel(\"bitsandbytes::int8_linear_matmul\", \"cpu\")\n def _(A: torch.Tensor, B: torch.Tensor):\n return torch._int_mm(\n A.reshape(-1, A.shape[-1]),\n B.t(),\n ).reshape(*A.shape[:-1], B.shape[0])\n\n\nif not isinstance(lib, ErrorHandlerMockBNBNativeLibrary):\n\n @register_kernel(\"bitsandbytes::quantize_blockwise\", \"cpu\")\n def _(A: torch.Tensor, code: torch.Tensor, blocksize: int) -> tuple[torch.Tensor, torch.Tensor]:\n torch._check_is_size(blocksize)\n\n n = A.numel()\n\n # Only FP32 has a C++ kernel\n if A.dtype == torch.float32:\n blocks = -(n // -blocksize)\n\n absmax = torch.empty((blocks,), device=A.device, dtype=torch.float32)\n out = torch.empty_like(A, dtype=torch.uint8)\n\n lib.cquantize_blockwise_cpu_fp32(\n get_ptr(code),\n get_ptr(A),\n get_ptr(absmax),\n get_ptr(out),\n ct.c_longlong(blocksize),\n ct.c_longlong(n),\n )\n else:\n rem = n % blocksize\n has_rem = rem > 0\n blocks = n // blocksize + has_rem\n absmax = torch.zeros((blocks,), device=A.device, dtype=torch.float32)\n A_reshaped = A.reshape(n)\n A_com = A_reshaped[: n - rem]\n A_com_reshaped = A_com.reshape(n // blocksize, blocksize)\n absmax[: blocks - has_rem] = torch.abs(A_com_reshaped).max(dim=-1)[0]\n scaled_A = torch.clamp(A_com_reshaped * (1 / absmax[: blocks - has_rem].view(-1, 1)), -1, 1)\n scaled_A = scaled_A.reshape(-1)\n if has_rem:\n absmax[-1] = torch.abs(A_reshaped[n - rem :]).max()\n scaled_A_rem = torch.clamp(A_reshaped[n - rem :] * (1 / absmax[-1]), -1, 1)\n scaled_A = torch.cat([scaled_A, scaled_A_rem], dim=0)\n\n diff = torch.abs(scaled_A.unsqueeze(-1) - code.to(scaled_A.device))\n out = torch.argmin(diff, dim=-1).to(torch.uint8).to(scaled_A.device).reshape(A.shape)\n\n return out, absmax\n\n @register_kernel(\"bitsandbytes::dequantize_blockwise\", \"cpu\")\n def _(\n A: torch.Tensor, absmax: torch.Tensor, code: torch.Tensor, blocksize: int, dtype: torch.dtype\n ) -> torch.Tensor:\n torch._check_is_size(blocksize)\n torch._check(A.dtype == torch.uint8, lambda: f\"A 
must be uint8, got {A.dtype}\")\n\n # Only FP32 has a C++ kernel\n if dtype == torch.float32:\n out = torch.empty_like(A, dtype=dtype)\n\n lib.cdequantize_blockwise_cpu_fp32(\n get_ptr(code),\n get_ptr(A),\n get_ptr(absmax),\n get_ptr(out),\n ct.c_longlong(blocksize),\n ct.c_longlong(A.numel()),\n )\n else:\n out = code[A.reshape(-1).int()]\n blocks = out.shape[-1] // blocksize\n res = out.shape[-1] % blocksize\n if res != 0:\n out = torch.nn.functional.pad(out, (0, blocksize - res), mode=\"constant\", value=0)\n out = (out.view(-1, blocksize) * absmax.view(-1, 1)).to(dtype).reshape(-1)\n out = out[: blocks * blocksize + res]\n out = out.reshape(A.shape)\n\n return out","source_hash":"386883e4f935273743d6426b5a53ffc55bc3686f898041efbf234ba2d9962504","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.cpu.ops._","uri":"program://bitsandbytes/function/bitsandbytes.backends.cpu.ops._#L73-L101","kind":"function","name":"_","path":"bitsandbytes/backends/cpu/ops.py","language":"python","start_line":73,"end_line":101,"context_start_line":53,"context_end_line":101,"code":" has_rem = rem > 0\n blocks = n // blocksize + has_rem\n absmax = torch.zeros((blocks,), device=A.device, dtype=torch.float32)\n A_reshaped = A.reshape(n)\n A_com = A_reshaped[: n - rem]\n A_com_reshaped = A_com.reshape(n // blocksize, blocksize)\n absmax[: blocks - has_rem] = torch.abs(A_com_reshaped).max(dim=-1)[0]\n scaled_A = torch.clamp(A_com_reshaped * (1 / absmax[: blocks - has_rem].view(-1, 1)), -1, 1)\n scaled_A = scaled_A.reshape(-1)\n if has_rem:\n absmax[-1] = torch.abs(A_reshaped[n - rem :]).max()\n scaled_A_rem = torch.clamp(A_reshaped[n - rem :] * (1 / absmax[-1]), -1, 1)\n scaled_A = torch.cat([scaled_A, scaled_A_rem], dim=0)\n\n diff = torch.abs(scaled_A.unsqueeze(-1) - code.to(scaled_A.device))\n out = torch.argmin(diff, dim=-1).to(torch.uint8).to(scaled_A.device).reshape(A.shape)\n\n return out, absmax\n\n @register_kernel(\"bitsandbytes::dequantize_blockwise\", \"cpu\")\n def _(\n A: torch.Tensor, absmax: torch.Tensor, code: torch.Tensor, blocksize: int, dtype: torch.dtype\n ) -> torch.Tensor:\n torch._check_is_size(blocksize)\n torch._check(A.dtype == torch.uint8, lambda: f\"A must be uint8, got {A.dtype}\")\n\n # Only FP32 has a C++ kernel\n if dtype == torch.float32:\n out = torch.empty_like(A, dtype=dtype)\n\n lib.cdequantize_blockwise_cpu_fp32(\n get_ptr(code),\n get_ptr(A),\n get_ptr(absmax),\n get_ptr(out),\n ct.c_longlong(blocksize),\n ct.c_longlong(A.numel()),\n )\n else:\n out = code[A.reshape(-1).int()]\n blocks = out.shape[-1] // blocksize\n res = out.shape[-1] % blocksize\n if res != 0:\n out = torch.nn.functional.pad(out, (0, blocksize - res), mode=\"constant\", value=0)\n out = (out.view(-1, blocksize) * absmax.view(-1, 1)).to(dtype).reshape(-1)\n out = out[: blocks * blocksize + res]\n out = out.reshape(A.shape)\n\n return out","source_hash":"386883e4f935273743d6426b5a53ffc55bc3686f898041efbf234ba2d9962504","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.cuda.ops","uri":"program://bitsandbytes/module/bitsandbytes.backends.cuda.ops#L1-L766","kind":"module","name":"bitsandbytes.backends.cuda.ops","path":"bitsandbytes/backends/cuda/ops.py","language":"python","start_line":1,"end_line":766,"context_start_line":1,"context_end_line":766,"code":"from collections.abc import Sequence\nimport ctypes as ct\nfrom math import prod\nfrom typing import Optional\n\nimport torch\n\nfrom bitsandbytes.functional import CUBLAS_Context, _cuda_device_of, 
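To make the remainder handling in the CPU quantize fallback above concrete: when `n` is not divisible by `blocksize`, the tail elements form one extra block with its own scale, so no padding values ever leak into `absmax`. A toy trace of that bookkeeping (values chosen only for the example):

```python
n, blocksize = 1000, 256
rem = n % blocksize                # 232 elements left over in the tail
has_rem = rem > 0                  # True -> allocate one extra block
blocks = n // blocksize + has_rem  # 3 full blocks + 1 partial = 4
print(rem, has_rem, blocks)        # 232 True 4
# absmax[:3] scale the three full blocks; absmax[-1] is computed from
# only the 232 tail elements, exactly as in the has_rem branch above.
```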
_get_tensor_stream, get_ptr\n\nfrom ..._ops import register_kernel\nfrom ...cextension import HIP_ENVIRONMENT, lib\n\n\n@register_kernel(\"bitsandbytes::int8_linear_matmul\", \"cuda\")\ndef _(A: torch.Tensor, B: torch.Tensor):\n out = torch.empty((*A.shape[:-1], B.shape[0]), device=A.device, dtype=torch.int32)\n return _int8_linear_matmul_impl(A, B, out)\n\n\n@register_kernel(\"bitsandbytes::int8_linear_matmul.out\", \"cuda\")\ndef _(A: torch.Tensor, B: torch.Tensor, out: torch.Tensor):\n _int8_linear_matmul_impl(A, B, out)\n\n\ndef _int8_linear_matmul_impl(A: torch.Tensor, B: torch.Tensor, out: torch.Tensor):\n A, B = B, A\n\n shapeA = A.shape\n shapeB = B.shape\n\n torch._check(A.dtype == torch.int8, lambda: \"B must be int8\")\n torch._check(B.dtype == torch.int8, lambda: \"A must be int8\")\n torch._check(A.ndim == 2, lambda: \"Only two dimensional matrices are supported for argument B\")\n torch._check(B.ndim in [2, 3], lambda: \"Only two or three dimensional matrices are supported for argument A\")\n torch._check(prod(shapeB) > 0, lambda: f\"Input tensor dimensions need to be > 0: {shapeB}\")\n torch._check(out.dtype == torch.int32)\n\n shapeC = (*shapeB[:-1], shapeA[0])\n torch._check(out.shape == shapeC, lambda: f\"Output shape {out.shape} does not match expected shape {shapeC}\")\n\n k, m = shapeA\n n = prod(shapeB[:-1])\n lda = shapeA[-1] # Weights (outputs, inputs)\n ldb = shapeB[-1] # Activations (batch, tokens, inputs)\n ldc = shapeC[-1] # Output (batch, tokens, outputs)\n\n torch._check(\n lda == ldb,\n lambda: f\"int8_linear_matmul only supports B^T @ A. Inner dimensions do not match: B @ A = {shapeB} @ {shapeA}\",\n )\n\n # cuBLASLt does not support int8 matmul with inner dimensions that are not divisible by 4.\n # We'll fall back to a slower fp32 calculation in this circumstance.\n # Fortunately, this should not be very common.\n if lda % 4 != 0:\n result = torch.matmul(B.float(), A.float().t()).to(torch.int32)\n return out.copy_(result)\n\n with _cuda_device_of(A):\n ctx = CUBLAS_Context.get_instance().get_context(A.device)\n ptrA = get_ptr(A)\n ptrB = get_ptr(B)\n ptrC = get_ptr(out)\n ptrRowScale = None\n m = ct.c_int32(m)\n n = ct.c_int32(n)\n k = ct.c_int32(k)\n lda = ct.c_int32(lda)\n ldb = ct.c_int32(ldb)\n ldc = ct.c_int32(ldc)\n stream = _get_tensor_stream(A)\n\n has_error = lib.cigemmlt_32(ctx, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc, stream)\n\n if has_error:\n if has_error == 100:\n # `ERR_NOT_IMPLEMENTED` is defined as 100 in `ops.cu`\n # TODO: Warn and implement a fallback to fp32 compute?\n raise NotImplementedError(\"int8_linear_matmul not implemented!\")\n else:\n raise RuntimeError(\n f\"cublasLt ran into an error!\\n\\t{shapeA=}, {shapeB=}, {shapeC=}\\n\\t{(lda, ldb, ldc)=}\\n\\t{(m, n, k)=}\"\n )\n\n return out\n\n\n@register_kernel(\"bitsandbytes::int8_mm_dequant\", \"cuda\")\ndef _(\n A: torch.Tensor,\n row_stats: torch.Tensor,\n col_stats: torch.Tensor,\n dtype: Optional[torch.dtype] = None,\n bias: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n torch._check(A.dtype == torch.int32, lambda: f\"A must be int32, got {A.dtype}\")\n torch._check(row_stats.dtype == torch.float32, lambda: f\"row_stats must be float32, got {row_stats.dtype}\")\n torch._check(col_stats.dtype == torch.float32, lambda: f\"col_stats must be float32, got {col_stats.dtype}\")\n\n # Note: cuda kernel only currently supports fp16 output.\n # We'll later cast to desired dtype if needed.\n out = torch.empty_like(A, dtype=torch.float16)\n\n ptrA = get_ptr(A)\n 
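The bias handling in `int8_mm_dequant` above is worth spelling out: only an fp16 bias can be fused into the CUDA kernel; any other dtype is added afterwards with `out.add_(bias)`. A hedged PyTorch rendering of the equivalent dequant math (the `1/(127*127)` scaling follows the usual symmetric-int8 convention and is my assumption here, not a statement about the kernel's exact constants):

```python
import torch

def int8_mm_dequant_ref(acc_i32, row_stats, col_stats, bias=None, dtype=torch.float16):
    # undo the two 127-range quantizations applied to the matmul inputs (assumed scaling)
    out = acc_i32.float() * (row_stats[:, None] * col_stats[None, :]) / (127.0 * 127.0)
    if bias is not None:
        out = out + bias.float()  # the CUDA kernel fuses this only when bias is fp16
    return out.to(dtype)

acc = torch.randint(-1000, 1000, (4, 8), dtype=torch.int32)
out = int8_mm_dequant_ref(acc, torch.rand(4) + 0.5, torch.rand(8) + 0.5)
print(out.shape, out.dtype)  # torch.Size([4, 8]) torch.float16
```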
ptrOut = get_ptr(out)\n ptrRowStats = get_ptr(row_stats)\n ptrColStats = get_ptr(col_stats)\n numRows = ct.c_int32(prod(A.shape[:-1]))\n numCols = ct.c_int32(A.shape[-1])\n\n # Note: fused bias in the kernel is only supported for fp16\n # TODO(matthewdouglas): Consider supporting bf16 fused bias\n ptrBias = get_ptr(bias) if bias is not None and bias.dtype == torch.float16 else None\n\n with _cuda_device_of(A):\n lib.cdequant_mm_int32_fp16(\n ptrA, ptrRowStats, ptrColStats, ptrOut, ptrBias, numRows, numCols, _get_tensor_stream(A)\n )\n\n # Add bias separately if not fused in kernel\n if bias is not None and bias.dtype != torch.float16:\n out.add_(bias)\n\n return out.to(dtype or torch.float16)\n\n\n@register_kernel(\"bitsandbytes::int8_vectorwise_quant\", \"cuda\")\ndef _(A: torch.Tensor, threshold=0.0):\n torch._check(A.dtype == torch.float16, lambda: f\"A must be float16, got {A.dtype}\")\n torch._check(threshold >= 0.0, lambda: \"threshold must be non-negative\")\n\n rows = prod(A.shape[:-1])\n cols = A.shape[-1]\n\n row_stats = torch.empty(rows, device=A.device, dtype=torch.float32)\n out_row = torch.empty(A.shape, device=A.device, dtype=torch.int8)\n\n outlier_cols = None\n\n if threshold > 0.0:\n # TODO we could improve perf of this\n outliers = A.abs() >= threshold\n\n if outliers.any():\n outlier_cols = torch.argwhere(outliers.any(dim=0)).view(-1)\n else:\n # Needed for torch.compile support.\n outlier_cols = torch.empty(0, device=A.device, dtype=torch.int64)\n\n with _cuda_device_of(A):\n lib.cint8_vector_quant(\n get_ptr(A),\n get_ptr(out_row),\n get_ptr(row_stats),\n ct.c_float(threshold),\n ct.c_int32(rows),\n ct.c_int32(cols),\n _get_tensor_stream(A),\n )\n\n # Zero out values from outlier columns across all rows.\n # The kernel will handle this for outliers themselves, so we can optimize for rows=1.\n if rows > 1 and outlier_cols is not None:\n out_row[:, outlier_cols] = 0\n\n return out_row, row_stats, outlier_cols\n\n\n@register_kernel(\"bitsandbytes::int8_double_quant\", \"cuda\")\ndef _(\n A: torch.Tensor,\n threshold=0.0,\n) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:\n # Use CUDA kernel for rowwise and COO tensor\n quant_row, row_stats, outlier_cols = torch.ops.bitsandbytes.int8_vectorwise_quant.default(\n A,\n threshold=threshold,\n )\n\n # PyTorch impl for colwise\n col_stats, outlier_mask = _get_col_absmax(A, threshold=threshold)\n if threshold > 0.0 and outlier_mask is not None:\n A = A.masked_fill(outlier_mask, 0.0)\n quant_col = torch.round(A.mul(127.0) / col_stats.unsqueeze(0)).to(torch.int8)\n\n return quant_row, quant_col, row_stats, col_stats.flatten().float(), outlier_cols\n\n\ndef _get_col_absmax(\n A: torch.Tensor,\n threshold=0.0,\n) -> tuple[torch.Tensor, Optional[torch.Tensor]]:\n torch._check(A.is_floating_point())\n\n outlier_mask = None\n\n absA = A.abs().view(-1, A.shape[-1])\n\n if threshold > 0.0:\n # Filter outliers from stats when enabled\n outlier_mask = absA >= threshold\n absA.masked_fill_(outlier_mask, 0.0)\n\n # shape [cols]; unsqueeze(0) gives [1,cols]\n col_stats = absA.amax(dim=0, keepdim=False).float()\n\n return col_stats, outlier_mask\n\n\n@register_kernel(\"bitsandbytes::quantize_blockwise\", \"cuda\")\ndef _(A: torch.Tensor, code: torch.Tensor, blocksize: int) -> tuple[torch.Tensor, torch.Tensor]:\n torch._check_is_size(blocksize)\n\n if HIP_ENVIRONMENT:\n torch._check(blocksize in [4096, 2048, 1024, 512, 256, 128])\n else:\n torch._check(blocksize in [4096, 2048, 1024, 512, 256, 128, 
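A compact PyTorch sketch of the contract `int8_vectorwise_quant` above implements (mirroring the registered op's outputs, not the CUDA kernel itself): rowwise absmax stats, symmetric scaling to int8, and outlier columns flagged by an absolute threshold and zeroed in the int8 output. The `_ref` name is mine:

```python
import torch

def int8_vectorwise_quant_ref(A: torch.Tensor, threshold: float = 0.0):
    rows = A.reshape(-1, A.shape[-1])
    outlier_cols = None
    if threshold > 0.0:
        # columns containing any |value| >= threshold are excluded from the int8 path
        outlier_cols = torch.argwhere((rows.abs() >= threshold).any(dim=0)).view(-1)
        rows = rows.clone()
        rows[:, outlier_cols] = 0
    row_stats = rows.abs().amax(dim=1).float()  # one absmax per row
    quant = torch.round(rows * (127.0 / row_stats.clamp(min=1e-12))[:, None]).to(torch.int8)
    return quant, row_stats, outlier_cols

q, stats, cols = int8_vectorwise_quant_ref(torch.randn(4, 16, dtype=torch.float16), threshold=6.0)
```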
64])\n\n torch._check(code.dtype == torch.float32, lambda: f\"code must be float32, got {code.dtype}\")\n\n n = A.numel()\n blocks = -(n // -blocksize)\n absmax = torch.empty((blocks,), device=A.device, dtype=torch.float32)\n out = torch.empty_like(A, dtype=torch.uint8)\n\n with _cuda_device_of(A):\n args = (\n get_ptr(code),\n get_ptr(A),\n get_ptr(absmax),\n get_ptr(out),\n ct.c_int32(blocksize),\n ct.c_int(A.numel()),\n )\n\n if A.dtype == torch.float16:\n lib.cquantize_blockwise_fp16(*args)\n elif A.dtype == torch.bfloat16:\n lib.cquantize_blockwise_bf16(*args)\n elif A.dtype == torch.float32:\n lib.cquantize_blockwise_fp32(*args)\n else:\n raise ValueError(f\"Blockwise quantization only supports 16/32-bit floats, but got {A.dtype}\")\n\n return out, absmax\n\n\n@register_kernel(\"bitsandbytes::dequantize_blockwise\", \"cuda\")\ndef _(A: torch.Tensor, absmax: torch.Tensor, code: torch.Tensor, blocksize: int, dtype: torch.dtype) -> torch.Tensor:\n out = torch.empty_like(A, dtype=dtype)\n _dequantize_blockwise_impl(A, absmax, code, blocksize, dtype, out=out)\n return out\n\n\n@register_kernel(\"bitsandbytes::dequantize_blockwise.out\", \"cuda\")\ndef _(\n A: torch.Tensor,\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n torch._check(out.dtype == dtype, lambda: f\"Expected out.dtype == {dtype}, got {out.dtype}\")\n torch._check(out.shape == A.shape, lambda: f\"Expected out.shape == {A.shape}, got {out.shape}\")\n _dequantize_blockwise_impl(A, absmax, code, blocksize, dtype, out=out)\n\n\ndef _dequantize_blockwise_impl(\n A: torch.Tensor, absmax: torch.Tensor, code: torch.Tensor, blocksize: int, dtype: torch.dtype, out: torch.Tensor\n) -> None:\n if HIP_ENVIRONMENT:\n torch._check(blocksize in [4096, 2048, 1024, 512, 256, 128])\n else:\n torch._check(blocksize in [4096, 2048, 1024, 512, 256, 128, 64])\n\n torch._check(A.dtype == torch.uint8, lambda: f\"A must be uint8, got {A.dtype}\")\n torch._check(\n dtype in [torch.float16, torch.bfloat16, torch.float32],\n lambda: f\"Blockwise dequantization only supports 16bit/32bit floating types, got {dtype}\",\n )\n\n with _cuda_device_of(A):\n args = (\n get_ptr(code),\n get_ptr(A),\n get_ptr(absmax),\n get_ptr(out),\n ct.c_int(blocksize),\n ct.c_int(A.numel()),\n _get_tensor_stream(A),\n )\n\n if dtype == torch.float16:\n lib.cdequantize_blockwise_fp16(*args)\n elif dtype == torch.bfloat16:\n lib.cdequantize_blockwise_bf16(*args)\n elif dtype == torch.float32:\n lib.cdequantize_blockwise_fp32(*args)\n\n\n@register_kernel(\"bitsandbytes::quantize_4bit\", \"cuda\")\ndef _(\n A: torch.Tensor, blocksize: int, quant_type: str, quant_storage: torch.dtype\n) -> tuple[torch.Tensor, torch.Tensor]:\n if HIP_ENVIRONMENT:\n torch._check(blocksize in [4096, 2048, 1024, 512, 256, 128])\n else:\n torch._check(blocksize in [4096, 2048, 1024, 512, 256, 128, 64])\n\n torch._check(quant_type in [\"fp4\", \"nf4\"])\n torch._check(\n A.dtype in [torch.bfloat16, torch.float16, torch.float32],\n lambda: f\"Blockwise 4bit quantization only supports 16/32-bit floats, but got {A.dtype}\",\n )\n\n n = A.numel()\n blocks = -(n // -blocksize)\n absmax = torch.empty((blocks,), device=A.device, dtype=torch.float32)\n out = torch.empty(((n + 1) // (quant_storage.itemsize * 2), 1), device=A.device, dtype=quant_storage)\n\n with _cuda_device_of(A):\n args = (\n None,\n get_ptr(A),\n get_ptr(absmax),\n get_ptr(out),\n ct.c_int32(blocksize),\n ct.c_int(n),\n )\n\n if A.dtype == torch.bfloat16:\n if quant_type 
== \"fp4\":\n lib.cquantize_blockwise_bf16_fp4(*args)\n else:\n lib.cquantize_blockwise_bf16_nf4(*args)\n elif A.dtype == torch.float16:\n if quant_type == \"fp4\":\n lib.cquantize_blockwise_fp16_fp4(*args)\n else:\n lib.cquantize_blockwise_fp16_nf4(*args)\n elif A.dtype == torch.float32:\n if quant_type == \"fp4\":\n lib.cquantize_blockwise_fp32_fp4(*args)\n else:\n lib.cquantize_blockwise_fp32_nf4(*args)\n\n return out, absmax\n\n\n@register_kernel(\"bitsandbytes::dequantize_4bit\", \"cuda\")\ndef _(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n shape: Sequence[int],\n dtype: torch.dtype,\n) -> torch.Tensor:\n out = torch.empty(shape, dtype=dtype, device=A.device)\n _dequantize_4bit_impl(A, absmax, blocksize, quant_type, dtype, out=out)\n return out\n\n\n@register_kernel(\"bitsandbytes::dequantize_4bit.out\", \"cuda\")\ndef _(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n shape: Sequence[int],\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n torch._check(out.shape == shape, lambda: f\"Expected out.shape == {shape}, got {out.shape}\")\n torch._check(out.dtype == dtype, lambda: f\"Expected out.dtype == {dtype}, got {out.dtype}\")\n _dequantize_4bit_impl(A, absmax, blocksize, quant_type, dtype, out=out)\n\n\ndef _dequantize_4bit_impl(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n if HIP_ENVIRONMENT:\n torch._check(blocksize in [4096, 2048, 1024, 512, 256, 128])\n else:\n torch._check(blocksize in [4096, 2048, 1024, 512, 256, 128, 64])\n\n torch._check(quant_type in [\"fp4\", \"nf4\"])\n torch._check(\n dtype in [torch.bfloat16, torch.float16, torch.float32],\n lambda: f\"Blockwise 4bit dequantization only supports 16/32-bit floats, but got {dtype}\",\n )\n\n with _cuda_device_of(A):\n args = (\n None,\n get_ptr(A),\n get_ptr(absmax),\n get_ptr(out),\n ct.c_int(blocksize),\n ct.c_int(out.numel()),\n _get_tensor_stream(A),\n )\n\n if out.dtype == torch.bfloat16:\n if quant_type == \"fp4\":\n lib.cdequantize_blockwise_bf16_fp4(*args)\n else:\n lib.cdequantize_blockwise_bf16_nf4(*args)\n elif out.dtype == torch.float16:\n if quant_type == \"fp4\":\n lib.cdequantize_blockwise_fp16_fp4(*args)\n else:\n lib.cdequantize_blockwise_fp16_nf4(*args)\n elif out.dtype == torch.float32:\n if quant_type == \"fp4\":\n lib.cdequantize_blockwise_fp32_fp4(*args)\n else:\n lib.cdequantize_blockwise_fp32_nf4(*args)\n\n\n@register_kernel(\"bitsandbytes::gemv_4bit\", \"cuda\")\ndef _(\n A: torch.Tensor, B: torch.Tensor, shapeB: Sequence[int], absmax: torch.Tensor, code: torch.Tensor, blocksize: int\n) -> torch.Tensor:\n shape = (*A.shape[:-1], shapeB[0])\n out = torch.empty(shape, device=A.device, dtype=A.dtype)\n _gemv_4bit_impl(A, B, shapeB, absmax, code, blocksize, out=out)\n return out\n\n\n@register_kernel(\"bitsandbytes::gemv_4bit.out\", \"cuda\")\ndef _(\n A: torch.Tensor,\n B: torch.Tensor,\n shapeB: Sequence[int],\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n out: torch.Tensor,\n) -> None:\n torch._check(\n out.shape == (*A.shape[:-1], shapeB[0]),\n lambda: f\"Expected out.shape == {(*A.shape[:-1], shapeB[0])}, got {out.shape}\",\n )\n torch._check(out.dtype == A.dtype, lambda: f\"Expected out.dtype == {A.dtype}, got {out.dtype}\")\n _gemv_4bit_impl(A, B, shapeB, absmax, code, blocksize, out=out)\n\n\ndef _gemv_4bit_impl(\n A: torch.Tensor,\n B: torch.Tensor,\n shapeB: Sequence[int],\n absmax: torch.Tensor,\n code: 
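The output allocation in `quantize_4bit` above encodes the 4-bit packing: every byte stores two codes, so one `quant_storage` element of `itemsize` bytes holds `2 * itemsize` quantized values. A small check of that arithmetic for the even, block-aligned sizes seen in practice:

```python
import torch

n = 1000  # number of elements to quantize (block-aligned in real use)
for quant_storage in (torch.uint8, torch.float16, torch.float32):
    itemsize = quant_storage.itemsize
    storage_elems = (n + 1) // (itemsize * 2)  # the formula from the allocation above
    # capacity in 4-bit slots must cover n
    print(quant_storage, storage_elems, storage_elems * itemsize * 2 >= n)
```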
torch.Tensor,\n blocksize: int,\n out: torch.Tensor,\n) -> None:\n torch._check_is_size(blocksize)\n\n # Note: these checks are not strictly necessary, and cost more than they are worth, so they are commented out for now.\n # torch._check(\n # A.numel() == A.size(-1),\n # lambda: f\"A must be a vector with leading dimensions of 1, got {A.shape}\",\n # )\n # torch._check(\n # A.dtype in [torch.float16, torch.bfloat16, torch.float32],\n # lambda: f\"A must be float16, bfloat16, or float32, got {A.dtype}\",\n # )\n # torch._check(\n # B.dtype in [torch.uint8, torch.bfloat16, torch.float16, torch.float32],\n # lambda: f\"B must be backed by storage of type uint8, bfloat16, float16, or float32, got {B.dtype}\",\n # )\n # torch._check(absmax.dtype == torch.float32, lambda: f\"absmax must be float32, got {absmax.dtype}\")\n # torch._check(code.dtype == torch.float32, lambda: f\"code must be float32, got {code.dtype}\")\n\n m = ct.c_int32(shapeB[0])\n n = ct.c_int32(1)\n k = ct.c_int32(shapeB[1])\n\n lda = m\n ldb = ct.c_int32((A.shape[-1] + 1) // 2)\n ldc = m\n\n stream = _get_tensor_stream(A)\n\n with _cuda_device_of(A):\n if A.dtype == torch.float16:\n lib.cgemm_4bit_inference_naive_fp16(\n m,\n n,\n k,\n get_ptr(A),\n get_ptr(B),\n get_ptr(absmax),\n get_ptr(code),\n get_ptr(out),\n lda,\n ldb,\n ldc,\n ct.c_int32(blocksize),\n stream,\n )\n elif A.dtype == torch.bfloat16:\n lib.cgemm_4bit_inference_naive_bf16(\n m,\n n,\n k,\n get_ptr(A),\n get_ptr(B),\n get_ptr(absmax),\n get_ptr(code),\n get_ptr(out),\n lda,\n ldb,\n ldc,\n ct.c_int32(blocksize),\n stream,\n )\n elif A.dtype == torch.float32:\n lib.cgemm_4bit_inference_naive_fp32(\n m,\n n,\n k,\n get_ptr(A),\n get_ptr(B),\n get_ptr(absmax),\n get_ptr(code),\n get_ptr(out),\n lda,\n ldb,\n ldc,\n ct.c_int32(blocksize),\n stream,\n )\n\n\n\"\"\"C FUNCTIONS FOR OPTIMIZERS\"\"\"\nstr2optimizer32bit = {\n \"adam\": (\n lib.cadam32bit_grad_fp32,\n lib.cadam32bit_grad_fp16,\n lib.cadam32bit_grad_bf16,\n ),\n \"momentum\": (\n lib.cmomentum32bit_grad_32,\n lib.cmomentum32bit_grad_16,\n ),\n \"rmsprop\": (\n lib.crmsprop32bit_grad_32,\n lib.crmsprop32bit_grad_16,\n ),\n \"lion\": (\n lib.clion32bit_grad_fp32,\n lib.clion32bit_grad_fp16,\n lib.clion32bit_grad_bf16,\n ),\n \"adagrad\": (\n lib.cadagrad32bit_grad_32,\n lib.cadagrad32bit_grad_16,\n ),\n \"lamb\": (\n lib.cadam32bit_grad_fp32,\n lib.cadam32bit_grad_fp16,\n lib.cadam32bit_grad_bf16,\n ),\n \"ademamix\": (\n lib.cademamix32bit_grad_fp32,\n lib.cademamix32bit_grad_fp16,\n lib.cademamix32bit_grad_bf16,\n ),\n}\n\nstr2optimizer8bit_blockwise = {\n \"adam\": (\n lib.cadam_8bit_blockwise_grad_fp32,\n lib.cadam_8bit_blockwise_grad_fp16,\n lib.cadam_8bit_blockwise_grad_bf16,\n ),\n \"momentum\": (\n lib.cmomentum_8bit_blockwise_grad_fp32,\n lib.cmomentum_8bit_blockwise_grad_fp16,\n lib.cmomentum_8bit_blockwise_grad_bf16,\n ),\n \"rmsprop\": (\n lib.crmsprop_8bit_blockwise_grad_fp32,\n lib.crmsprop_8bit_blockwise_grad_fp16,\n lib.crmsprop_8bit_blockwise_grad_bf16,\n ),\n \"lion\": (\n lib.clion_8bit_blockwise_grad_fp32,\n lib.clion_8bit_blockwise_grad_fp16,\n lib.clion_8bit_blockwise_grad_bf16,\n ),\n \"adagrad\": (\n lib.cadagrad_8bit_blockwise_grad_fp32,\n lib.cadagrad_8bit_blockwise_grad_fp16,\n lib.cadagrad_8bit_blockwise_grad_bf16,\n ),\n \"ademamix\": (\n lib.cademamix_8bit_blockwise_grad_fp32,\n lib.cademamix_8bit_b\n# ... 
truncated ...","source_hash":"c31f6e0a548a168f99abf902ee246860724607807eee83c03954002c35336ed6","truncated":true} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.cuda.ops._","uri":"program://bitsandbytes/function/bitsandbytes.backends.cuda.ops._#L438-L452","kind":"function","name":"_","path":"bitsandbytes/backends/cuda/ops.py","language":"python","start_line":438,"end_line":452,"context_start_line":418,"context_end_line":472,"code":" else:\n lib.cdequantize_blockwise_fp16_nf4(*args)\n elif out.dtype == torch.float32:\n if quant_type == \"fp4\":\n lib.cdequantize_blockwise_fp32_fp4(*args)\n else:\n lib.cdequantize_blockwise_fp32_nf4(*args)\n\n\n@register_kernel(\"bitsandbytes::gemv_4bit\", \"cuda\")\ndef _(\n A: torch.Tensor, B: torch.Tensor, shapeB: Sequence[int], absmax: torch.Tensor, code: torch.Tensor, blocksize: int\n) -> torch.Tensor:\n shape = (*A.shape[:-1], shapeB[0])\n out = torch.empty(shape, device=A.device, dtype=A.dtype)\n _gemv_4bit_impl(A, B, shapeB, absmax, code, blocksize, out=out)\n return out\n\n\n@register_kernel(\"bitsandbytes::gemv_4bit.out\", \"cuda\")\ndef _(\n A: torch.Tensor,\n B: torch.Tensor,\n shapeB: Sequence[int],\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n out: torch.Tensor,\n) -> None:\n torch._check(\n out.shape == (*A.shape[:-1], shapeB[0]),\n lambda: f\"Expected out.shape == {(*A.shape[:-1], shapeB[0])}, got {out.shape}\",\n )\n torch._check(out.dtype == A.dtype, lambda: f\"Expected out.dtype == {A.dtype}, got {out.dtype}\")\n _gemv_4bit_impl(A, B, shapeB, absmax, code, blocksize, out=out)\n\n\ndef _gemv_4bit_impl(\n A: torch.Tensor,\n B: torch.Tensor,\n shapeB: Sequence[int],\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n out: torch.Tensor,\n) -> None:\n torch._check_is_size(blocksize)\n\n # Note: these checks are not strictly necessary, and cost more than they are worth, so they are commented out for now.\n # torch._check(\n # A.numel() == A.size(-1),\n # lambda: f\"A must be a vector with leading dimensions of 1, got {A.shape}\",\n # )\n # torch._check(\n # A.dtype in [torch.float16, torch.bfloat16, torch.float32],","source_hash":"c31f6e0a548a168f99abf902ee246860724607807eee83c03954002c35336ed6","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.cuda.ops._int8_linear_matmul_impl","uri":"program://bitsandbytes/function/bitsandbytes.backends.cuda.ops._int8_linear_matmul_impl#L25-L85","kind":"function","name":"_int8_linear_matmul_impl","path":"bitsandbytes/backends/cuda/ops.py","language":"python","start_line":25,"end_line":85,"context_start_line":5,"context_end_line":105,"code":"\nimport torch\n\nfrom bitsandbytes.functional import CUBLAS_Context, _cuda_device_of, _get_tensor_stream, get_ptr\n\nfrom ..._ops import register_kernel\nfrom ...cextension import HIP_ENVIRONMENT, lib\n\n\n@register_kernel(\"bitsandbytes::int8_linear_matmul\", \"cuda\")\ndef _(A: torch.Tensor, B: torch.Tensor):\n out = torch.empty((*A.shape[:-1], B.shape[0]), device=A.device, dtype=torch.int32)\n return _int8_linear_matmul_impl(A, B, out)\n\n\n@register_kernel(\"bitsandbytes::int8_linear_matmul.out\", \"cuda\")\ndef _(A: torch.Tensor, B: torch.Tensor, out: torch.Tensor):\n _int8_linear_matmul_impl(A, B, out)\n\n\ndef _int8_linear_matmul_impl(A: torch.Tensor, B: torch.Tensor, out: torch.Tensor):\n A, B = B, A\n\n shapeA = A.shape\n shapeB = B.shape\n\n torch._check(A.dtype == torch.int8, lambda: \"B must be int8\")\n torch._check(B.dtype == torch.int8, lambda: \"A must be int8\")\n 
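In `_gemv_4bit_impl`, shown in the module listing above, the leading dimension `ldb = (A.shape[-1] + 1) // 2` reflects that the weight's K dimension is stored packed, two 4-bit codes per byte. A quick illustration of the shape bookkeeping (toy values, purely illustrative):

```python
# Toy shape bookkeeping for the 4-bit GEMV call:
shapeB = (4096, 4096)              # logical weight shape (out_features, in_features)
A_last_dim = shapeB[1]             # activation K must match B's in_features
m, n, k = shapeB[0], 1, shapeB[1]  # GEMV: a single row of activations
ldb = (A_last_dim + 1) // 2        # K packed two 4-bit codes per byte
print(m, n, k, ldb)                # 4096 1 4096 2048
```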
torch._check(A.ndim == 2, lambda: \"Only two dimensional matrices are supported for argument B\")\n torch._check(B.ndim in [2, 3], lambda: \"Only two or three dimensional matrices are supported for argument A\")\n torch._check(prod(shapeB) > 0, lambda: f\"Input tensor dimensions need to be > 0: {shapeB}\")\n torch._check(out.dtype == torch.int32)\n\n shapeC = (*shapeB[:-1], shapeA[0])\n torch._check(out.shape == shapeC, lambda: f\"Output shape {out.shape} does not match expected shape {shapeC}\")\n\n k, m = shapeA\n n = prod(shapeB[:-1])\n lda = shapeA[-1] # Weights (outputs, inputs)\n ldb = shapeB[-1] # Activations (batch, tokens, inputs)\n ldc = shapeC[-1] # Output (batch, tokens, outputs)\n\n torch._check(\n lda == ldb,\n lambda: f\"int8_linear_matmul only supports B^T @ A. Inner dimensions do not match: B @ A = {shapeB} @ {shapeA}\",\n )\n\n # cuBLASLt does not support int8 matmul with inner dimensions that are not divisible by 4.\n # We'll fall back to a slower fp32 calculation in this circumstance.\n # Fortunately, this should not be very common.\n if lda % 4 != 0:\n result = torch.matmul(B.float(), A.float().t()).to(torch.int32)\n return out.copy_(result)\n\n with _cuda_device_of(A):\n ctx = CUBLAS_Context.get_instance().get_context(A.device)\n ptrA = get_ptr(A)\n ptrB = get_ptr(B)\n ptrC = get_ptr(out)\n ptrRowScale = None\n m = ct.c_int32(m)\n n = ct.c_int32(n)\n k = ct.c_int32(k)\n lda = ct.c_int32(lda)\n ldb = ct.c_int32(ldb)\n ldc = ct.c_int32(ldc)\n stream = _get_tensor_stream(A)\n\n has_error = lib.cigemmlt_32(ctx, m, n, k, ptrA, ptrB, ptrC, ptrRowScale, lda, ldb, ldc, stream)\n\n if has_error:\n if has_error == 100:\n # `ERR_NOT_IMPLEMENTED` is defined as 100 in `ops.cu`\n # TODO: Warn and implement a fallback to fp32 compute?\n raise NotImplementedError(\"int8_linear_matmul not implemented!\")\n else:\n raise RuntimeError(\n f\"cublasLt ran into an error!\\n\\t{shapeA=}, {shapeB=}, {shapeC=}\\n\\t{(lda, ldb, ldc)=}\\n\\t{(m, n, k)=}\"\n )\n\n return out\n\n\n@register_kernel(\"bitsandbytes::int8_mm_dequant\", \"cuda\")\ndef _(\n A: torch.Tensor,\n row_stats: torch.Tensor,\n col_stats: torch.Tensor,\n dtype: Optional[torch.dtype] = None,\n bias: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n torch._check(A.dtype == torch.int32, lambda: f\"A must be int32, got {A.dtype}\")\n torch._check(row_stats.dtype == torch.float32, lambda: f\"row_stats must be float32, got {row_stats.dtype}\")\n torch._check(col_stats.dtype == torch.float32, lambda: f\"col_stats must be float32, got {col_stats.dtype}\")\n\n # Note: cuda kernel only currently supports fp16 output.\n # We'll later cast to desired dtype if needed.\n out = torch.empty_like(A, dtype=torch.float16)\n\n ptrA = get_ptr(A)\n ptrOut = get_ptr(out)","source_hash":"c31f6e0a548a168f99abf902ee246860724607807eee83c03954002c35336ed6","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.cuda.ops._get_col_absmax","uri":"program://bitsandbytes/function/bitsandbytes.backends.cuda.ops._get_col_absmax#L189-L207","kind":"function","name":"_get_col_absmax","path":"bitsandbytes/backends/cuda/ops.py","language":"python","start_line":189,"end_line":207,"context_start_line":169,"context_end_line":227,"code":"@register_kernel(\"bitsandbytes::int8_double_quant\", \"cuda\")\ndef _(\n A: torch.Tensor,\n threshold=0.0,\n) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:\n # Use CUDA kernel for rowwise and COO tensor\n quant_row, row_stats, outlier_cols = 
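The `lda % 4 != 0` fallback in `_int8_linear_matmul_impl` above relies on a useful fact: promoting int8 operands to fp32 and rounding the result back gives exact int32 accumulators as long as every partial sum stays within fp32's exact-integer range (safe for int8 values and moderate inner dimensions). A sketch of that equivalence on CPU, checked against an exact int64 reference:

```python
import torch

A = torch.randint(-128, 128, (16, 12), dtype=torch.int8)  # activations, K=12 (not /4)
B = torch.randint(-128, 128, (8, 12), dtype=torch.int8)   # weights (out, in)

ref64 = A.long() @ B.long().t()                            # exact integer reference
fp32 = torch.matmul(A.float(), B.float().t()).to(torch.int32)  # the fallback path
# exact while |accumulator| stays below ~2**24, which int8 x small-K guarantees
assert torch.equal(ref64.to(torch.int32), fp32)
```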
torch.ops.bitsandbytes.int8_vectorwise_quant.default(\n A,\n threshold=threshold,\n )\n\n # PyTorch impl for colwise\n col_stats, outlier_mask = _get_col_absmax(A, threshold=threshold)\n if threshold > 0.0 and outlier_mask is not None:\n A = A.masked_fill(outlier_mask, 0.0)\n quant_col = torch.round(A.mul(127.0) / col_stats.unsqueeze(0)).to(torch.int8)\n\n return quant_row, quant_col, row_stats, col_stats.flatten().float(), outlier_cols\n\n\ndef _get_col_absmax(\n A: torch.Tensor,\n threshold=0.0,\n) -> tuple[torch.Tensor, Optional[torch.Tensor]]:\n torch._check(A.is_floating_point())\n\n outlier_mask = None\n\n absA = A.abs().view(-1, A.shape[-1])\n\n if threshold > 0.0:\n # Filter outliers from stats when enabled\n outlier_mask = absA >= threshold\n absA.masked_fill_(outlier_mask, 0.0)\n\n # shape [cols]; unsqueeze(0) gives [1,cols]\n col_stats = absA.amax(dim=0, keepdim=False).float()\n\n return col_stats, outlier_mask\n\n\n@register_kernel(\"bitsandbytes::quantize_blockwise\", \"cuda\")\ndef _(A: torch.Tensor, code: torch.Tensor, blocksize: int) -> tuple[torch.Tensor, torch.Tensor]:\n torch._check_is_size(blocksize)\n\n if HIP_ENVIRONMENT:\n torch._check(blocksize in [4096, 2048, 1024, 512, 256, 128])\n else:\n torch._check(blocksize in [4096, 2048, 1024, 512, 256, 128, 64])\n\n torch._check(code.dtype == torch.float32, lambda: f\"code must be float32, got {code.dtype}\")\n\n n = A.numel()\n blocks = -(n // -blocksize)\n absmax = torch.empty((blocks,), device=A.device, dtype=torch.float32)\n out = torch.empty_like(A, dtype=torch.uint8)\n\n with _cuda_device_of(A):\n args = (","source_hash":"c31f6e0a548a168f99abf902ee246860724607807eee83c03954002c35336ed6","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.cuda.ops._dequantize_blockwise_impl","uri":"program://bitsandbytes/function/bitsandbytes.backends.cuda.ops._dequantize_blockwise_impl#L269-L299","kind":"function","name":"_dequantize_blockwise_impl","path":"bitsandbytes/backends/cuda/ops.py","language":"python","start_line":269,"end_line":299,"context_start_line":249,"context_end_line":319,"code":"def _(A: torch.Tensor, absmax: torch.Tensor, code: torch.Tensor, blocksize: int, dtype: torch.dtype) -> torch.Tensor:\n out = torch.empty_like(A, dtype=dtype)\n _dequantize_blockwise_impl(A, absmax, code, blocksize, dtype, out=out)\n return out\n\n\n@register_kernel(\"bitsandbytes::dequantize_blockwise.out\", \"cuda\")\ndef _(\n A: torch.Tensor,\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n torch._check(out.dtype == dtype, lambda: f\"Expected out.dtype == {dtype}, got {out.dtype}\")\n torch._check(out.shape == A.shape, lambda: f\"Expected out.shape == {A.shape}, got {out.shape}\")\n _dequantize_blockwise_impl(A, absmax, code, blocksize, dtype, out=out)\n\n\ndef _dequantize_blockwise_impl(\n A: torch.Tensor, absmax: torch.Tensor, code: torch.Tensor, blocksize: int, dtype: torch.dtype, out: torch.Tensor\n) -> None:\n if HIP_ENVIRONMENT:\n torch._check(blocksize in [4096, 2048, 1024, 512, 256, 128])\n else:\n torch._check(blocksize in [4096, 2048, 1024, 512, 256, 128, 64])\n\n torch._check(A.dtype == torch.uint8, lambda: f\"A must be uint8, got {A.dtype}\")\n torch._check(\n dtype in [torch.float16, torch.bfloat16, torch.float32],\n lambda: f\"Blockwise dequantization only supports 16bit/32bit floating types, got {dtype}\",\n )\n\n with _cuda_device_of(A):\n args = (\n get_ptr(code),\n get_ptr(A),\n get_ptr(absmax),\n get_ptr(out),\n 
ct.c_int(blocksize),\n ct.c_int(A.numel()),\n _get_tensor_stream(A),\n )\n\n if dtype == torch.float16:\n lib.cdequantize_blockwise_fp16(*args)\n elif dtype == torch.bfloat16:\n lib.cdequantize_blockwise_bf16(*args)\n elif dtype == torch.float32:\n lib.cdequantize_blockwise_fp32(*args)\n\n\n@register_kernel(\"bitsandbytes::quantize_4bit\", \"cuda\")\ndef _(\n A: torch.Tensor, blocksize: int, quant_type: str, quant_storage: torch.dtype\n) -> tuple[torch.Tensor, torch.Tensor]:\n if HIP_ENVIRONMENT:\n torch._check(blocksize in [4096, 2048, 1024, 512, 256, 128])\n else:\n torch._check(blocksize in [4096, 2048, 1024, 512, 256, 128, 64])\n\n torch._check(quant_type in [\"fp4\", \"nf4\"])\n torch._check(\n A.dtype in [torch.bfloat16, torch.float16, torch.float32],\n lambda: f\"Blockwise 4bit quantization only supports 16/32-bit floats, but got {A.dtype}\",\n )\n\n n = A.numel()\n blocks = -(n // -blocksize)\n absmax = torch.empty((blocks,), device=A.device, dtype=torch.float32)","source_hash":"c31f6e0a548a168f99abf902ee246860724607807eee83c03954002c35336ed6","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.cuda.ops._dequantize_4bit_impl","uri":"program://bitsandbytes/function/bitsandbytes.backends.cuda.ops._dequantize_4bit_impl#L380-L424","kind":"function","name":"_dequantize_4bit_impl","path":"bitsandbytes/backends/cuda/ops.py","language":"python","start_line":380,"end_line":424,"context_start_line":360,"context_end_line":444,"code":" out = torch.empty(shape, dtype=dtype, device=A.device)\n _dequantize_4bit_impl(A, absmax, blocksize, quant_type, dtype, out=out)\n return out\n\n\n@register_kernel(\"bitsandbytes::dequantize_4bit.out\", \"cuda\")\ndef _(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n shape: Sequence[int],\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n torch._check(out.shape == shape, lambda: f\"Expected out.shape == {shape}, got {out.shape}\")\n torch._check(out.dtype == dtype, lambda: f\"Expected out.dtype == {dtype}, got {out.dtype}\")\n _dequantize_4bit_impl(A, absmax, blocksize, quant_type, dtype, out=out)\n\n\ndef _dequantize_4bit_impl(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n if HIP_ENVIRONMENT:\n torch._check(blocksize in [4096, 2048, 1024, 512, 256, 128])\n else:\n torch._check(blocksize in [4096, 2048, 1024, 512, 256, 128, 64])\n\n torch._check(quant_type in [\"fp4\", \"nf4\"])\n torch._check(\n dtype in [torch.bfloat16, torch.float16, torch.float32],\n lambda: f\"Blockwise 4bit dequantization only supports 16/32-bit floats, but got {dtype}\",\n )\n\n with _cuda_device_of(A):\n args = (\n None,\n get_ptr(A),\n get_ptr(absmax),\n get_ptr(out),\n ct.c_int(blocksize),\n ct.c_int(out.numel()),\n _get_tensor_stream(A),\n )\n\n if out.dtype == torch.bfloat16:\n if quant_type == \"fp4\":\n lib.cdequantize_blockwise_bf16_fp4(*args)\n else:\n lib.cdequantize_blockwise_bf16_nf4(*args)\n elif out.dtype == torch.float16:\n if quant_type == \"fp4\":\n lib.cdequantize_blockwise_fp16_fp4(*args)\n else:\n lib.cdequantize_blockwise_fp16_nf4(*args)\n elif out.dtype == torch.float32:\n if quant_type == \"fp4\":\n lib.cdequantize_blockwise_fp32_fp4(*args)\n else:\n lib.cdequantize_blockwise_fp32_nf4(*args)\n\n\n@register_kernel(\"bitsandbytes::gemv_4bit\", \"cuda\")\ndef _(\n A: torch.Tensor, B: torch.Tensor, shapeB: Sequence[int], absmax: torch.Tensor, code: torch.Tensor, blocksize: int\n) -> torch.Tensor:\n shape = 
(*A.shape[:-1], shapeB[0])\n out = torch.empty(shape, device=A.device, dtype=A.dtype)\n _gemv_4bit_impl(A, B, shapeB, absmax, code, blocksize, out=out)\n return out\n\n\n@register_kernel(\"bitsandbytes::gemv_4bit.out\", \"cuda\")\ndef _(\n A: torch.Tensor,\n B: torch.Tensor,\n shapeB: Sequence[int],\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,","source_hash":"c31f6e0a548a168f99abf902ee246860724607807eee83c03954002c35336ed6","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.cuda.ops._gemv_4bit_impl","uri":"program://bitsandbytes/function/bitsandbytes.backends.cuda.ops._gemv_4bit_impl#L455-L540","kind":"function","name":"_gemv_4bit_impl","path":"bitsandbytes/backends/cuda/ops.py","language":"python","start_line":455,"end_line":540,"context_start_line":435,"context_end_line":560,"code":"\n\n@register_kernel(\"bitsandbytes::gemv_4bit.out\", \"cuda\")\ndef _(\n A: torch.Tensor,\n B: torch.Tensor,\n shapeB: Sequence[int],\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n out: torch.Tensor,\n) -> None:\n torch._check(\n out.shape == (*A.shape[:-1], shapeB[0]),\n lambda: f\"Expected out.shape == {(*A.shape[:-1], shapeB[0])}, got {out.shape}\",\n )\n torch._check(out.dtype == A.dtype, lambda: f\"Expected out.dtype == {A.dtype}, got {out.dtype}\")\n _gemv_4bit_impl(A, B, shapeB, absmax, code, blocksize, out=out)\n\n\ndef _gemv_4bit_impl(\n A: torch.Tensor,\n B: torch.Tensor,\n shapeB: Sequence[int],\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n out: torch.Tensor,\n) -> None:\n torch._check_is_size(blocksize)\n\n # Note: these checks are not strictly necessary, and cost more than they are worth, so they are commented out for now.\n # torch._check(\n # A.numel() == A.size(-1),\n # lambda: f\"A must be a vector with leading dimensions of 1, got {A.shape}\",\n # )\n # torch._check(\n # A.dtype in [torch.float16, torch.bfloat16, torch.float32],\n # lambda: f\"A must be float16, bfloat16, or float32, got {A.dtype}\",\n # )\n # torch._check(\n # B.dtype in [torch.uint8, torch.bfloat16, torch.float16, torch.float32],\n # lambda: f\"B must be backed by storage of type uint8, bfloat16, float16, or float32, got {B.dtype}\",\n # )\n # torch._check(absmax.dtype == torch.float32, lambda: f\"absmax must be float32, got {absmax.dtype}\")\n # torch._check(code.dtype == torch.float32, lambda: f\"code must be float32, got {code.dtype}\")\n\n m = ct.c_int32(shapeB[0])\n n = ct.c_int32(1)\n k = ct.c_int32(shapeB[1])\n\n lda = m\n ldb = ct.c_int32((A.shape[-1] + 1) // 2)\n ldc = m\n\n stream = _get_tensor_stream(A)\n\n with _cuda_device_of(A):\n if A.dtype == torch.float16:\n lib.cgemm_4bit_inference_naive_fp16(\n m,\n n,\n k,\n get_ptr(A),\n get_ptr(B),\n get_ptr(absmax),\n get_ptr(code),\n get_ptr(out),\n lda,\n ldb,\n ldc,\n ct.c_int32(blocksize),\n stream,\n )\n elif A.dtype == torch.bfloat16:\n lib.cgemm_4bit_inference_naive_bf16(\n m,\n n,\n k,\n get_ptr(A),\n get_ptr(B),\n get_ptr(absmax),\n get_ptr(code),\n get_ptr(out),\n lda,\n ldb,\n ldc,\n ct.c_int32(blocksize),\n stream,\n )\n elif A.dtype == torch.float32:\n lib.cgemm_4bit_inference_naive_fp32(\n m,\n n,\n k,\n get_ptr(A),\n get_ptr(B),\n get_ptr(absmax),\n get_ptr(code),\n get_ptr(out),\n lda,\n ldb,\n ldc,\n ct.c_int32(blocksize),\n stream,\n )\n\n\n\"\"\"C FUNCTIONS FOR OPTIMIZERS\"\"\"\nstr2optimizer32bit = {\n \"adam\": (\n lib.cadam32bit_grad_fp32,\n lib.cadam32bit_grad_fp16,\n lib.cadam32bit_grad_bf16,\n ),\n \"momentum\": (\n lib.cmomentum32bit_grad_32,\n 
lib.cmomentum32bit_grad_16,\n ),\n \"rmsprop\": (\n lib.crmsprop32bit_grad_32,\n lib.crmsprop32bit_grad_16,\n ),\n \"lion\": (\n lib.clion32bit_grad_fp32,\n lib.clion32bit_grad_fp16,","source_hash":"c31f6e0a548a168f99abf902ee246860724607807eee83c03954002c35336ed6","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.cuda.ops._optimizer_update_32bit_impl","uri":"program://bitsandbytes/function/bitsandbytes.backends.cuda.ops._optimizer_update_32bit_impl#L613-L669","kind":"function","name":"_optimizer_update_32bit_impl","path":"bitsandbytes/backends/cuda/ops.py","language":"python","start_line":613,"end_line":669,"context_start_line":593,"context_end_line":689,"code":" lib.crmsprop_8bit_blockwise_grad_bf16,\n ),\n \"lion\": (\n lib.clion_8bit_blockwise_grad_fp32,\n lib.clion_8bit_blockwise_grad_fp16,\n lib.clion_8bit_blockwise_grad_bf16,\n ),\n \"adagrad\": (\n lib.cadagrad_8bit_blockwise_grad_fp32,\n lib.cadagrad_8bit_blockwise_grad_fp16,\n lib.cadagrad_8bit_blockwise_grad_bf16,\n ),\n \"ademamix\": (\n lib.cademamix_8bit_blockwise_grad_fp32,\n lib.cademamix_8bit_blockwise_grad_fp16,\n lib.cademamix_8bit_blockwise_grad_bf16,\n ),\n}\n\n\ndef _optimizer_update_32bit_impl(\n optimizer_name: str,\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n unorm_vec: Optional[torch.Tensor],\n max_unorm: float,\n param_norm: float,\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n weight_decay: float,\n step: int,\n lr: float,\n gnorm_scale: float,\n skip_zeros=False,\n) -> None:\n optim_fns = str2optimizer32bit.get(optimizer_name, None)\n if optim_fns is None:\n raise ValueError(\n f\"Unsupported optimizer name: {optimizer_name}. Supported optimizers: {list(str2optimizer8bit_blockwise.keys())}\"\n )\n if g.dtype == torch.float32:\n optim_func = optim_fns[0]\n elif g.dtype == torch.float16:\n optim_func = optim_fns[1]\n elif g.dtype == torch.bfloat16 and len(optim_fns) == 3:\n optim_func = optim_fns[2]\n else:\n raise ValueError(\n f\"Gradient+optimizer bit data type combination not supported: grad {g.dtype}, optimizer {state1.dtype}\",\n )\n\n with _cuda_device_of(g):\n optim_func(\n get_ptr(g),\n get_ptr(p),\n get_ptr(state1),\n get_ptr(state2),\n get_ptr(unorm_vec),\n ct.c_float(max_unorm),\n ct.c_float(param_norm),\n ct.c_float(beta1),\n ct.c_float(beta2),\n ct.c_float(beta3),\n ct.c_float(alpha),\n ct.c_float(eps),\n ct.c_float(weight_decay),\n ct.c_int32(step),\n ct.c_float(lr),\n ct.c_float(gnorm_scale),\n ct.c_bool(skip_zeros),\n ct.c_int32(g.numel()),\n )\n\n\ndef _optimizer_update_8bit_blockwise_impl(\n optimizer_name: str,\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n step: int,\n lr: float,\n qmap1: torch.Tensor,\n qmap2: Optional[torch.Tensor],\n absmax1: torch.Tensor,\n absmax2: Optional[torch.Tensor],\n weight_decay: float,","source_hash":"c31f6e0a548a168f99abf902ee246860724607807eee83c03954002c35336ed6","truncated":false} 
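The `_optimizer_update_32bit_impl` record above dispatches through a name-to-tuple table (`str2optimizer32bit`) and then picks a native entry point by gradient dtype; note its error message lists `str2optimizer8bit_blockwise` keys, which looks like a copy-paste slip in the source. A minimal sketch of that dispatch in isolation, with a hypothetical `_FAKE_TABLE` standing in for the table of `lib.c*32bit_grad_*` pointers (the string values are placeholders, not library symbols):

```python
import torch

# _FAKE_TABLE mimics the shape of str2optimizer32bit: a tuple of
# (fp32, fp16[, bf16]) entry points per optimizer name.
_FAKE_TABLE = {
    "adam": ("adam_fp32", "adam_fp16", "adam_bf16"),
    "momentum": ("momentum_fp32", "momentum_fp16"),  # no bf16 variant
}

def select_kernel(optimizer_name: str, grad_dtype: torch.dtype) -> str:
    fns = _FAKE_TABLE.get(optimizer_name)
    if fns is None:
        raise ValueError(f"Unsupported optimizer name: {optimizer_name}")
    if grad_dtype == torch.float32:
        return fns[0]
    if grad_dtype == torch.float16:
        return fns[1]
    # bf16 is only selected when the optimizer ships a third variant,
    # mirroring the `len(optim_fns) == 3` guard in the implementation
    if grad_dtype == torch.bfloat16 and len(fns) == 3:
        return fns[2]
    raise ValueError(f"Unsupported gradient dtype: {grad_dtype}")

assert select_kernel("adam", torch.bfloat16) == "adam_bf16"
```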
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.cuda.ops._optimizer_update_8bit_blockwise_impl","uri":"program://bitsandbytes/function/bitsandbytes.backends.cuda.ops._optimizer_update_8bit_blockwise_impl#L672-L762","kind":"function","name":"_optimizer_update_8bit_blockwise_impl","path":"bitsandbytes/backends/cuda/ops.py","language":"python","start_line":672,"end_line":762,"context_start_line":652,"context_end_line":766,"code":" get_ptr(p),\n get_ptr(state1),\n get_ptr(state2),\n get_ptr(unorm_vec),\n ct.c_float(max_unorm),\n ct.c_float(param_norm),\n ct.c_float(beta1),\n ct.c_float(beta2),\n ct.c_float(beta3),\n ct.c_float(alpha),\n ct.c_float(eps),\n ct.c_float(weight_decay),\n ct.c_int32(step),\n ct.c_float(lr),\n ct.c_float(gnorm_scale),\n ct.c_bool(skip_zeros),\n ct.c_int32(g.numel()),\n )\n\n\ndef _optimizer_update_8bit_blockwise_impl(\n optimizer_name: str,\n g: torch.Tensor,\n p: torch.Tensor,\n state1: torch.Tensor,\n state2: Optional[torch.Tensor],\n beta1: float,\n beta2: float,\n beta3: float,\n alpha: float,\n eps: float,\n step: int,\n lr: float,\n qmap1: torch.Tensor,\n qmap2: Optional[torch.Tensor],\n absmax1: torch.Tensor,\n absmax2: Optional[torch.Tensor],\n weight_decay: float,\n gnorm_scale: float,\n skip_zeros=False,\n) -> None:\n # torch._check(\n # g.numel() == p.numel(),\n # lambda: f\"g and p must have the same number of elements, got {g.numel()} and {p.numel()}\",\n # )\n # compute_dtypes = [torch.float16, torch.bfloat16, torch.float32]\n\n # torch._check(\n # g.dtype in compute_dtypes,\n # lambda: f\"g must be bfloat16, float16, or float32, got {g.dtype}\",\n # )\n # torch._check(\n # g.dtype == p.dtype,\n # lambda: f\"Expected all tensors to have the same dtype, got g.dtype={g.dtype}, p.dtype={p.dtype}\",\n # )\n # torch._check(\n # state1.dtype == torch.uint8,\n # lambda: f\"state1 must be uint8, got {state1.dtype}\",\n # )\n # torch._check(\n # qmap1.dtype == absmax1.dtype == torch.float32,\n # lambda: f\"Expected qmap1 and absmax1 to be float32, got qmap1.dtype={qmap1.dtype}, absmax1.dtype={absmax1.dtype}\",\n # )\n # if state2 is not None:\n # torch._check(\n # state2.dtype == torch.uint8,\n # lambda: f\"state2 must be uint8, got {state2.dtype}\",\n # )\n # torch._check(\n # qmap2.dtype == absmax2.dtype == torch.float32,\n # lambda: f\"Expected qmap2 and absmax2 to be float32, got qmap2.dtype={qmap2.dtype}, absmax2.dtype={absmax2.dtype}\",\n # )\n optimizer_fns = str2optimizer8bit_blockwise.get(optimizer_name)\n if optimizer_fns is None:\n raise ValueError(\n f\"Unsupported optimizer name: {optimizer_name}. Supported optimizers: {list(str2optimizer8bit_blockwise.keys())}\"\n )\n\n if g.dtype == torch.float32:\n optimizer_fn = optimizer_fns[0]\n elif g.dtype == torch.float16:\n optimizer_fn = optimizer_fns[1]\n elif g.dtype == torch.bfloat16:\n optimizer_fn = optimizer_fns[2]\n else:\n raise ValueError(\n f\"Unsupported gradient dtype: {g.dtype}. 
Supported dtypes: torch.float32, torch.float16, torch.bfloat16\"\n )\n\n with _cuda_device_of(g):\n optimizer_fn(\n get_ptr(p),\n get_ptr(g),\n get_ptr(state1),\n get_ptr(state2),\n ct.c_float(beta1),\n ct.c_float(beta2),\n ct.c_float(beta3),\n ct.c_float(alpha),\n ct.c_float(eps),\n ct.c_int32(step),\n ct.c_float(lr),\n get_ptr(qmap1),\n get_ptr(qmap2),\n get_ptr(absmax1),\n get_ptr(absmax2),\n ct.c_float(weight_decay),\n ct.c_float(gnorm_scale),\n ct.c_bool(skip_zeros),\n ct.c_int32(g.numel()),\n )\n\n\nregister_kernel(\"bitsandbytes::optimizer_update_8bit_blockwise\", \"cuda\")(_optimizer_update_8bit_blockwise_impl)\nregister_kernel(\"bitsandbytes::optimizer_update_32bit\", \"cuda\")(_optimizer_update_32bit_impl)","source_hash":"c31f6e0a548a168f99abf902ee246860724607807eee83c03954002c35336ed6","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.xpu.ops","uri":"program://bitsandbytes/module/bitsandbytes.backends.xpu.ops#L1-L242","kind":"module","name":"bitsandbytes.backends.xpu.ops","path":"bitsandbytes/backends/xpu/ops.py","language":"python","start_line":1,"end_line":242,"context_start_line":1,"context_end_line":242,"code":"from collections.abc import Sequence\nimport ctypes as ct\nimport logging\n\nfrom packaging import version\nimport torch\n\nfrom bitsandbytes.functional import _get_tensor_stream, get_ptr\n\nfrom ..._ops import register_kernel\nfrom ...cextension import ErrorHandlerMockBNBNativeLibrary, lib\nfrom ..utils import triton_available\n\nlogger = logging.getLogger(__name__)\n\n# _int_mm is available in torch starting from 2.9 version\nif version.parse(torch.__version__).release >= version.parse(\"2.9\").release:\n\n @register_kernel(\"bitsandbytes::int8_linear_matmul\", \"xpu\")\n def _(A: torch.Tensor, B: torch.Tensor):\n return torch._int_mm(\n A.reshape(-1, A.shape[-1]),\n B.t(),\n ).reshape(*A.shape[:-1], B.shape[0])\n\n\ndef _dequantize_4bit_impl(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n args = (\n None,\n get_ptr(A),\n get_ptr(absmax),\n get_ptr(out),\n ct.c_int(blocksize),\n ct.c_int(out.numel()),\n _get_tensor_stream(A),\n )\n if dtype == torch.bfloat16:\n if quant_type == \"fp4\":\n lib.cdequantize_blockwise_bf16_fp4(*args)\n else:\n lib.cdequantize_blockwise_bf16_nf4(*args)\n elif dtype == torch.float16:\n if quant_type == \"fp4\":\n lib.cdequantize_blockwise_fp16_fp4(*args)\n else:\n lib.cdequantize_blockwise_fp16_nf4(*args)\n elif dtype == torch.float32:\n if quant_type == \"fp4\":\n lib.cdequantize_blockwise_fp32_fp4(*args)\n else:\n lib.cdequantize_blockwise_fp32_nf4(*args)\n\n\ndef _dequantize_blockwise_impl(\n A: torch.Tensor, absmax: torch.Tensor, code: torch.Tensor, blocksize: int, dtype: torch.dtype, out: torch.Tensor\n) -> None:\n args = (\n get_ptr(code),\n get_ptr(A),\n get_ptr(absmax),\n get_ptr(out),\n ct.c_int(blocksize),\n ct.c_int(A.numel()),\n _get_tensor_stream(A),\n )\n if dtype == torch.float16:\n lib.cdequantize_blockwise_fp16(*args)\n elif dtype == torch.bfloat16:\n lib.cdequantize_blockwise_bf16(*args)\n elif dtype == torch.float32:\n lib.cdequantize_blockwise_fp32(*args)\n\n\ndef _gemv_4bit_impl(\n A: torch.Tensor,\n B: torch.Tensor,\n shapeB: Sequence[int],\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n out: torch.Tensor,\n) -> None:\n m = ct.c_int32(1)\n n = ct.c_int32(shapeB[0])\n k = ct.c_int32(shapeB[1])\n\n lda = m\n ldb = ct.c_int32((A.shape[-1] + 1) // 2)\n ldc = m\n\n stream = 
_get_tensor_stream(A)\n if A.dtype == torch.float16:\n lib.cgemv_4bit_inference_fp16(\n m,\n n,\n k,\n get_ptr(A),\n get_ptr(B),\n get_ptr(absmax),\n get_ptr(code),\n get_ptr(out),\n lda,\n ldb,\n ldc,\n ct.c_int32(blocksize),\n stream,\n )\n elif A.dtype == torch.bfloat16:\n lib.cgemv_4bit_inference_bf16(\n m,\n n,\n k,\n get_ptr(A),\n get_ptr(B),\n get_ptr(absmax),\n get_ptr(code),\n get_ptr(out),\n lda,\n ldb,\n ldc,\n ct.c_int32(blocksize),\n stream,\n )\n elif A.dtype == torch.float32:\n lib.cgemv_4bit_inference_fp32(\n m,\n n,\n k,\n get_ptr(A),\n get_ptr(B),\n get_ptr(absmax),\n get_ptr(code),\n get_ptr(out),\n lda,\n ldb,\n ldc,\n ct.c_int32(blocksize),\n stream,\n )\n\n\n# SYCL should be faster for xpu, so at first checking if it is available.\nif not isinstance(lib, ErrorHandlerMockBNBNativeLibrary):\n logger.info(\"Register sycl bitsandbytes kernels for XPU\")\n\n # TODO: Remove the triton register when quantization sycl kernel is ready.\n if triton_available:\n from ..triton import ops as triton_ops\n\n register_kernel(\"bitsandbytes::quantize_blockwise\", \"xpu\")(triton_ops.quantize_blockwise)\n register_kernel(\"bitsandbytes::quantize_4bit\", \"xpu\")(triton_ops.quantize_4bit)\n register_kernel(\"bitsandbytes::optimizer_update_8bit_blockwise\", \"xpu\")(\n triton_ops.optimizer_update_8bit_blockwise\n )\n register_kernel(\"bitsandbytes::optimizer_update_32bit\", \"xpu\")(triton_ops.optimizer_update_32bit)\n\n @register_kernel(\"bitsandbytes::dequantize_4bit\", \"xpu\")\n def _(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n shape: Sequence[int],\n dtype: torch.dtype,\n ) -> torch.Tensor:\n out = torch.empty(shape, dtype=dtype, device=A.device)\n _dequantize_4bit_impl(A, absmax, blocksize, quant_type, dtype, out=out)\n return out\n\n @register_kernel(\"bitsandbytes::dequantize_blockwise\", \"xpu\")\n def _(\n A: torch.Tensor, absmax: torch.Tensor, code: torch.Tensor, blocksize: int, dtype: torch.dtype\n ) -> torch.Tensor:\n out = torch.empty_like(A, dtype=dtype)\n _dequantize_blockwise_impl(A, absmax, code, blocksize, dtype, out=out)\n return out\n\n @register_kernel(\"bitsandbytes::dequantize_blockwise.out\", \"xpu\")\n def _(\n A: torch.Tensor,\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n dtype: torch.dtype,\n out: torch.Tensor,\n ) -> None:\n torch._check(out.dtype == dtype, lambda: f\"Expected out.dtype == {dtype}, got {out.dtype}\")\n torch._check(out.shape == A.shape, lambda: f\"Expected out.shape == {A.shape}, got {out.shape}\")\n _dequantize_blockwise_impl(A, absmax, code, blocksize, dtype, out=out)\n\n @register_kernel(\"bitsandbytes::gemv_4bit\", \"xpu\")\n def _(\n A: torch.Tensor,\n B: torch.Tensor,\n shapeB: Sequence[int],\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n ) -> torch.Tensor:\n shape = (*A.shape[:-1], shapeB[0])\n out = torch.empty(shape, device=A.device, dtype=A.dtype)\n _gemv_4bit_impl(A, B, shapeB, absmax, code, blocksize, out=out)\n return out\n\n @register_kernel(\"bitsandbytes::gemv_4bit.out\", \"xpu\")\n def _(\n A: torch.Tensor,\n B: torch.Tensor,\n shapeB: Sequence[int],\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n out: torch.Tensor,\n ) -> None:\n torch._check(\n out.shape == (*A.shape[:-1], shapeB[0]),\n lambda: f\"Expected out.shape == {(*A.shape[:-1], shapeB[0])}, got {out.shape}\",\n )\n torch._check(out.dtype == A.dtype, lambda: f\"Expected out.dtype == {A.dtype}, got {out.dtype}\")\n _gemv_4bit_impl(A, B, shapeB, absmax, code, blocksize, 
out=out)\nelif triton_available:\n logger.info(\"Register triton bitsandbytes kernels for XPU\")\n from ..triton import ops as triton_ops\n\n register_kernel(\"bitsandbytes::quantize_blockwise\", \"xpu\")(triton_ops.quantize_blockwise)\n register_kernel(\"bitsandbytes::dequantize_blockwise.out\", \"xpu\")(triton_ops.dequantize_blockwise_inplace)\n register_kernel(\"bitsandbytes::dequantize_blockwise\", \"xpu\")(triton_ops.dequantize_blockwise)\n register_kernel(\"bitsandbytes::quantize_4bit\", \"xpu\")(triton_ops.quantize_4bit)\n register_kernel(\"bitsandbytes::dequantize_4bit.out\", \"xpu\")(triton_ops.dequantize_4bit_inplace)\n register_kernel(\"bitsandbytes::dequantize_4bit\", \"xpu\")(triton_ops.dequantize_4bit)\n register_kernel(\"bitsandbytes::gemv_4bit\", \"xpu\")(triton_ops.gemv_4bit)\n register_kernel(\"bitsandbytes::optimizer_update_8bit_blockwise\", \"xpu\")(triton_ops.optimizer_update_8bit_blockwise)\n register_kernel(\"bitsandbytes::optimizer_update_32bit\", \"xpu\")(triton_ops.optimizer_update_32bit)\nelse:\n logger.warning(\"Register pytorch bitsandbytes kernels for XPU because no native library or triton packages found.\")","source_hash":"f55e99efbc3b835e8a6293b713803dc3ebce7225b1b5ef01b1dadf8c2342377e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.xpu.ops._dequantize_4bit_impl","uri":"program://bitsandbytes/function/bitsandbytes.backends.xpu.ops._dequantize_4bit_impl#L27-L58","kind":"function","name":"_dequantize_4bit_impl","path":"bitsandbytes/backends/xpu/ops.py","language":"python","start_line":27,"end_line":58,"context_start_line":7,"context_end_line":78,"code":"\nfrom bitsandbytes.functional import _get_tensor_stream, get_ptr\n\nfrom ..._ops import register_kernel\nfrom ...cextension import ErrorHandlerMockBNBNativeLibrary, lib\nfrom ..utils import triton_available\n\nlogger = logging.getLogger(__name__)\n\n# _int_mm is available in torch starting from 2.9 version\nif version.parse(torch.__version__).release >= version.parse(\"2.9\").release:\n\n @register_kernel(\"bitsandbytes::int8_linear_matmul\", \"xpu\")\n def _(A: torch.Tensor, B: torch.Tensor):\n return torch._int_mm(\n A.reshape(-1, A.shape[-1]),\n B.t(),\n ).reshape(*A.shape[:-1], B.shape[0])\n\n\ndef _dequantize_4bit_impl(\n A: torch.Tensor,\n absmax: torch.Tensor,\n blocksize: int,\n quant_type: str,\n dtype: torch.dtype,\n out: torch.Tensor,\n) -> None:\n args = (\n None,\n get_ptr(A),\n get_ptr(absmax),\n get_ptr(out),\n ct.c_int(blocksize),\n ct.c_int(out.numel()),\n _get_tensor_stream(A),\n )\n if dtype == torch.bfloat16:\n if quant_type == \"fp4\":\n lib.cdequantize_blockwise_bf16_fp4(*args)\n else:\n lib.cdequantize_blockwise_bf16_nf4(*args)\n elif dtype == torch.float16:\n if quant_type == \"fp4\":\n lib.cdequantize_blockwise_fp16_fp4(*args)\n else:\n lib.cdequantize_blockwise_fp16_nf4(*args)\n elif dtype == torch.float32:\n if quant_type == \"fp4\":\n lib.cdequantize_blockwise_fp32_fp4(*args)\n else:\n lib.cdequantize_blockwise_fp32_nf4(*args)\n\n\ndef _dequantize_blockwise_impl(\n A: torch.Tensor, absmax: torch.Tensor, code: torch.Tensor, blocksize: int, dtype: torch.dtype, out: torch.Tensor\n) -> None:\n args = (\n get_ptr(code),\n get_ptr(A),\n get_ptr(absmax),\n get_ptr(out),\n ct.c_int(blocksize),\n ct.c_int(A.numel()),\n _get_tensor_stream(A),\n )\n if dtype == torch.float16:\n lib.cdequantize_blockwise_fp16(*args)\n elif dtype == torch.bfloat16:\n lib.cdequantize_blockwise_bf16(*args)\n elif dtype == torch.float32:\n 
lib.cdequantize_blockwise_fp32(*args)","source_hash":"f55e99efbc3b835e8a6293b713803dc3ebce7225b1b5ef01b1dadf8c2342377e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.xpu.ops._dequantize_blockwise_impl","uri":"program://bitsandbytes/function/bitsandbytes.backends.xpu.ops._dequantize_blockwise_impl#L61-L78","kind":"function","name":"_dequantize_blockwise_impl","path":"bitsandbytes/backends/xpu/ops.py","language":"python","start_line":61,"end_line":78,"context_start_line":41,"context_end_line":98,"code":" ct.c_int(out.numel()),\n _get_tensor_stream(A),\n )\n if dtype == torch.bfloat16:\n if quant_type == \"fp4\":\n lib.cdequantize_blockwise_bf16_fp4(*args)\n else:\n lib.cdequantize_blockwise_bf16_nf4(*args)\n elif dtype == torch.float16:\n if quant_type == \"fp4\":\n lib.cdequantize_blockwise_fp16_fp4(*args)\n else:\n lib.cdequantize_blockwise_fp16_nf4(*args)\n elif dtype == torch.float32:\n if quant_type == \"fp4\":\n lib.cdequantize_blockwise_fp32_fp4(*args)\n else:\n lib.cdequantize_blockwise_fp32_nf4(*args)\n\n\ndef _dequantize_blockwise_impl(\n A: torch.Tensor, absmax: torch.Tensor, code: torch.Tensor, blocksize: int, dtype: torch.dtype, out: torch.Tensor\n) -> None:\n args = (\n get_ptr(code),\n get_ptr(A),\n get_ptr(absmax),\n get_ptr(out),\n ct.c_int(blocksize),\n ct.c_int(A.numel()),\n _get_tensor_stream(A),\n )\n if dtype == torch.float16:\n lib.cdequantize_blockwise_fp16(*args)\n elif dtype == torch.bfloat16:\n lib.cdequantize_blockwise_bf16(*args)\n elif dtype == torch.float32:\n lib.cdequantize_blockwise_fp32(*args)\n\n\ndef _gemv_4bit_impl(\n A: torch.Tensor,\n B: torch.Tensor,\n shapeB: Sequence[int],\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n out: torch.Tensor,\n) -> None:\n m = ct.c_int32(1)\n n = ct.c_int32(shapeB[0])\n k = ct.c_int32(shapeB[1])\n\n lda = m\n ldb = ct.c_int32((A.shape[-1] + 1) // 2)\n ldc = m\n\n stream = _get_tensor_stream(A)","source_hash":"f55e99efbc3b835e8a6293b713803dc3ebce7225b1b5ef01b1dadf8c2342377e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.xpu.ops._gemv_4bit_impl","uri":"program://bitsandbytes/function/bitsandbytes.backends.xpu.ops._gemv_4bit_impl#L81-L146","kind":"function","name":"_gemv_4bit_impl","path":"bitsandbytes/backends/xpu/ops.py","language":"python","start_line":81,"end_line":146,"context_start_line":61,"context_end_line":166,"code":"def _dequantize_blockwise_impl(\n A: torch.Tensor, absmax: torch.Tensor, code: torch.Tensor, blocksize: int, dtype: torch.dtype, out: torch.Tensor\n) -> None:\n args = (\n get_ptr(code),\n get_ptr(A),\n get_ptr(absmax),\n get_ptr(out),\n ct.c_int(blocksize),\n ct.c_int(A.numel()),\n _get_tensor_stream(A),\n )\n if dtype == torch.float16:\n lib.cdequantize_blockwise_fp16(*args)\n elif dtype == torch.bfloat16:\n lib.cdequantize_blockwise_bf16(*args)\n elif dtype == torch.float32:\n lib.cdequantize_blockwise_fp32(*args)\n\n\ndef _gemv_4bit_impl(\n A: torch.Tensor,\n B: torch.Tensor,\n shapeB: Sequence[int],\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n out: torch.Tensor,\n) -> None:\n m = ct.c_int32(1)\n n = ct.c_int32(shapeB[0])\n k = ct.c_int32(shapeB[1])\n\n lda = m\n ldb = ct.c_int32((A.shape[-1] + 1) // 2)\n ldc = m\n\n stream = _get_tensor_stream(A)\n if A.dtype == torch.float16:\n lib.cgemv_4bit_inference_fp16(\n m,\n n,\n k,\n get_ptr(A),\n get_ptr(B),\n get_ptr(absmax),\n get_ptr(code),\n get_ptr(out),\n lda,\n ldb,\n ldc,\n ct.c_int32(blocksize),\n stream,\n )\n elif A.dtype == 
torch.bfloat16:\n lib.cgemv_4bit_inference_bf16(\n m,\n n,\n k,\n get_ptr(A),\n get_ptr(B),\n get_ptr(absmax),\n get_ptr(code),\n get_ptr(out),\n lda,\n ldb,\n ldc,\n ct.c_int32(blocksize),\n stream,\n )\n elif A.dtype == torch.float32:\n lib.cgemv_4bit_inference_fp32(\n m,\n n,\n k,\n get_ptr(A),\n get_ptr(B),\n get_ptr(absmax),\n get_ptr(code),\n get_ptr(out),\n lda,\n ldb,\n ldc,\n ct.c_int32(blocksize),\n stream,\n )\n\n\n# SYCL should be faster for xpu, so at first checking if it is available.\nif not isinstance(lib, ErrorHandlerMockBNBNativeLibrary):\n logger.info(\"Register sycl bitsandbytes kernels for XPU\")\n\n # TODO: Remove the triton register when quantization sycl kernel is ready.\n if triton_available:\n from ..triton import ops as triton_ops\n\n register_kernel(\"bitsandbytes::quantize_blockwise\", \"xpu\")(triton_ops.quantize_blockwise)\n register_kernel(\"bitsandbytes::quantize_4bit\", \"xpu\")(triton_ops.quantize_4bit)\n register_kernel(\"bitsandbytes::optimizer_update_8bit_blockwise\", \"xpu\")(\n triton_ops.optimizer_update_8bit_blockwise\n )\n register_kernel(\"bitsandbytes::optimizer_update_32bit\", \"xpu\")(triton_ops.optimizer_update_32bit)\n\n @register_kernel(\"bitsandbytes::dequantize_4bit\", \"xpu\")\n def _(\n A: torch.Tensor,","source_hash":"f55e99efbc3b835e8a6293b713803dc3ebce7225b1b5ef01b1dadf8c2342377e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.backends.xpu.ops._","uri":"program://bitsandbytes/function/bitsandbytes.backends.xpu.ops._#L213-L227","kind":"function","name":"_","path":"bitsandbytes/backends/xpu/ops.py","language":"python","start_line":213,"end_line":227,"context_start_line":193,"context_end_line":242,"code":" ) -> None:\n torch._check(out.dtype == dtype, lambda: f\"Expected out.dtype == {dtype}, got {out.dtype}\")\n torch._check(out.shape == A.shape, lambda: f\"Expected out.shape == {A.shape}, got {out.shape}\")\n _dequantize_blockwise_impl(A, absmax, code, blocksize, dtype, out=out)\n\n @register_kernel(\"bitsandbytes::gemv_4bit\", \"xpu\")\n def _(\n A: torch.Tensor,\n B: torch.Tensor,\n shapeB: Sequence[int],\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n ) -> torch.Tensor:\n shape = (*A.shape[:-1], shapeB[0])\n out = torch.empty(shape, device=A.device, dtype=A.dtype)\n _gemv_4bit_impl(A, B, shapeB, absmax, code, blocksize, out=out)\n return out\n\n @register_kernel(\"bitsandbytes::gemv_4bit.out\", \"xpu\")\n def _(\n A: torch.Tensor,\n B: torch.Tensor,\n shapeB: Sequence[int],\n absmax: torch.Tensor,\n code: torch.Tensor,\n blocksize: int,\n out: torch.Tensor,\n ) -> None:\n torch._check(\n out.shape == (*A.shape[:-1], shapeB[0]),\n lambda: f\"Expected out.shape == {(*A.shape[:-1], shapeB[0])}, got {out.shape}\",\n )\n torch._check(out.dtype == A.dtype, lambda: f\"Expected out.dtype == {A.dtype}, got {out.dtype}\")\n _gemv_4bit_impl(A, B, shapeB, absmax, code, blocksize, out=out)\nelif triton_available:\n logger.info(\"Register triton bitsandbytes kernels for XPU\")\n from ..triton import ops as triton_ops\n\n register_kernel(\"bitsandbytes::quantize_blockwise\", \"xpu\")(triton_ops.quantize_blockwise)\n register_kernel(\"bitsandbytes::dequantize_blockwise.out\", \"xpu\")(triton_ops.dequantize_blockwise_inplace)\n register_kernel(\"bitsandbytes::dequantize_blockwise\", \"xpu\")(triton_ops.dequantize_blockwise)\n register_kernel(\"bitsandbytes::quantize_4bit\", \"xpu\")(triton_ops.quantize_4bit)\n register_kernel(\"bitsandbytes::dequantize_4bit.out\", 
\"xpu\")(triton_ops.dequantize_4bit_inplace)\n register_kernel(\"bitsandbytes::dequantize_4bit\", \"xpu\")(triton_ops.dequantize_4bit)\n register_kernel(\"bitsandbytes::gemv_4bit\", \"xpu\")(triton_ops.gemv_4bit)\n register_kernel(\"bitsandbytes::optimizer_update_8bit_blockwise\", \"xpu\")(triton_ops.optimizer_update_8bit_blockwise)\n register_kernel(\"bitsandbytes::optimizer_update_32bit\", \"xpu\")(triton_ops.optimizer_update_32bit)\nelse:\n logger.warning(\"Register pytorch bitsandbytes kernels for XPU because no native library or triton packages found.\")","source_hash":"f55e99efbc3b835e8a6293b713803dc3ebce7225b1b5ef01b1dadf8c2342377e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.int8_matmul_rowwise_dequantize","uri":"program://bitsandbytes/module/bitsandbytes.triton.int8_matmul_rowwise_dequantize#L1-L207","kind":"module","name":"bitsandbytes.triton.int8_matmul_rowwise_dequantize","path":"bitsandbytes/triton/int8_matmul_rowwise_dequantize.py","language":"python","start_line":1,"end_line":207,"context_start_line":1,"context_end_line":207,"code":"import torch\n\nfrom bitsandbytes.triton.triton_utils import is_triton_available\n\nif not is_triton_available():\n\n def int8_matmul_rowwise_dequantize(a, b, state_x, state_w, bias):\n return None\nelse:\n import triton\n import triton.language as tl\n\n from .matmul_perf_model import early_config_prune, estimate_matmul_time\n\n # This is a matmul kernel based on triton.ops.matmul\n # It is modified to support rowwise quantized input and columnwise quantized weight\n # It's purpose is fused matmul then dequantize\n # It does support bias.\n\n def init_to_zero(name):\n return lambda nargs: nargs[name].zero_()\n\n def get_configs_io_bound():\n configs = []\n for num_stages in [2, 3, 4, 5, 6]:\n for block_m in [16, 32]:\n for block_k in [32, 64]:\n for block_n in [32, 64, 128, 256]:\n num_warps = 2 if block_n <= 64 else 4\n configs.append(\n triton.Config(\n {\"BLOCK_M\": block_m, \"BLOCK_N\": block_n, \"BLOCK_K\": block_k, \"SPLIT_K\": 1},\n num_stages=num_stages,\n num_warps=num_warps,\n ),\n )\n # split_k\n for split_k in [2, 4, 8, 16]:\n configs.append(\n triton.Config(\n {\"BLOCK_M\": block_m, \"BLOCK_N\": block_n, \"BLOCK_K\": block_k, \"SPLIT_K\": split_k},\n num_stages=num_stages,\n num_warps=num_warps,\n pre_hook=init_to_zero(\"C\"),\n ),\n )\n return configs\n\n @triton.autotune(\n configs=[\n # basic configs for compute-bound matmuls\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 256, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=3, num_warps=8),\n triton.Config({\"BLOCK_M\": 256, \"BLOCK_N\": 128, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=3, num_warps=8),\n triton.Config({\"BLOCK_M\": 256, \"BLOCK_N\": 64, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 256, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 128, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 64, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 128, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 32, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 32, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=5, num_warps=2),\n # good for int8\n triton.Config({\"BLOCK_M\": 128, 
\"BLOCK_N\": 256, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=3, num_warps=8),\n triton.Config({\"BLOCK_M\": 256, \"BLOCK_N\": 128, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=3, num_warps=8),\n triton.Config({\"BLOCK_M\": 256, \"BLOCK_N\": 64, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 256, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 128, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 64, \"BLOCK_K\": 64, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 128, \"BLOCK_K\": 64, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 32, \"BLOCK_K\": 64, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 32, \"BLOCK_K\": 64, \"SPLIT_K\": 1}, num_stages=5, num_warps=2),\n *get_configs_io_bound(),\n ],\n key=[\"M\", \"N\", \"K\"],\n prune_configs_by={\"early_config_prune\": early_config_prune, \"perf_model\": estimate_matmul_time, \"top_k\": 10},\n )\n @triton.heuristics(\n {\n \"EVEN_K\": lambda args: args[\"K\"] % (args[\"BLOCK_K\"] * args[\"SPLIT_K\"]) == 0,\n },\n )\n @triton.jit\n def _int8_matmul_rowwise_dequantize(\n A,\n B,\n C,\n bias,\n state_x_ptr,\n state_w_ptr,\n M,\n N,\n K,\n divfactor,\n has_bias: tl.constexpr,\n stride_am,\n stride_ak,\n stride_bk,\n stride_bn,\n stride_cm,\n stride_cn,\n BLOCK_M: tl.constexpr,\n BLOCK_N: tl.constexpr,\n BLOCK_K: tl.constexpr,\n GROUP_M: tl.constexpr,\n SPLIT_K: tl.constexpr,\n EVEN_K: tl.constexpr,\n ACC_TYPE: tl.constexpr,\n ):\n # matrix multiplication\n pid = tl.program_id(0)\n pid_z = tl.program_id(1)\n grid_m = tl.cdiv(M, BLOCK_M)\n grid_n = tl.cdiv(N, BLOCK_N)\n # re-order program ID for better L2 performance\n width = GROUP_M * grid_n\n group_id = pid // width\n group_size = min(grid_m - group_id * GROUP_M, GROUP_M)\n pid_m = group_id * GROUP_M + (pid % group_size)\n pid_n = (pid % width) // (group_size)\n # do matrix multiplication\n rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)\n rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)\n ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)\n rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)\n rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)\n # pointers\n A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)\n B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)\n\n # rematerialize rm and rn to save registers\n rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)\n rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)\n\n w_factor = tl.load(state_w_ptr + rbn)[None, :]\n x_factor = tl.load(state_x_ptr + ram)[:, None]\n\n # acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)\n acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.int32)\n for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)):\n if EVEN_K:\n a = tl.load(A)\n b = tl.load(B)\n else:\n k_remaining = K - k * (BLOCK_K * SPLIT_K)\n a = tl.load(A, mask=rk[None, :] < k_remaining, other=0.0)\n b = tl.load(B, mask=rk[:, None] < k_remaining, other=0.0)\n acc += tl.dot(a, b)\n A += BLOCK_K * SPLIT_K * stride_ak\n B += BLOCK_K * SPLIT_K * stride_bk\n\n acc = w_factor * (x_factor * (acc * divfactor))\n acc = acc.to(C.dtype.element_ty)\n\n if has_bias:\n bias = tl.load(bias + rn).to(C.dtype.element_ty)\n acc = acc + bias[None, :]\n\n C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)\n mask = (rm < M)[:, None] & (rn 
< N)[None, :]\n # handles write-back with reduction-splitting\n if SPLIT_K == 1:\n tl.store(C, acc, mask=mask)\n else:\n tl.atomic_add(C, acc, mask=mask)\n\n def int8_matmul_rowwise_dequantize(a, b, state_x, state_w, bias):\n divfactor = 1.0 / (127.0 * 127.0)\n\n has_bias = 0 if bias is None else 1\n\n device = a.device\n # handle non-contiguous inputs if necessary\n if a.stride(0) > 1 and a.stride(1) > 1:\n a = a.contiguous()\n if b.stride(0) > 1 and b.stride(1) > 1:\n b = b.contiguous()\n # checks constraints\n assert a.shape[1] == b.shape[0], \"incompatible dimensions\"\n M, K = a.shape\n _, N = b.shape\n # allocates output\n c = torch.empty((M, N), device=device, dtype=torch.float16)\n # accumulator types\n ACC_TYPE = tl.float32 # if a.dtype in [torch.float16, torch.bfloat16, torch.float32] else tl.int32\n # launch int8_matmul_rowwise_dequantize kernel\n grid = lambda META: (triton.cdiv(M, META[\"BLOCK_M\"]) * triton.cdiv(N, META[\"BLOCK_N\"]), META[\"SPLIT_K\"])\n _int8_matmul_rowwise_dequantize[grid](\n a,\n b,\n c,\n bias,\n state_x,\n state_w,\n M,\n N,\n K,\n divfactor,\n has_bias,\n a.stride(0),\n a.stride(1),\n b.stride(0),\n b.stride(1),\n c.stride(0),\n c.stride(1),\n GROUP_M=8,\n ACC_TYPE=ACC_TYPE,\n )\n return c","source_hash":"dad172677f7537ee1e30efc8b57f56e631d8536ac85d6ee8a433937ae982a177","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.int8_matmul_rowwise_dequantize.int8_matmul_rowwise_dequantize","uri":"program://bitsandbytes/function/bitsandbytes.triton.int8_matmul_rowwise_dequantize.int8_matmul_rowwise_dequantize#L165-L207","kind":"function","name":"int8_matmul_rowwise_dequantize","path":"bitsandbytes/triton/int8_matmul_rowwise_dequantize.py","language":"python","start_line":165,"end_line":207,"context_start_line":145,"context_end_line":207,"code":" b = tl.load(B, mask=rk[:, None] < k_remaining, other=0.0)\n acc += tl.dot(a, b)\n A += BLOCK_K * SPLIT_K * stride_ak\n B += BLOCK_K * SPLIT_K * stride_bk\n\n acc = w_factor * (x_factor * (acc * divfactor))\n acc = acc.to(C.dtype.element_ty)\n\n if has_bias:\n bias = tl.load(bias + rn).to(C.dtype.element_ty)\n acc = acc + bias[None, :]\n\n C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)\n mask = (rm < M)[:, None] & (rn < N)[None, :]\n # handles write-back with reduction-splitting\n if SPLIT_K == 1:\n tl.store(C, acc, mask=mask)\n else:\n tl.atomic_add(C, acc, mask=mask)\n\n def int8_matmul_rowwise_dequantize(a, b, state_x, state_w, bias):\n divfactor = 1.0 / (127.0 * 127.0)\n\n has_bias = 0 if bias is None else 1\n\n device = a.device\n # handle non-contiguous inputs if necessary\n if a.stride(0) > 1 and a.stride(1) > 1:\n a = a.contiguous()\n if b.stride(0) > 1 and b.stride(1) > 1:\n b = b.contiguous()\n # checks constraints\n assert a.shape[1] == b.shape[0], \"incompatible dimensions\"\n M, K = a.shape\n _, N = b.shape\n # allocates output\n c = torch.empty((M, N), device=device, dtype=torch.float16)\n # accumulator types\n ACC_TYPE = tl.float32 # if a.dtype in [torch.float16, torch.bfloat16, torch.float32] else tl.int32\n # launch int8_matmul_rowwise_dequantize kernel\n grid = lambda META: (triton.cdiv(M, META[\"BLOCK_M\"]) * triton.cdiv(N, META[\"BLOCK_N\"]), META[\"SPLIT_K\"])\n _int8_matmul_rowwise_dequantize[grid](\n a,\n b,\n c,\n bias,\n state_x,\n state_w,\n M,\n N,\n K,\n divfactor,\n has_bias,\n a.stride(0),\n a.stride(1),\n b.stride(0),\n b.stride(1),\n c.stride(0),\n c.stride(1),\n GROUP_M=8,\n ACC_TYPE=ACC_TYPE,\n )\n return 
c","source_hash":"dad172677f7537ee1e30efc8b57f56e631d8536ac85d6ee8a433937ae982a177","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.int8_matmul_rowwise_dequantize.init_to_zero","uri":"program://bitsandbytes/function/bitsandbytes.triton.int8_matmul_rowwise_dequantize.init_to_zero#L20-L21","kind":"function","name":"init_to_zero","path":"bitsandbytes/triton/int8_matmul_rowwise_dequantize.py","language":"python","start_line":20,"end_line":21,"context_start_line":1,"context_end_line":41,"code":"import torch\n\nfrom bitsandbytes.triton.triton_utils import is_triton_available\n\nif not is_triton_available():\n\n def int8_matmul_rowwise_dequantize(a, b, state_x, state_w, bias):\n return None\nelse:\n import triton\n import triton.language as tl\n\n from .matmul_perf_model import early_config_prune, estimate_matmul_time\n\n # This is a matmul kernel based on triton.ops.matmul\n # It is modified to support rowwise quantized input and columnwise quantized weight\n # It's purpose is fused matmul then dequantize\n # It does support bias.\n\n def init_to_zero(name):\n return lambda nargs: nargs[name].zero_()\n\n def get_configs_io_bound():\n configs = []\n for num_stages in [2, 3, 4, 5, 6]:\n for block_m in [16, 32]:\n for block_k in [32, 64]:\n for block_n in [32, 64, 128, 256]:\n num_warps = 2 if block_n <= 64 else 4\n configs.append(\n triton.Config(\n {\"BLOCK_M\": block_m, \"BLOCK_N\": block_n, \"BLOCK_K\": block_k, \"SPLIT_K\": 1},\n num_stages=num_stages,\n num_warps=num_warps,\n ),\n )\n # split_k\n for split_k in [2, 4, 8, 16]:\n configs.append(\n triton.Config(\n {\"BLOCK_M\": block_m, \"BLOCK_N\": block_n, \"BLOCK_K\": block_k, \"SPLIT_K\": split_k},","source_hash":"dad172677f7537ee1e30efc8b57f56e631d8536ac85d6ee8a433937ae982a177","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.int8_matmul_rowwise_dequantize.get_configs_io_bound","uri":"program://bitsandbytes/function/bitsandbytes.triton.int8_matmul_rowwise_dequantize.get_configs_io_bound#L23-L47","kind":"function","name":"get_configs_io_bound","path":"bitsandbytes/triton/int8_matmul_rowwise_dequantize.py","language":"python","start_line":23,"end_line":47,"context_start_line":3,"context_end_line":67,"code":"from bitsandbytes.triton.triton_utils import is_triton_available\n\nif not is_triton_available():\n\n def int8_matmul_rowwise_dequantize(a, b, state_x, state_w, bias):\n return None\nelse:\n import triton\n import triton.language as tl\n\n from .matmul_perf_model import early_config_prune, estimate_matmul_time\n\n # This is a matmul kernel based on triton.ops.matmul\n # It is modified to support rowwise quantized input and columnwise quantized weight\n # It's purpose is fused matmul then dequantize\n # It does support bias.\n\n def init_to_zero(name):\n return lambda nargs: nargs[name].zero_()\n\n def get_configs_io_bound():\n configs = []\n for num_stages in [2, 3, 4, 5, 6]:\n for block_m in [16, 32]:\n for block_k in [32, 64]:\n for block_n in [32, 64, 128, 256]:\n num_warps = 2 if block_n <= 64 else 4\n configs.append(\n triton.Config(\n {\"BLOCK_M\": block_m, \"BLOCK_N\": block_n, \"BLOCK_K\": block_k, \"SPLIT_K\": 1},\n num_stages=num_stages,\n num_warps=num_warps,\n ),\n )\n # split_k\n for split_k in [2, 4, 8, 16]:\n configs.append(\n triton.Config(\n {\"BLOCK_M\": block_m, \"BLOCK_N\": block_n, \"BLOCK_K\": block_k, \"SPLIT_K\": split_k},\n num_stages=num_stages,\n num_warps=num_warps,\n pre_hook=init_to_zero(\"C\"),\n ),\n )\n return configs\n\n @triton.autotune(\n 
configs=[\n # basic configs for compute-bound matmuls\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 256, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=3, num_warps=8),\n triton.Config({\"BLOCK_M\": 256, \"BLOCK_N\": 128, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=3, num_warps=8),\n triton.Config({\"BLOCK_M\": 256, \"BLOCK_N\": 64, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 256, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 128, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 64, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 128, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 32, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 32, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=5, num_warps=2),\n # good for int8\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 256, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=3, num_warps=8),\n triton.Config({\"BLOCK_M\": 256, \"BLOCK_N\": 128, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=3, num_warps=8),\n triton.Config({\"BLOCK_M\": 256, \"BLOCK_N\": 64, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 256, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 128, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 64, \"BLOCK_K\": 64, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),","source_hash":"dad172677f7537ee1e30efc8b57f56e631d8536ac85d6ee8a433937ae982a177","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.int8_matmul_rowwise_dequantize._int8_matmul_rowwise_dequantize","uri":"program://bitsandbytes/function/bitsandbytes.triton.int8_matmul_rowwise_dequantize._int8_matmul_rowwise_dequantize#L82-L163","kind":"function","name":"_int8_matmul_rowwise_dequantize","path":"bitsandbytes/triton/int8_matmul_rowwise_dequantize.py","language":"python","start_line":82,"end_line":163,"context_start_line":62,"context_end_line":183,"code":" triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 256, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=3, num_warps=8),\n triton.Config({\"BLOCK_M\": 256, \"BLOCK_N\": 128, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=3, num_warps=8),\n triton.Config({\"BLOCK_M\": 256, \"BLOCK_N\": 64, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 256, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 128, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 64, \"BLOCK_K\": 64, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 128, \"BLOCK_K\": 64, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 32, \"BLOCK_K\": 64, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 32, \"BLOCK_K\": 64, \"SPLIT_K\": 1}, num_stages=5, num_warps=2),\n *get_configs_io_bound(),\n ],\n key=[\"M\", \"N\", \"K\"],\n prune_configs_by={\"early_config_prune\": early_config_prune, \"perf_model\": 
estimate_matmul_time, \"top_k\": 10},\n )\n @triton.heuristics(\n {\n \"EVEN_K\": lambda args: args[\"K\"] % (args[\"BLOCK_K\"] * args[\"SPLIT_K\"]) == 0,\n },\n )\n @triton.jit\n def _int8_matmul_rowwise_dequantize(\n A,\n B,\n C,\n bias,\n state_x_ptr,\n state_w_ptr,\n M,\n N,\n K,\n divfactor,\n has_bias: tl.constexpr,\n stride_am,\n stride_ak,\n stride_bk,\n stride_bn,\n stride_cm,\n stride_cn,\n BLOCK_M: tl.constexpr,\n BLOCK_N: tl.constexpr,\n BLOCK_K: tl.constexpr,\n GROUP_M: tl.constexpr,\n SPLIT_K: tl.constexpr,\n EVEN_K: tl.constexpr,\n ACC_TYPE: tl.constexpr,\n ):\n # matrix multiplication\n pid = tl.program_id(0)\n pid_z = tl.program_id(1)\n grid_m = tl.cdiv(M, BLOCK_M)\n grid_n = tl.cdiv(N, BLOCK_N)\n # re-order program ID for better L2 performance\n width = GROUP_M * grid_n\n group_id = pid // width\n group_size = min(grid_m - group_id * GROUP_M, GROUP_M)\n pid_m = group_id * GROUP_M + (pid % group_size)\n pid_n = (pid % width) // (group_size)\n # do matrix multiplication\n rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)\n rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)\n ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)\n rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)\n rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)\n # pointers\n A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)\n B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)\n\n # rematerialize rm and rn to save registers\n rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)\n rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)\n\n w_factor = tl.load(state_w_ptr + rbn)[None, :]\n x_factor = tl.load(state_x_ptr + ram)[:, None]\n\n # acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)\n acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.int32)\n for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)):\n if EVEN_K:\n a = tl.load(A)\n b = tl.load(B)\n else:\n k_remaining = K - k * (BLOCK_K * SPLIT_K)\n a = tl.load(A, mask=rk[None, :] < k_remaining, other=0.0)\n b = tl.load(B, mask=rk[:, None] < k_remaining, other=0.0)\n acc += tl.dot(a, b)\n A += BLOCK_K * SPLIT_K * stride_ak\n B += BLOCK_K * SPLIT_K * stride_bk\n\n acc = w_factor * (x_factor * (acc * divfactor))\n acc = acc.to(C.dtype.element_ty)\n\n if has_bias:\n bias = tl.load(bias + rn).to(C.dtype.element_ty)\n acc = acc + bias[None, :]\n\n C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)\n mask = (rm < M)[:, None] & (rn < N)[None, :]\n # handles write-back with reduction-splitting\n if SPLIT_K == 1:\n tl.store(C, acc, mask=mask)\n else:\n tl.atomic_add(C, acc, mask=mask)\n\n def int8_matmul_rowwise_dequantize(a, b, state_x, state_w, bias):\n divfactor = 1.0 / (127.0 * 127.0)\n\n has_bias = 0 if bias is None else 1\n\n device = a.device\n # handle non-contiguous inputs if necessary\n if a.stride(0) > 1 and a.stride(1) > 1:\n a = a.contiguous()\n if b.stride(0) > 1 and b.stride(1) > 1:\n b = b.contiguous()\n # checks constraints\n assert a.shape[1] == b.shape[0], \"incompatible dimensions\"\n M, K = a.shape\n _, N = b.shape\n # allocates output\n c = torch.empty((M, N), device=device, dtype=torch.float16)\n # accumulator types\n ACC_TYPE = tl.float32 # if a.dtype in [torch.float16, torch.bfloat16, torch.float32] else tl.int32","source_hash":"dad172677f7537ee1e30efc8b57f56e631d8536ac85d6ee8a433937ae982a177","truncated":false} 
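The `_int8_matmul_rowwise_dequantize` kernel and its wrapper above fuse an int8 matmul with the dequantize epilogue `C = state_w * (state_x * (acc / 127^2)) + bias`, where `state_x` holds the per-row absmax scales of `a` and `state_w` the per-column absmax scales of `b`. A hedged, CPU-only PyTorch reference of the same arithmetic (`ref_int8_matmul_rowwise_dequantize` is illustrative, not part of the library; integer `@` matmul is supported on CPU but not on CUDA):

```python
import torch

def ref_int8_matmul_rowwise_dequantize(a, b, state_x, state_w, bias=None):
    # int8 x int8 matmul with int32 accumulation, as in the Triton kernel
    acc = (a.to(torch.int32) @ b.to(torch.int32)).float()
    divfactor = 1.0 / (127.0 * 127.0)
    # undo both symmetric scales: rowwise absmax of a, columnwise absmax of b
    out = state_w.view(1, -1) * (state_x.view(-1, 1) * (acc * divfactor))
    if bias is not None:
        out = out + bias.view(1, -1)
    return out.to(torch.float16)

# usage: quantize rowwise/columnwise, then compare against the fp32 product;
# `approx` should track x @ w to within quantization error
x, w = torch.randn(4, 8), torch.randn(8, 3)
sx, sw = x.abs().amax(dim=1), w.abs().amax(dim=0)
qa = torch.round(127.0 * x / sx[:, None]).to(torch.int8)
qb = torch.round(127.0 * w / sw[None, :]).to(torch.int8)
approx = ref_int8_matmul_rowwise_dequantize(qa, qb, sx, sw)
```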
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.triton_utils","uri":"program://bitsandbytes/module/bitsandbytes.triton.triton_utils#L1-L11","kind":"module","name":"bitsandbytes.triton.triton_utils","path":"bitsandbytes/triton/triton_utils.py","language":"python","start_line":1,"end_line":11,"context_start_line":1,"context_end_line":11,"code":"import functools\n\n\n@functools.lru_cache(None)\ndef is_triton_available():\n try:\n from torch.utils._triton import has_triton, has_triton_package\n\n return has_triton_package() and has_triton()\n except Exception:\n return False","source_hash":"74baf2369f2dafd058d584ed503762ddfbfd90025851b89ea82f10a6b956f5bc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.triton_utils.is_triton_available","uri":"program://bitsandbytes/function/bitsandbytes.triton.triton_utils.is_triton_available#L5-L11","kind":"function","name":"is_triton_available","path":"bitsandbytes/triton/triton_utils.py","language":"python","start_line":5,"end_line":11,"context_start_line":1,"context_end_line":11,"code":"import functools\n\n\n@functools.lru_cache(None)\ndef is_triton_available():\n try:\n from torch.utils._triton import has_triton, has_triton_package\n\n return has_triton_package() and has_triton()\n except Exception:\n return False","source_hash":"74baf2369f2dafd058d584ed503762ddfbfd90025851b89ea82f10a6b956f5bc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.quantize_global","uri":"program://bitsandbytes/module/bitsandbytes.triton.quantize_global#L1-L124","kind":"module","name":"bitsandbytes.triton.quantize_global","path":"bitsandbytes/triton/quantize_global.py","language":"python","start_line":1,"end_line":124,"context_start_line":1,"context_end_line":124,"code":"import torch\n\nfrom bitsandbytes.triton.triton_utils import is_triton_available\n\nif not is_triton_available():\n\n def quantize_global_transpose(input):\n return None\n\n def quantize_global(x: torch.Tensor):\n return None\nelse:\n import triton\n import triton.language as tl\n\n # global quantize\n @triton.autotune(\n configs=[\n triton.Config({\"BLOCK_SIZE\": 1024}, num_warps=4),\n triton.Config({\"BLOCK_SIZE\": 2048}, num_stages=1),\n ],\n key=[\"n_elements\"],\n )\n @triton.jit\n def _quantize_global(\n x_ptr,\n absmax_inv_ptr,\n output_ptr,\n n_elements,\n BLOCK_SIZE: tl.constexpr,\n ):\n pid = tl.program_id(axis=0)\n block_start = pid * BLOCK_SIZE\n offsets = block_start + tl.arange(0, BLOCK_SIZE)\n mask = offsets < n_elements\n x = tl.load(x_ptr + offsets, mask=mask)\n absmax_inv = tl.load(absmax_inv_ptr)\n output = tl.libdevice.llrint(127.0 * (x * absmax_inv))\n tl.store(output_ptr + offsets, output, mask=mask)\n\n def quantize_global(x: torch.Tensor):\n absmax = x.abs().max().unsqueeze(0)\n absmax_inv = 1.0 / absmax\n output = torch.empty(*x.shape, device=\"cuda\", dtype=torch.int8)\n assert x.is_cuda and output.is_cuda\n n_elements = output.numel()\n grid = lambda meta: (triton.cdiv(n_elements, meta[\"BLOCK_SIZE\"]),)\n _quantize_global[grid](x, absmax_inv, output, n_elements)\n return output, absmax\n\n # global quantize and transpose\n @triton.autotune(\n configs=[\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 128, \"GROUP_M\": 8}, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 128, \"GROUP_M\": 8}, num_warps=4),\n # ...\n ],\n key=[\"M\", \"N\"],\n )\n @triton.jit\n def _quantize_global_transpose(\n A,\n absmax_inv_ptr,\n B,\n stride_am,\n stride_an,\n stride_bn,\n stride_bm,\n M,\n N,\n BLOCK_M: 
tl.constexpr,\n BLOCK_N: tl.constexpr,\n GROUP_M: tl.constexpr,\n ):\n pid = tl.program_id(0)\n grid_m = (M + BLOCK_M - 1) // BLOCK_M\n grid_n = (N + BLOCK_N - 1) // BLOCK_N\n\n width = GROUP_M * grid_n\n group_id = pid // width\n group_size = min(grid_m - group_id * GROUP_M, GROUP_M)\n pid_m = group_id * GROUP_M + (pid % group_size)\n pid_n = (pid % width) // group_size\n\n rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)\n rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)\n A = A + (rm[:, None] * stride_am + rn[None, :] * stride_an)\n mask = (rm < M)[:, None] & (rn < N)[None, :]\n a = tl.load(A, mask=mask)\n absmax_inv = tl.load(absmax_inv_ptr)\n\n # rematerialize to save registers\n rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)\n rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)\n B = B + (rm[:, None] * stride_bm + rn[None, :] * stride_bn)\n mask = (rm < M)[:, None] & (rn < N)[None, :]\n\n output = tl.libdevice.llrint(127.0 * (a * absmax_inv))\n\n tl.store(B, output, mask=mask)\n\n def quantize_global_transpose(input):\n absmax = input.abs().max().unsqueeze(0)\n absmax_inv = 1.0 / absmax\n M, N = input.shape\n out = torch.empty(N, M, device=\"cuda\", dtype=torch.int8)\n\n assert out.size(0) == N and out.size(1) == M\n assert input.stride(0) == 1 or input.stride(1) == 1\n assert out.stride(0) == 1 or out.stride(1) == 1\n\n grid = lambda META: (triton.cdiv(M, META[\"BLOCK_M\"]) * triton.cdiv(N, META[\"BLOCK_N\"]),)\n _quantize_global_transpose[grid](\n input,\n absmax_inv,\n out,\n input.stride(0),\n input.stride(1),\n out.stride(0),\n out.stride(1),\n M,\n N,\n )\n return out, absmax","source_hash":"eb761777b41fe7aea95cf8d8c826a5ec7e3ffc6bd681f43525bbdff495176ac4","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.quantize_global.quantize_global_transpose","uri":"program://bitsandbytes/function/bitsandbytes.triton.quantize_global.quantize_global_transpose#L102-L124","kind":"function","name":"quantize_global_transpose","path":"bitsandbytes/triton/quantize_global.py","language":"python","start_line":102,"end_line":124,"context_start_line":82,"context_end_line":124,"code":" pid_m = group_id * GROUP_M + (pid % group_size)\n pid_n = (pid % width) // group_size\n\n rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)\n rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)\n A = A + (rm[:, None] * stride_am + rn[None, :] * stride_an)\n mask = (rm < M)[:, None] & (rn < N)[None, :]\n a = tl.load(A, mask=mask)\n absmax_inv = tl.load(absmax_inv_ptr)\n\n # rematerialize to save registers\n rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)\n rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)\n B = B + (rm[:, None] * stride_bm + rn[None, :] * stride_bn)\n mask = (rm < M)[:, None] & (rn < N)[None, :]\n\n output = tl.libdevice.llrint(127.0 * (a * absmax_inv))\n\n tl.store(B, output, mask=mask)\n\n def quantize_global_transpose(input):\n absmax = input.abs().max().unsqueeze(0)\n absmax_inv = 1.0 / absmax\n M, N = input.shape\n out = torch.empty(N, M, device=\"cuda\", dtype=torch.int8)\n\n assert out.size(0) == N and out.size(1) == M\n assert input.stride(0) == 1 or input.stride(1) == 1\n assert out.stride(0) == 1 or out.stride(1) == 1\n\n grid = lambda META: (triton.cdiv(M, META[\"BLOCK_M\"]) * triton.cdiv(N, META[\"BLOCK_N\"]),)\n _quantize_global_transpose[grid](\n input,\n absmax_inv,\n out,\n input.stride(0),\n input.stride(1),\n out.stride(0),\n out.stride(1),\n M,\n N,\n )\n return out, absmax","source_hash":"eb761777b41fe7aea95cf8d8c826a5ec7e3ffc6bd681f43525bbdff495176ac4","truncated":false} 
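For the `quantize_global` pair above, a rough pure-PyTorch equivalent may help with testing. The Triton kernels require CUDA tensors (the wrappers assert `x.is_cuda`) and round with `tl.libdevice.llrint`, which under the default rounding mode is round-to-nearest, ties-to-even; `torch.round` should match that, though this equivalence is an assumption, not something the source states. The `ref_*` names are illustrative only:

```python
import torch

def ref_quantize_global(x: torch.Tensor):
    # single global scale; the kernel premultiplies by absmax_inv = 1/absmax
    absmax = x.abs().max().unsqueeze(0)
    q = torch.round(127.0 * (x / absmax)).to(torch.int8)
    return q, absmax

def ref_quantize_global_transpose(x: torch.Tensor):
    # the fused kernel writes the transposed tile directly; here we just .t()
    q, absmax = ref_quantize_global(x)
    return q.t().contiguous(), absmax
```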
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.quantize_global.quantize_global","uri":"program://bitsandbytes/function/bitsandbytes.triton.quantize_global.quantize_global#L41-L49","kind":"function","name":"quantize_global","path":"bitsandbytes/triton/quantize_global.py","language":"python","start_line":41,"end_line":49,"context_start_line":21,"context_end_line":69,"code":" ],\n key=[\"n_elements\"],\n )\n @triton.jit\n def _quantize_global(\n x_ptr,\n absmax_inv_ptr,\n output_ptr,\n n_elements,\n BLOCK_SIZE: tl.constexpr,\n ):\n pid = tl.program_id(axis=0)\n block_start = pid * BLOCK_SIZE\n offsets = block_start + tl.arange(0, BLOCK_SIZE)\n mask = offsets < n_elements\n x = tl.load(x_ptr + offsets, mask=mask)\n absmax_inv = tl.load(absmax_inv_ptr)\n output = tl.libdevice.llrint(127.0 * (x * absmax_inv))\n tl.store(output_ptr + offsets, output, mask=mask)\n\n def quantize_global(x: torch.Tensor):\n absmax = x.abs().max().unsqueeze(0)\n absmax_inv = 1.0 / absmax\n output = torch.empty(*x.shape, device=\"cuda\", dtype=torch.int8)\n assert x.is_cuda and output.is_cuda\n n_elements = output.numel()\n grid = lambda meta: (triton.cdiv(n_elements, meta[\"BLOCK_SIZE\"]),)\n _quantize_global[grid](x, absmax_inv, output, n_elements)\n return output, absmax\n\n # global quantize and transpose\n @triton.autotune(\n configs=[\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 128, \"GROUP_M\": 8}, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 128, \"GROUP_M\": 8}, num_warps=4),\n # ...\n ],\n key=[\"M\", \"N\"],\n )\n @triton.jit\n def _quantize_global_transpose(\n A,\n absmax_inv_ptr,\n B,\n stride_am,\n stride_an,\n stride_bn,\n stride_bm,\n M,","source_hash":"eb761777b41fe7aea95cf8d8c826a5ec7e3ffc6bd681f43525bbdff495176ac4","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.quantize_global._quantize_global","uri":"program://bitsandbytes/function/bitsandbytes.triton.quantize_global._quantize_global#L25-L39","kind":"function","name":"_quantize_global","path":"bitsandbytes/triton/quantize_global.py","language":"python","start_line":25,"end_line":39,"context_start_line":5,"context_end_line":59,"code":"if not is_triton_available():\n\n def quantize_global_transpose(input):\n return None\n\n def quantize_global(x: torch.Tensor):\n return None\nelse:\n import triton\n import triton.language as tl\n\n # global quantize\n @triton.autotune(\n configs=[\n triton.Config({\"BLOCK_SIZE\": 1024}, num_warps=4),\n triton.Config({\"BLOCK_SIZE\": 2048}, num_stages=1),\n ],\n key=[\"n_elements\"],\n )\n @triton.jit\n def _quantize_global(\n x_ptr,\n absmax_inv_ptr,\n output_ptr,\n n_elements,\n BLOCK_SIZE: tl.constexpr,\n ):\n pid = tl.program_id(axis=0)\n block_start = pid * BLOCK_SIZE\n offsets = block_start + tl.arange(0, BLOCK_SIZE)\n mask = offsets < n_elements\n x = tl.load(x_ptr + offsets, mask=mask)\n absmax_inv = tl.load(absmax_inv_ptr)\n output = tl.libdevice.llrint(127.0 * (x * absmax_inv))\n tl.store(output_ptr + offsets, output, mask=mask)\n\n def quantize_global(x: torch.Tensor):\n absmax = x.abs().max().unsqueeze(0)\n absmax_inv = 1.0 / absmax\n output = torch.empty(*x.shape, device=\"cuda\", dtype=torch.int8)\n assert x.is_cuda and output.is_cuda\n n_elements = output.numel()\n grid = lambda meta: (triton.cdiv(n_elements, meta[\"BLOCK_SIZE\"]),)\n _quantize_global[grid](x, absmax_inv, output, n_elements)\n return output, absmax\n\n # global quantize and transpose\n @triton.autotune(\n configs=[\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 128, 
\"GROUP_M\": 8}, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 128, \"GROUP_M\": 8}, num_warps=4),\n # ...\n ],\n key=[\"M\", \"N\"],\n )","source_hash":"eb761777b41fe7aea95cf8d8c826a5ec7e3ffc6bd681f43525bbdff495176ac4","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.quantize_global._quantize_global_transpose","uri":"program://bitsandbytes/function/bitsandbytes.triton.quantize_global._quantize_global_transpose#L61-L100","kind":"function","name":"_quantize_global_transpose","path":"bitsandbytes/triton/quantize_global.py","language":"python","start_line":61,"end_line":100,"context_start_line":41,"context_end_line":120,"code":" def quantize_global(x: torch.Tensor):\n absmax = x.abs().max().unsqueeze(0)\n absmax_inv = 1.0 / absmax\n output = torch.empty(*x.shape, device=\"cuda\", dtype=torch.int8)\n assert x.is_cuda and output.is_cuda\n n_elements = output.numel()\n grid = lambda meta: (triton.cdiv(n_elements, meta[\"BLOCK_SIZE\"]),)\n _quantize_global[grid](x, absmax_inv, output, n_elements)\n return output, absmax\n\n # global quantize and transpose\n @triton.autotune(\n configs=[\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 128, \"GROUP_M\": 8}, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 128, \"GROUP_M\": 8}, num_warps=4),\n # ...\n ],\n key=[\"M\", \"N\"],\n )\n @triton.jit\n def _quantize_global_transpose(\n A,\n absmax_inv_ptr,\n B,\n stride_am,\n stride_an,\n stride_bn,\n stride_bm,\n M,\n N,\n BLOCK_M: tl.constexpr,\n BLOCK_N: tl.constexpr,\n GROUP_M: tl.constexpr,\n ):\n pid = tl.program_id(0)\n grid_m = (M + BLOCK_M - 1) // BLOCK_M\n grid_n = (N + BLOCK_N - 1) // BLOCK_N\n\n width = GROUP_M * grid_n\n group_id = pid // width\n group_size = min(grid_m - group_id * GROUP_M, GROUP_M)\n pid_m = group_id * GROUP_M + (pid % group_size)\n pid_n = (pid % width) // group_size\n\n rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)\n rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)\n A = A + (rm[:, None] * stride_am + rn[None, :] * stride_an)\n mask = (rm < M)[:, None] & (rn < N)[None, :]\n a = tl.load(A, mask=mask)\n absmax_inv = tl.load(absmax_inv_ptr)\n\n # rematerialize to save registers\n rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)\n rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)\n B = B + (rm[:, None] * stride_bm + rn[None, :] * stride_bn)\n mask = (rm < M)[:, None] & (rn < N)[None, :]\n\n output = tl.libdevice.llrint(127.0 * (a * absmax_inv))\n\n tl.store(B, output, mask=mask)\n\n def quantize_global_transpose(input):\n absmax = input.abs().max().unsqueeze(0)\n absmax_inv = 1.0 / absmax\n M, N = input.shape\n out = torch.empty(N, M, device=\"cuda\", dtype=torch.int8)\n\n assert out.size(0) == N and out.size(1) == M\n assert input.stride(0) == 1 or input.stride(1) == 1\n assert out.stride(0) == 1 or out.stride(1) == 1\n\n grid = lambda META: (triton.cdiv(M, META[\"BLOCK_M\"]) * triton.cdiv(N, META[\"BLOCK_N\"]),)\n _quantize_global_transpose[grid](\n input,\n absmax_inv,\n out,\n input.stride(0),\n input.stride(1),\n out.stride(0),\n out.stride(1),","source_hash":"eb761777b41fe7aea95cf8d8c826a5ec7e3ffc6bd681f43525bbdff495176ac4","truncated":false} 
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.quantize_columnwise_and_transpose","uri":"program://bitsandbytes/module/bitsandbytes.triton.quantize_columnwise_and_transpose#L1-L75","kind":"module","name":"bitsandbytes.triton.quantize_columnwise_and_transpose","path":"bitsandbytes/triton/quantize_columnwise_and_transpose.py","language":"python","start_line":1,"end_line":75,"context_start_line":1,"context_end_line":75,"code":"import math\n\nimport torch\n\nfrom bitsandbytes.triton.triton_utils import is_triton_available\n\nif not is_triton_available():\n\n def quantize_columnwise_and_transpose(x: torch.Tensor):\n return None\nelse:\n import triton\n import triton.language as tl\n\n # This kernel does fused columnwise quantization and transpose.\n\n # TODO: autotune this better.\n @triton.autotune(\n configs=[\n triton.Config({}, num_stages=1),\n triton.Config({}, num_stages=2),\n triton.Config({}, num_stages=4),\n triton.Config({}, num_stages=8),\n triton.Config({}, num_stages=16),\n triton.Config({}, num_stages=1, num_warps=8),\n triton.Config({}, num_stages=2, num_warps=8),\n triton.Config({}, num_stages=4, num_warps=8),\n triton.Config({}, num_stages=8, num_warps=8),\n triton.Config({}, num_stages=16, num_warps=8),\n triton.Config({}, num_warps=1),\n triton.Config({}, num_warps=2),\n triton.Config({}, num_warps=4),\n triton.Config({}, num_warps=8),\n ],\n key=[\"n_elements\"],\n )\n @triton.jit\n def _quantize_columnwise_and_transpose(\n x_ptr,\n output_ptr,\n output_maxs,\n n_elements,\n M: tl.constexpr,\n N: tl.constexpr,\n BLOCK_SIZE: tl.constexpr,\n P2: tl.constexpr,\n ):\n pid = tl.program_id(axis=0)\n block_start = pid\n p2_arange = tl.arange(0, P2)\n p2_arange_mask = p2_arange < M\n arange = p2_arange * N\n offsets = block_start + arange\n x = tl.load(x_ptr + offsets, mask=p2_arange_mask)\n abs_x = tl.abs(x)\n max_val = tl.max(tl.where(p2_arange_mask, abs_x, 0), axis=0)\n output = tl.libdevice.llrint(127.0 * (x / max_val))\n\n new_start = pid * M\n new_offsets = new_start + p2_arange\n tl.store(output_ptr + new_offsets, output, mask=p2_arange_mask)\n tl.store(output_maxs + pid, max_val)\n\n def quantize_columnwise_and_transpose(x: torch.Tensor):\n M, N = x.shape\n output = torch.empty(N, M, device=x.device, dtype=torch.int8)\n output_maxs = torch.empty(x.shape[1], device=x.device, dtype=torch.float16)\n\n P2 = int(2 ** (math.ceil(math.log2(M))))\n\n assert x.is_cuda and output.is_cuda\n n_elements = output.numel()\n grid = lambda meta: (triton.cdiv(n_elements, meta[\"BLOCK_SIZE\"]),)\n _quantize_columnwise_and_transpose[grid](x, output, output_maxs, n_elements, M, N, BLOCK_SIZE=M, P2=P2)\n return output, output_maxs","source_hash":"c09bbb8aa605d84ab832f5da7d302b5b73219421dae9ecc531efa283a247fb4f","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.quantize_columnwise_and_transpose.quantize_columnwise_and_transpose","uri":"program://bitsandbytes/function/bitsandbytes.triton.quantize_columnwise_and_transpose.quantize_columnwise_and_transpose#L64-L75","kind":"function","name":"quantize_columnwise_and_transpose","path":"bitsandbytes/triton/quantize_columnwise_and_transpose.py","language":"python","start_line":64,"end_line":75,"context_start_line":44,"context_end_line":75,"code":" N: tl.constexpr,\n BLOCK_SIZE: tl.constexpr,\n P2: tl.constexpr,\n ):\n pid = tl.program_id(axis=0)\n block_start = pid\n p2_arange = tl.arange(0, P2)\n p2_arange_mask = p2_arange < M\n arange = p2_arange * N\n offsets = block_start + arange\n x = tl.load(x_ptr + 
offsets, mask=p2_arange_mask)\n abs_x = tl.abs(x)\n max_val = tl.max(tl.where(p2_arange_mask, abs_x, 0), axis=0)\n output = tl.libdevice.llrint(127.0 * (x / max_val))\n\n new_start = pid * M\n new_offsets = new_start + p2_arange\n tl.store(output_ptr + new_offsets, output, mask=p2_arange_mask)\n tl.store(output_maxs + pid, max_val)\n\n def quantize_columnwise_and_transpose(x: torch.Tensor):\n M, N = x.shape\n output = torch.empty(N, M, device=x.device, dtype=torch.int8)\n output_maxs = torch.empty(x.shape[1], device=x.device, dtype=torch.float16)\n\n P2 = int(2 ** (math.ceil(math.log2(M))))\n\n assert x.is_cuda and output.is_cuda\n n_elements = output.numel()\n grid = lambda meta: (triton.cdiv(n_elements, meta[\"BLOCK_SIZE\"]),)\n _quantize_columnwise_and_transpose[grid](x, output, output_maxs, n_elements, M, N, BLOCK_SIZE=M, P2=P2)\n return output, output_maxs","source_hash":"c09bbb8aa605d84ab832f5da7d302b5b73219421dae9ecc531efa283a247fb4f","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.quantize_columnwise_and_transpose._quantize_columnwise_and_transpose","uri":"program://bitsandbytes/function/bitsandbytes.triton.quantize_columnwise_and_transpose._quantize_columnwise_and_transpose#L38-L62","kind":"function","name":"_quantize_columnwise_and_transpose","path":"bitsandbytes/triton/quantize_columnwise_and_transpose.py","language":"python","start_line":38,"end_line":62,"context_start_line":18,"context_end_line":75,"code":" @triton.autotune(\n configs=[\n triton.Config({}, num_stages=1),\n triton.Config({}, num_stages=2),\n triton.Config({}, num_stages=4),\n triton.Config({}, num_stages=8),\n triton.Config({}, num_stages=16),\n triton.Config({}, num_stages=1, num_warps=8),\n triton.Config({}, num_stages=2, num_warps=8),\n triton.Config({}, num_stages=4, num_warps=8),\n triton.Config({}, num_stages=8, num_warps=8),\n triton.Config({}, num_stages=16, num_warps=8),\n triton.Config({}, num_warps=1),\n triton.Config({}, num_warps=2),\n triton.Config({}, num_warps=4),\n triton.Config({}, num_warps=8),\n ],\n key=[\"n_elements\"],\n )\n @triton.jit\n def _quantize_columnwise_and_transpose(\n x_ptr,\n output_ptr,\n output_maxs,\n n_elements,\n M: tl.constexpr,\n N: tl.constexpr,\n BLOCK_SIZE: tl.constexpr,\n P2: tl.constexpr,\n ):\n pid = tl.program_id(axis=0)\n block_start = pid\n p2_arange = tl.arange(0, P2)\n p2_arange_mask = p2_arange < M\n arange = p2_arange * N\n offsets = block_start + arange\n x = tl.load(x_ptr + offsets, mask=p2_arange_mask)\n abs_x = tl.abs(x)\n max_val = tl.max(tl.where(p2_arange_mask, abs_x, 0), axis=0)\n output = tl.libdevice.llrint(127.0 * (x / max_val))\n\n new_start = pid * M\n new_offsets = new_start + p2_arange\n tl.store(output_ptr + new_offsets, output, mask=p2_arange_mask)\n tl.store(output_maxs + pid, max_val)\n\n def quantize_columnwise_and_transpose(x: torch.Tensor):\n M, N = x.shape\n output = torch.empty(N, M, device=x.device, dtype=torch.int8)\n output_maxs = torch.empty(x.shape[1], device=x.device, dtype=torch.float16)\n\n P2 = int(2 ** (math.ceil(math.log2(M))))\n\n assert x.is_cuda and output.is_cuda\n n_elements = output.numel()\n grid = lambda meta: (triton.cdiv(n_elements, meta[\"BLOCK_SIZE\"]),)\n _quantize_columnwise_and_transpose[grid](x, output, output_maxs, n_elements, M, N, BLOCK_SIZE=M, P2=P2)\n return output, output_maxs","source_hash":"c09bbb8aa605d84ab832f5da7d302b5b73219421dae9ecc531efa283a247fb4f","truncated":false} 
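The columnwise records above fuse quantization with a transpose. With BLOCK_SIZE=M and n_elements=M*N, the grid lambda launches exactly N programs, one per input column: each program strides down its column (offsets = pid + arange * N), reduces it to a per-column absmax, rounds with llrint, and writes the quantized values as one contiguous row of the (N, M) output. A companion sketch under the same assumptions (CUDA device, Triton installed) follows; the plain-PyTorch reference is an illustrative check added here, not part of the recorded module.

import torch

from bitsandbytes.triton.quantize_columnwise_and_transpose import (
    quantize_columnwise_and_transpose,
)
from bitsandbytes.triton.triton_utils import is_triton_available

if is_triton_available() and torch.cuda.is_available():
    x = torch.randn(1024, 512, device="cuda", dtype=torch.float16)  # (M, N)
    xt_int8, col_absmax = quantize_columnwise_and_transpose(x)
    assert xt_int8.shape == (512, 1024)  # each original column becomes a row
    assert col_absmax.shape == (512,)    # one float16 absmax per column
    # Reference in plain PyTorch; llrint and torch.round can differ by 1 near ties.
    ref = torch.round(127.0 * x.float() / x.float().abs().amax(dim=0, keepdim=True)).t()
    print((xt_int8.float() - ref).abs().max())  # expected 0, occasionally 1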
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.quantize_rowwise","uri":"program://bitsandbytes/module/bitsandbytes.triton.quantize_rowwise#L1-L67","kind":"module","name":"bitsandbytes.triton.quantize_rowwise","path":"bitsandbytes/triton/quantize_rowwise.py","language":"python","start_line":1,"end_line":67,"context_start_line":1,"context_end_line":67,"code":"import math\n\nimport torch\n\nfrom bitsandbytes.triton.triton_utils import is_triton_available\n\nif not is_triton_available():\n\n def quantize_rowwise(x: torch.Tensor):\n return None\nelse:\n import triton\n import triton.language as tl\n\n # rowwise quantize\n\n # TODO: autotune this better.\n @triton.autotune(\n configs=[\n triton.Config({}, num_stages=1, num_warps=8),\n triton.Config({}, num_stages=2, num_warps=8),\n triton.Config({}, num_stages=4, num_warps=8),\n triton.Config({}, num_stages=8, num_warps=8),\n triton.Config({}, num_stages=1),\n triton.Config({}, num_stages=2),\n triton.Config({}, num_stages=4),\n triton.Config({}, num_stages=8),\n triton.Config({}, num_warps=1),\n triton.Config({}, num_warps=2),\n triton.Config({}, num_warps=4),\n triton.Config({}, num_warps=8),\n ],\n key=[\"n_elements\"],\n )\n @triton.jit\n def _quantize_rowwise(\n x_ptr,\n output_ptr,\n output_maxs,\n n_elements,\n BLOCK_SIZE: tl.constexpr,\n P2: tl.constexpr,\n ):\n pid = tl.program_id(axis=0)\n block_start = pid * BLOCK_SIZE\n arange = tl.arange(0, P2)\n offsets = block_start + arange\n row_mask = arange < BLOCK_SIZE\n x = tl.load(x_ptr + offsets, mask=row_mask)\n\n abs_x = tl.abs(x)\n max_val = tl.max(tl.where(row_mask, abs_x, 0), axis=0)\n output = tl.libdevice.llrint(127.0 * (x / max_val))\n tl.store(output_ptr + offsets, output, mask=row_mask)\n tl.store(output_maxs + pid, max_val)\n\n def quantize_rowwise(x: torch.Tensor):\n output = torch.empty(*x.shape, device=x.device, dtype=torch.int8)\n output_maxs = torch.empty(x.shape[0], device=x.device, dtype=torch.float16)\n\n P2 = int(2 ** (math.ceil(math.log2(x.shape[1]))))\n\n assert x.is_cuda and output.is_cuda\n n_elements = output.numel()\n grid = lambda meta: (x.shape[0],)\n _quantize_rowwise[grid](x, output, output_maxs, n_elements, BLOCK_SIZE=x.shape[1], P2=P2)\n return output, output_maxs","source_hash":"8a940efb13093778ac9a0a646dafc13ad26c924776d5d8be00149ac39f52bcda","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.quantize_rowwise.quantize_rowwise","uri":"program://bitsandbytes/function/bitsandbytes.triton.quantize_rowwise.quantize_rowwise#L57-L67","kind":"function","name":"quantize_rowwise","path":"bitsandbytes/triton/quantize_rowwise.py","language":"python","start_line":57,"end_line":67,"context_start_line":37,"context_end_line":67,"code":" x_ptr,\n output_ptr,\n output_maxs,\n n_elements,\n BLOCK_SIZE: tl.constexpr,\n P2: tl.constexpr,\n ):\n pid = tl.program_id(axis=0)\n block_start = pid * BLOCK_SIZE\n arange = tl.arange(0, P2)\n offsets = block_start + arange\n row_mask = arange < BLOCK_SIZE\n x = tl.load(x_ptr + offsets, mask=row_mask)\n\n abs_x = tl.abs(x)\n max_val = tl.max(tl.where(row_mask, abs_x, 0), axis=0)\n output = tl.libdevice.llrint(127.0 * (x / max_val))\n tl.store(output_ptr + offsets, output, mask=row_mask)\n tl.store(output_maxs + pid, max_val)\n\n def quantize_rowwise(x: torch.Tensor):\n output = torch.empty(*x.shape, device=x.device, dtype=torch.int8)\n output_maxs = torch.empty(x.shape[0], device=x.device, dtype=torch.float16)\n\n P2 = int(2 ** (math.ceil(math.log2(x.shape[1]))))\n\n assert x.is_cuda and 
output.is_cuda\n        n_elements = output.numel()\n        grid = lambda meta: (x.shape[0],)\n        _quantize_rowwise[grid](x, output, output_maxs, n_elements, BLOCK_SIZE=x.shape[1], P2=P2)\n        return output, output_maxs","source_hash":"8a940efb13093778ac9a0a646dafc13ad26c924776d5d8be00149ac39f52bcda","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.quantize_rowwise._quantize_rowwise","uri":"program://bitsandbytes/function/bitsandbytes.triton.quantize_rowwise._quantize_rowwise#L36-L55","kind":"function","name":"_quantize_rowwise","path":"bitsandbytes/triton/quantize_rowwise.py","language":"python","start_line":36,"end_line":55,"context_start_line":16,"context_end_line":67,"code":"\n    # TODO: autotune this better.\n    @triton.autotune(\n        configs=[\n            triton.Config({}, num_stages=1, num_warps=8),\n            triton.Config({}, num_stages=2, num_warps=8),\n            triton.Config({}, num_stages=4, num_warps=8),\n            triton.Config({}, num_stages=8, num_warps=8),\n            triton.Config({}, num_stages=1),\n            triton.Config({}, num_stages=2),\n            triton.Config({}, num_stages=4),\n            triton.Config({}, num_stages=8),\n            triton.Config({}, num_warps=1),\n            triton.Config({}, num_warps=2),\n            triton.Config({}, num_warps=4),\n            triton.Config({}, num_warps=8),\n        ],\n        key=[\"n_elements\"],\n    )\n    @triton.jit\n    def _quantize_rowwise(\n        x_ptr,\n        output_ptr,\n        output_maxs,\n        n_elements,\n        BLOCK_SIZE: tl.constexpr,\n        P2: tl.constexpr,\n    ):\n        pid = tl.program_id(axis=0)\n        block_start = pid * BLOCK_SIZE\n        arange = tl.arange(0, P2)\n        offsets = block_start + arange\n        row_mask = arange < BLOCK_SIZE\n        x = tl.load(x_ptr + offsets, mask=row_mask)\n\n        abs_x = tl.abs(x)\n        max_val = tl.max(tl.where(row_mask, abs_x, 0), axis=0)\n        output = tl.libdevice.llrint(127.0 * (x / max_val))\n        tl.store(output_ptr + offsets, output, mask=row_mask)\n        tl.store(output_maxs + pid, max_val)\n\n    def quantize_rowwise(x: torch.Tensor):\n        output = torch.empty(*x.shape, device=x.device, dtype=torch.int8)\n        output_maxs = torch.empty(x.shape[0], device=x.device, dtype=torch.float16)\n\n        P2 = int(2 ** (math.ceil(math.log2(x.shape[1]))))\n\n        assert x.is_cuda and output.is_cuda\n        n_elements = output.numel()\n        grid = lambda meta: (x.shape[0],)\n        _quantize_rowwise[grid](x, output, output_maxs, n_elements, BLOCK_SIZE=x.shape[1], P2=P2)\n        return output, output_maxs","source_hash":"8a940efb13093778ac9a0a646dafc13ad26c924776d5d8be00149ac39f52bcda","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.dequantize_rowwise","uri":"program://bitsandbytes/module/bitsandbytes.triton.dequantize_rowwise#L1-L64","kind":"module","name":"bitsandbytes.triton.dequantize_rowwise","path":"bitsandbytes/triton/dequantize_rowwise.py","language":"python","start_line":1,"end_line":64,"context_start_line":1,"context_end_line":64,"code":"import math\n\nimport torch\n\nfrom bitsandbytes.triton.triton_utils import is_triton_available\n\nif not is_triton_available():\n\n    def dequantize_rowwise(x: torch.Tensor, state_x: torch.Tensor):\n        return None\nelse:\n    import triton\n    import triton.language as tl\n\n    # rowwise dequantize\n\n    # TODO: autotune this better.\n    @triton.autotune(\n        configs=[\n            triton.Config({}, num_stages=1, num_warps=8),\n            triton.Config({}, num_stages=2, num_warps=8),\n            triton.Config({}, num_stages=4, num_warps=8),\n            triton.Config({}, num_stages=8, num_warps=8),\n            triton.Config({}, num_stages=1),\n            triton.Config({}, num_stages=2),\n            triton.Config({}, num_stages=4),\n            triton.Config({}, num_stages=8),\n            triton.Config({}, num_warps=1),\n            triton.Config({}, num_warps=2),\n            
triton.Config({}, num_warps=4),\n triton.Config({}, num_warps=8),\n ],\n key=[\"n_elements\"],\n )\n @triton.jit\n def _dequantize_rowwise(\n x_ptr,\n state_x,\n output_ptr,\n inv_127,\n n_elements,\n BLOCK_SIZE: tl.constexpr,\n P2: tl.constexpr,\n ):\n pid = tl.program_id(axis=0)\n block_start = pid * BLOCK_SIZE\n arange = tl.arange(0, P2)\n offsets = block_start + arange\n row_mask = arange < BLOCK_SIZE\n x = tl.load(x_ptr + offsets, mask=row_mask)\n max_val = tl.load(state_x + pid)\n output = max_val * x * inv_127\n tl.store(output_ptr + offsets, output, mask=row_mask)\n\n def dequantize_rowwise(x: torch.Tensor, state_x: torch.Tensor):\n output = torch.empty(*x.shape, device=x.device, dtype=torch.float16)\n\n P2 = int(2 ** (math.ceil(math.log2(x.shape[1]))))\n\n assert x.is_cuda and output.is_cuda\n n_elements = output.numel()\n grid = lambda meta: (x.shape[0],)\n _dequantize_rowwise[grid](x, state_x, output, 1.0 / 127, n_elements, BLOCK_SIZE=x.shape[1], P2=P2)\n return output","source_hash":"fee3536265b033b4519097ecdd75c3c7ccb3860e478924e91bd0aaccec860222","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.dequantize_rowwise.dequantize_rowwise","uri":"program://bitsandbytes/function/bitsandbytes.triton.dequantize_rowwise.dequantize_rowwise#L55-L64","kind":"function","name":"dequantize_rowwise","path":"bitsandbytes/triton/dequantize_rowwise.py","language":"python","start_line":55,"end_line":64,"context_start_line":35,"context_end_line":64,"code":" @triton.jit\n def _dequantize_rowwise(\n x_ptr,\n state_x,\n output_ptr,\n inv_127,\n n_elements,\n BLOCK_SIZE: tl.constexpr,\n P2: tl.constexpr,\n ):\n pid = tl.program_id(axis=0)\n block_start = pid * BLOCK_SIZE\n arange = tl.arange(0, P2)\n offsets = block_start + arange\n row_mask = arange < BLOCK_SIZE\n x = tl.load(x_ptr + offsets, mask=row_mask)\n max_val = tl.load(state_x + pid)\n output = max_val * x * inv_127\n tl.store(output_ptr + offsets, output, mask=row_mask)\n\n def dequantize_rowwise(x: torch.Tensor, state_x: torch.Tensor):\n output = torch.empty(*x.shape, device=x.device, dtype=torch.float16)\n\n P2 = int(2 ** (math.ceil(math.log2(x.shape[1]))))\n\n assert x.is_cuda and output.is_cuda\n n_elements = output.numel()\n grid = lambda meta: (x.shape[0],)\n _dequantize_rowwise[grid](x, state_x, output, 1.0 / 127, n_elements, BLOCK_SIZE=x.shape[1], P2=P2)\n return output","source_hash":"fee3536265b033b4519097ecdd75c3c7ccb3860e478924e91bd0aaccec860222","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.dequantize_rowwise._dequantize_rowwise","uri":"program://bitsandbytes/function/bitsandbytes.triton.dequantize_rowwise._dequantize_rowwise#L36-L53","kind":"function","name":"_dequantize_rowwise","path":"bitsandbytes/triton/dequantize_rowwise.py","language":"python","start_line":36,"end_line":53,"context_start_line":16,"context_end_line":64,"code":"\n # TODO: autotune this better.\n @triton.autotune(\n configs=[\n triton.Config({}, num_stages=1, num_warps=8),\n triton.Config({}, num_stages=2, num_warps=8),\n triton.Config({}, num_stages=4, num_warps=8),\n triton.Config({}, num_stages=8, num_warps=8),\n triton.Config({}, num_stages=1),\n triton.Config({}, num_stages=2),\n triton.Config({}, num_stages=4),\n triton.Config({}, num_stages=8),\n triton.Config({}, num_warps=1),\n triton.Config({}, num_warps=2),\n triton.Config({}, num_warps=4),\n triton.Config({}, num_warps=8),\n ],\n key=[\"n_elements\"],\n )\n @triton.jit\n def _dequantize_rowwise(\n x_ptr,\n state_x,\n output_ptr,\n 
inv_127,\n n_elements,\n BLOCK_SIZE: tl.constexpr,\n P2: tl.constexpr,\n ):\n pid = tl.program_id(axis=0)\n block_start = pid * BLOCK_SIZE\n arange = tl.arange(0, P2)\n offsets = block_start + arange\n row_mask = arange < BLOCK_SIZE\n x = tl.load(x_ptr + offsets, mask=row_mask)\n max_val = tl.load(state_x + pid)\n output = max_val * x * inv_127\n tl.store(output_ptr + offsets, output, mask=row_mask)\n\n def dequantize_rowwise(x: torch.Tensor, state_x: torch.Tensor):\n output = torch.empty(*x.shape, device=x.device, dtype=torch.float16)\n\n P2 = int(2 ** (math.ceil(math.log2(x.shape[1]))))\n\n assert x.is_cuda and output.is_cuda\n n_elements = output.numel()\n grid = lambda meta: (x.shape[0],)\n _dequantize_rowwise[grid](x, state_x, output, 1.0 / 127, n_elements, BLOCK_SIZE=x.shape[1], P2=P2)\n return output","source_hash":"fee3536265b033b4519097ecdd75c3c7ccb3860e478924e91bd0aaccec860222","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.int8_matmul_mixed_dequantize","uri":"program://bitsandbytes/module/bitsandbytes.triton.int8_matmul_mixed_dequantize#L1-L206","kind":"module","name":"bitsandbytes.triton.int8_matmul_mixed_dequantize","path":"bitsandbytes/triton/int8_matmul_mixed_dequantize.py","language":"python","start_line":1,"end_line":206,"context_start_line":1,"context_end_line":206,"code":"import torch\n\nfrom bitsandbytes.triton.triton_utils import is_triton_available\n\nif not is_triton_available():\n\n def int8_matmul_mixed_dequantize(a, b, state_x, state_w, bias):\n return None\nelse:\n import triton\n import triton.language as tl\n\n from .matmul_perf_model import early_config_prune, estimate_matmul_time\n\n # This is a matmul kernel based on triton.ops.matmul\n # It is modified to support rowwise quantized input and global quantized weight\n # It's purpose is fused matmul then dequantize\n # It does support bias.\n\n def init_to_zero(name):\n return lambda nargs: nargs[name].zero_()\n\n def get_configs_io_bound():\n configs = []\n for num_stages in [2, 3, 4, 5, 6]:\n for block_m in [16, 32]:\n for block_k in [32, 64]:\n for block_n in [32, 64, 128, 256]:\n num_warps = 2 if block_n <= 64 else 4\n configs.append(\n triton.Config(\n {\"BLOCK_M\": block_m, \"BLOCK_N\": block_n, \"BLOCK_K\": block_k, \"SPLIT_K\": 1},\n num_stages=num_stages,\n num_warps=num_warps,\n ),\n )\n # split_k\n for split_k in [2, 4, 8, 16]:\n configs.append(\n triton.Config(\n {\"BLOCK_M\": block_m, \"BLOCK_N\": block_n, \"BLOCK_K\": block_k, \"SPLIT_K\": split_k},\n num_stages=num_stages,\n num_warps=num_warps,\n pre_hook=init_to_zero(\"C\"),\n ),\n )\n return configs\n\n @triton.autotune(\n configs=[\n # basic configs for compute-bound matmuls\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 256, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=3, num_warps=8),\n triton.Config({\"BLOCK_M\": 256, \"BLOCK_N\": 128, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=3, num_warps=8),\n triton.Config({\"BLOCK_M\": 256, \"BLOCK_N\": 64, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 256, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 128, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 64, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 128, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 
32, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 32, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=5, num_warps=2),\n # good for int8\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 256, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=3, num_warps=8),\n triton.Config({\"BLOCK_M\": 256, \"BLOCK_N\": 128, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=3, num_warps=8),\n triton.Config({\"BLOCK_M\": 256, \"BLOCK_N\": 64, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 256, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 128, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 64, \"BLOCK_K\": 64, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 128, \"BLOCK_K\": 64, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 32, \"BLOCK_K\": 64, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 32, \"BLOCK_K\": 64, \"SPLIT_K\": 1}, num_stages=5, num_warps=2),\n *get_configs_io_bound(),\n ],\n key=[\"M\", \"N\", \"K\"],\n prune_configs_by={\"early_config_prune\": early_config_prune, \"perf_model\": estimate_matmul_time, \"top_k\": 10},\n )\n @triton.heuristics(\n {\n \"EVEN_K\": lambda args: args[\"K\"] % (args[\"BLOCK_K\"] * args[\"SPLIT_K\"]) == 0,\n },\n )\n @triton.jit\n def _int8_matmul_mixed_dequantize(\n A,\n B,\n C,\n bias,\n state_x_ptr,\n state_w_ptr,\n M,\n N,\n K,\n divfactor: tl.constexpr,\n has_bias: tl.constexpr,\n stride_am,\n stride_ak,\n stride_bk,\n stride_bn,\n stride_cm,\n stride_cn,\n BLOCK_M: tl.constexpr,\n BLOCK_N: tl.constexpr,\n BLOCK_K: tl.constexpr,\n GROUP_M: tl.constexpr,\n SPLIT_K: tl.constexpr,\n EVEN_K: tl.constexpr,\n ACC_TYPE: tl.constexpr,\n ):\n # matrix multiplication\n pid = tl.program_id(0)\n pid_z = tl.program_id(1)\n grid_m = tl.cdiv(M, BLOCK_M)\n grid_n = tl.cdiv(N, BLOCK_N)\n # re-order program ID for better L2 performance\n width = GROUP_M * grid_n\n group_id = pid // width\n group_size = min(grid_m - group_id * GROUP_M, GROUP_M)\n pid_m = group_id * GROUP_M + (pid % group_size)\n pid_n = (pid % width) // (group_size)\n # do matrix multiplication\n rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)\n rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)\n ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)\n rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)\n rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)\n # pointers\n A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)\n B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)\n\n # rematerialize rm and rn to save registers\n rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)\n rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)\n\n w_factor = tl.load(state_w_ptr)\n x_factor = tl.load(state_x_ptr + ram)[:, None]\n\n # acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)\n acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.int32)\n for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)):\n if EVEN_K:\n a = tl.load(A)\n b = tl.load(B)\n else:\n k_remaining = K - k * (BLOCK_K * SPLIT_K)\n a = tl.load(A, mask=rk[None, :] < k_remaining, other=0.0)\n b = tl.load(B, mask=rk[:, None] < k_remaining, other=0.0)\n acc += tl.dot(a, b)\n A += BLOCK_K * SPLIT_K * stride_ak\n B += BLOCK_K * SPLIT_K * stride_bk\n\n acc = w_factor * (x_factor * (acc * 
divfactor))\n acc = acc.to(C.dtype.element_ty)\n\n # conditionally add bias\n if has_bias:\n bias = tl.load(bias + rn).to(C.dtype.element_ty)\n acc = acc + bias[None, :]\n\n C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)\n mask = (rm < M)[:, None] & (rn < N)[None, :]\n # handles write-back with reduction-splitting\n if SPLIT_K == 1:\n tl.store(C, acc, mask=mask)\n else:\n tl.atomic_add(C, acc, mask=mask)\n\n def int8_matmul_mixed_dequantize(a, b, state_x, state_w, bias):\n device = a.device\n divfactor = 1.0 / (127.0 * 127.0)\n has_bias = 0 if bias is None else 1\n # handle non-contiguous inputs if necessary\n if a.stride(0) > 1 and a.stride(1) > 1:\n a = a.contiguous()\n if b.stride(0) > 1 and b.stride(1) > 1:\n b = b.contiguous()\n # checks constraints\n assert a.shape[1] == b.shape[0], \"incompatible dimensions\"\n M, K = a.shape\n _, N = b.shape\n # allocates output\n c = torch.empty((M, N), device=device, dtype=torch.float16)\n # accumulator types\n ACC_TYPE = tl.float32 # if a.dtype in [torch.float16, torch.bfloat16, torch.float32] else tl.int32\n # launch int8_matmul_mixed_dequantize kernel\n grid = lambda META: (triton.cdiv(M, META[\"BLOCK_M\"]) * triton.cdiv(N, META[\"BLOCK_N\"]), META[\"SPLIT_K\"])\n _int8_matmul_mixed_dequantize[grid](\n a,\n b,\n c,\n bias,\n state_x,\n state_w,\n M,\n N,\n K,\n divfactor,\n has_bias,\n a.stride(0),\n a.stride(1),\n b.stride(0),\n b.stride(1),\n c.stride(0),\n c.stride(1),\n GROUP_M=8,\n ACC_TYPE=ACC_TYPE,\n )\n return c","source_hash":"00d38ef8d3adde6f165ea22c56ece9f24acb177e88b926ea3e85a75aae1db6de","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.int8_matmul_mixed_dequantize.int8_matmul_mixed_dequantize","uri":"program://bitsandbytes/function/bitsandbytes.triton.int8_matmul_mixed_dequantize.int8_matmul_mixed_dequantize#L166-L206","kind":"function","name":"int8_matmul_mixed_dequantize","path":"bitsandbytes/triton/int8_matmul_mixed_dequantize.py","language":"python","start_line":166,"end_line":206,"context_start_line":146,"context_end_line":206,"code":" acc += tl.dot(a, b)\n A += BLOCK_K * SPLIT_K * stride_ak\n B += BLOCK_K * SPLIT_K * stride_bk\n\n acc = w_factor * (x_factor * (acc * divfactor))\n acc = acc.to(C.dtype.element_ty)\n\n # conditionally add bias\n if has_bias:\n bias = tl.load(bias + rn).to(C.dtype.element_ty)\n acc = acc + bias[None, :]\n\n C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)\n mask = (rm < M)[:, None] & (rn < N)[None, :]\n # handles write-back with reduction-splitting\n if SPLIT_K == 1:\n tl.store(C, acc, mask=mask)\n else:\n tl.atomic_add(C, acc, mask=mask)\n\n def int8_matmul_mixed_dequantize(a, b, state_x, state_w, bias):\n device = a.device\n divfactor = 1.0 / (127.0 * 127.0)\n has_bias = 0 if bias is None else 1\n # handle non-contiguous inputs if necessary\n if a.stride(0) > 1 and a.stride(1) > 1:\n a = a.contiguous()\n if b.stride(0) > 1 and b.stride(1) > 1:\n b = b.contiguous()\n # checks constraints\n assert a.shape[1] == b.shape[0], \"incompatible dimensions\"\n M, K = a.shape\n _, N = b.shape\n # allocates output\n c = torch.empty((M, N), device=device, dtype=torch.float16)\n # accumulator types\n ACC_TYPE = tl.float32 # if a.dtype in [torch.float16, torch.bfloat16, torch.float32] else tl.int32\n # launch int8_matmul_mixed_dequantize kernel\n grid = lambda META: (triton.cdiv(M, META[\"BLOCK_M\"]) * triton.cdiv(N, META[\"BLOCK_N\"]), META[\"SPLIT_K\"])\n _int8_matmul_mixed_dequantize[grid](\n a,\n b,\n c,\n bias,\n state_x,\n state_w,\n M,\n N,\n 
K,\n divfactor,\n has_bias,\n a.stride(0),\n a.stride(1),\n b.stride(0),\n b.stride(1),\n c.stride(0),\n c.stride(1),\n GROUP_M=8,\n ACC_TYPE=ACC_TYPE,\n )\n return c","source_hash":"00d38ef8d3adde6f165ea22c56ece9f24acb177e88b926ea3e85a75aae1db6de","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.int8_matmul_mixed_dequantize.init_to_zero","uri":"program://bitsandbytes/function/bitsandbytes.triton.int8_matmul_mixed_dequantize.init_to_zero#L20-L21","kind":"function","name":"init_to_zero","path":"bitsandbytes/triton/int8_matmul_mixed_dequantize.py","language":"python","start_line":20,"end_line":21,"context_start_line":1,"context_end_line":41,"code":"import torch\n\nfrom bitsandbytes.triton.triton_utils import is_triton_available\n\nif not is_triton_available():\n\n def int8_matmul_mixed_dequantize(a, b, state_x, state_w, bias):\n return None\nelse:\n import triton\n import triton.language as tl\n\n from .matmul_perf_model import early_config_prune, estimate_matmul_time\n\n # This is a matmul kernel based on triton.ops.matmul\n # It is modified to support rowwise quantized input and global quantized weight\n # It's purpose is fused matmul then dequantize\n # It does support bias.\n\n def init_to_zero(name):\n return lambda nargs: nargs[name].zero_()\n\n def get_configs_io_bound():\n configs = []\n for num_stages in [2, 3, 4, 5, 6]:\n for block_m in [16, 32]:\n for block_k in [32, 64]:\n for block_n in [32, 64, 128, 256]:\n num_warps = 2 if block_n <= 64 else 4\n configs.append(\n triton.Config(\n {\"BLOCK_M\": block_m, \"BLOCK_N\": block_n, \"BLOCK_K\": block_k, \"SPLIT_K\": 1},\n num_stages=num_stages,\n num_warps=num_warps,\n ),\n )\n # split_k\n for split_k in [2, 4, 8, 16]:\n configs.append(\n triton.Config(\n {\"BLOCK_M\": block_m, \"BLOCK_N\": block_n, \"BLOCK_K\": block_k, \"SPLIT_K\": split_k},","source_hash":"00d38ef8d3adde6f165ea22c56ece9f24acb177e88b926ea3e85a75aae1db6de","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.int8_matmul_mixed_dequantize.get_configs_io_bound","uri":"program://bitsandbytes/function/bitsandbytes.triton.int8_matmul_mixed_dequantize.get_configs_io_bound#L23-L47","kind":"function","name":"get_configs_io_bound","path":"bitsandbytes/triton/int8_matmul_mixed_dequantize.py","language":"python","start_line":23,"end_line":47,"context_start_line":3,"context_end_line":67,"code":"from bitsandbytes.triton.triton_utils import is_triton_available\n\nif not is_triton_available():\n\n def int8_matmul_mixed_dequantize(a, b, state_x, state_w, bias):\n return None\nelse:\n import triton\n import triton.language as tl\n\n from .matmul_perf_model import early_config_prune, estimate_matmul_time\n\n # This is a matmul kernel based on triton.ops.matmul\n # It is modified to support rowwise quantized input and global quantized weight\n # It's purpose is fused matmul then dequantize\n # It does support bias.\n\n def init_to_zero(name):\n return lambda nargs: nargs[name].zero_()\n\n def get_configs_io_bound():\n configs = []\n for num_stages in [2, 3, 4, 5, 6]:\n for block_m in [16, 32]:\n for block_k in [32, 64]:\n for block_n in [32, 64, 128, 256]:\n num_warps = 2 if block_n <= 64 else 4\n configs.append(\n triton.Config(\n {\"BLOCK_M\": block_m, \"BLOCK_N\": block_n, \"BLOCK_K\": block_k, \"SPLIT_K\": 1},\n num_stages=num_stages,\n num_warps=num_warps,\n ),\n )\n # split_k\n for split_k in [2, 4, 8, 16]:\n configs.append(\n triton.Config(\n {\"BLOCK_M\": block_m, \"BLOCK_N\": block_n, \"BLOCK_K\": block_k, \"SPLIT_K\": 
split_k},\n num_stages=num_stages,\n num_warps=num_warps,\n pre_hook=init_to_zero(\"C\"),\n ),\n )\n return configs\n\n @triton.autotune(\n configs=[\n # basic configs for compute-bound matmuls\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 256, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=3, num_warps=8),\n triton.Config({\"BLOCK_M\": 256, \"BLOCK_N\": 128, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=3, num_warps=8),\n triton.Config({\"BLOCK_M\": 256, \"BLOCK_N\": 64, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 256, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 128, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 64, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 128, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 32, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 32, \"BLOCK_K\": 32, \"SPLIT_K\": 1}, num_stages=5, num_warps=2),\n # good for int8\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 256, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=3, num_warps=8),\n triton.Config({\"BLOCK_M\": 256, \"BLOCK_N\": 128, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=3, num_warps=8),\n triton.Config({\"BLOCK_M\": 256, \"BLOCK_N\": 64, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 256, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 128, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 64, \"BLOCK_K\": 64, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),","source_hash":"00d38ef8d3adde6f165ea22c56ece9f24acb177e88b926ea3e85a75aae1db6de","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.int8_matmul_mixed_dequantize._int8_matmul_mixed_dequantize","uri":"program://bitsandbytes/function/bitsandbytes.triton.int8_matmul_mixed_dequantize._int8_matmul_mixed_dequantize#L82-L164","kind":"function","name":"_int8_matmul_mixed_dequantize","path":"bitsandbytes/triton/int8_matmul_mixed_dequantize.py","language":"python","start_line":82,"end_line":164,"context_start_line":62,"context_end_line":184,"code":" triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 256, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=3, num_warps=8),\n triton.Config({\"BLOCK_M\": 256, \"BLOCK_N\": 128, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=3, num_warps=8),\n triton.Config({\"BLOCK_M\": 256, \"BLOCK_N\": 64, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 256, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 128, \"BLOCK_K\": 128, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 64, \"BLOCK_K\": 64, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 128, \"BLOCK_K\": 64, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 128, \"BLOCK_N\": 32, \"BLOCK_K\": 64, \"SPLIT_K\": 1}, num_stages=4, num_warps=4),\n triton.Config({\"BLOCK_M\": 64, \"BLOCK_N\": 32, \"BLOCK_K\": 64, \"SPLIT_K\": 1}, num_stages=5, num_warps=2),\n 
*get_configs_io_bound(),\n ],\n key=[\"M\", \"N\", \"K\"],\n prune_configs_by={\"early_config_prune\": early_config_prune, \"perf_model\": estimate_matmul_time, \"top_k\": 10},\n )\n @triton.heuristics(\n {\n \"EVEN_K\": lambda args: args[\"K\"] % (args[\"BLOCK_K\"] * args[\"SPLIT_K\"]) == 0,\n },\n )\n @triton.jit\n def _int8_matmul_mixed_dequantize(\n A,\n B,\n C,\n bias,\n state_x_ptr,\n state_w_ptr,\n M,\n N,\n K,\n divfactor: tl.constexpr,\n has_bias: tl.constexpr,\n stride_am,\n stride_ak,\n stride_bk,\n stride_bn,\n stride_cm,\n stride_cn,\n BLOCK_M: tl.constexpr,\n BLOCK_N: tl.constexpr,\n BLOCK_K: tl.constexpr,\n GROUP_M: tl.constexpr,\n SPLIT_K: tl.constexpr,\n EVEN_K: tl.constexpr,\n ACC_TYPE: tl.constexpr,\n ):\n # matrix multiplication\n pid = tl.program_id(0)\n pid_z = tl.program_id(1)\n grid_m = tl.cdiv(M, BLOCK_M)\n grid_n = tl.cdiv(N, BLOCK_N)\n # re-order program ID for better L2 performance\n width = GROUP_M * grid_n\n group_id = pid // width\n group_size = min(grid_m - group_id * GROUP_M, GROUP_M)\n pid_m = group_id * GROUP_M + (pid % group_size)\n pid_n = (pid % width) // (group_size)\n # do matrix multiplication\n rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)\n rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)\n ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)\n rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)\n rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)\n # pointers\n A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)\n B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)\n\n # rematerialize rm and rn to save registers\n rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)\n rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)\n\n w_factor = tl.load(state_w_ptr)\n x_factor = tl.load(state_x_ptr + ram)[:, None]\n\n # acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)\n acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.int32)\n for k in range(0, tl.cdiv(K, BLOCK_K * SPLIT_K)):\n if EVEN_K:\n a = tl.load(A)\n b = tl.load(B)\n else:\n k_remaining = K - k * (BLOCK_K * SPLIT_K)\n a = tl.load(A, mask=rk[None, :] < k_remaining, other=0.0)\n b = tl.load(B, mask=rk[:, None] < k_remaining, other=0.0)\n acc += tl.dot(a, b)\n A += BLOCK_K * SPLIT_K * stride_ak\n B += BLOCK_K * SPLIT_K * stride_bk\n\n acc = w_factor * (x_factor * (acc * divfactor))\n acc = acc.to(C.dtype.element_ty)\n\n # conditionally add bias\n if has_bias:\n bias = tl.load(bias + rn).to(C.dtype.element_ty)\n acc = acc + bias[None, :]\n\n C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)\n mask = (rm < M)[:, None] & (rn < N)[None, :]\n # handles write-back with reduction-splitting\n if SPLIT_K == 1:\n tl.store(C, acc, mask=mask)\n else:\n tl.atomic_add(C, acc, mask=mask)\n\n def int8_matmul_mixed_dequantize(a, b, state_x, state_w, bias):\n device = a.device\n divfactor = 1.0 / (127.0 * 127.0)\n has_bias = 0 if bias is None else 1\n # handle non-contiguous inputs if necessary\n if a.stride(0) > 1 and a.stride(1) > 1:\n a = a.contiguous()\n if b.stride(0) > 1 and b.stride(1) > 1:\n b = b.contiguous()\n # checks constraints\n assert a.shape[1] == b.shape[0], \"incompatible dimensions\"\n M, K = a.shape\n _, N = b.shape\n # allocates output\n c = torch.empty((M, N), device=device, dtype=torch.float16)\n # accumulator types\n ACC_TYPE = tl.float32 # if a.dtype in [torch.float16, torch.bfloat16, torch.float32] else tl.int32\n # launch int8_matmul_mixed_dequantize kernel\n grid = lambda META: (triton.cdiv(M, META[\"BLOCK_M\"]) * triton.cdiv(N, META[\"BLOCK_N\"]), 
META[\"SPLIT_K\"])","source_hash":"00d38ef8d3adde6f165ea22c56ece9f24acb177e88b926ea3e85a75aae1db6de","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.matmul_perf_model","uri":"program://bitsandbytes/module/bitsandbytes.triton.matmul_perf_model#L1-L211","kind":"module","name":"bitsandbytes.triton.matmul_perf_model","path":"bitsandbytes/triton/matmul_perf_model.py","language":"python","start_line":1,"end_line":211,"context_start_line":1,"context_end_line":211,"code":"# Adapted from https://github.com/triton-lang/kernels/blob/eeeebdd8be7d13629de22d600621e6234057eed3/kernels/matmul_perf_model.py\n# https://github.com/triton-lang/kernels is licensed under the MIT License.\n\nimport functools\nimport heapq\n\nimport torch\n\nfrom triton import cdiv\nfrom triton.runtime import driver\nfrom triton.testing import (\n get_dram_gbps,\n get_max_simd_tflops,\n get_max_tensorcore_tflops,\n nvsmi,\n)\n\n\n@functools.lru_cache\ndef get_clock_rate_in_khz():\n try:\n return nvsmi([\"clocks.max.sm\"])[0] * 1e3\n except FileNotFoundError:\n import pynvml\n\n pynvml.nvmlInit()\n handle = pynvml.nvmlDeviceGetHandleByIndex(0)\n return pynvml.nvmlDeviceGetMaxClockInfo(handle, pynvml.NVML_CLOCK_SM) * 1e3\n\n\ndef get_tensorcore_tflops(device, num_ctas, num_warps, dtype):\n \"\"\"return compute throughput in TOPS\"\"\"\n total_warps = num_ctas * min(num_warps, 4)\n num_subcores = driver.active.utils.get_device_properties(device)[\"multiprocessor_count\"] * 4 # on recent GPUs\n tflops = (\n min(num_subcores, total_warps)\n / num_subcores\n * get_max_tensorcore_tflops(dtype, get_clock_rate_in_khz(), device)\n )\n return tflops\n\n\ndef get_simd_tflops(device, num_ctas, num_warps, dtype):\n \"\"\"return compute throughput in TOPS\"\"\"\n total_warps = num_ctas * min(num_warps, 4)\n num_subcores = driver.active.utils.get_device_properties(device)[\"multiprocessor_count\"] * 4 # on recent GPUs\n tflops = (\n min(num_subcores, total_warps) / num_subcores * get_max_simd_tflops(dtype, get_clock_rate_in_khz(), device)\n )\n return tflops\n\n\ndef get_tflops(device, num_ctas, num_warps, dtype):\n capability = torch.cuda.get_device_capability(device)\n if capability[0] < 8 and dtype == torch.float32:\n return get_simd_tflops(device, num_ctas, num_warps, dtype)\n return get_tensorcore_tflops(device, num_ctas, num_warps, dtype)\n\n\ndef estimate_matmul_time(\n # backend, device,\n num_warps,\n num_stages, #\n A,\n B,\n C, #\n M,\n N,\n K, #\n BLOCK_M,\n BLOCK_N,\n BLOCK_K,\n SPLIT_K, #\n debug=False,\n **kwargs, #\n):\n \"\"\"return estimated running time in ms\n = max(compute, loading) + store\"\"\"\n device = torch.cuda.current_device()\n dtype = A.dtype\n dtsize = A.element_size()\n\n num_cta_m = cdiv(M, BLOCK_M)\n num_cta_n = cdiv(N, BLOCK_N)\n num_cta_k = SPLIT_K\n num_ctas = num_cta_m * num_cta_n * num_cta_k\n\n # If the input is smaller than the block size\n M, N = max(M, BLOCK_M), max(N, BLOCK_N)\n\n # time to compute\n total_ops = 2 * M * N * K / (1024 * 1024 * 1024) # GOPS\n tput = get_tflops(device, num_ctas, num_warps, dtype)\n compute_ms = total_ops / tput\n\n # time to load data\n num_sm = driver.active.utils.get_device_properties(device)[\"multiprocessor_count\"]\n active_cta_ratio = min(1, num_ctas / num_sm)\n active_cta_ratio_bw1 = min(1, num_ctas / 32) # 32 active ctas are enough to saturate\n active_cta_ratio_bw2 = max(min(1, (num_ctas - 32) / (108 - 32)), 0) # 32-108, remaining 5%\n dram_bw = get_dram_gbps(device) * (active_cta_ratio_bw1 * 0.95 + active_cta_ratio_bw2 * 0.05) # 
in GB/s\n l2_bw = dram_bw * 4 # rough estimation (should be 4.7 for A100?)\n # assume 80% of (following) loads are in L2 cache\n load_a_dram = M * K * dtsize * (1 + 0.2 * (num_cta_n - 1))\n load_a_l2 = M * K * dtsize * 0.8 * (num_cta_n - 1)\n load_b_dram = N * K * dtsize * (1 + 0.2 * (num_cta_m - 1))\n load_b_l2 = N * K * dtsize * 0.8 * (num_cta_m - 1)\n # total\n total_dram = (load_a_dram + load_b_dram) / (1024 * 1024) # MB\n total_l2 = (load_a_l2 + load_b_l2) / (1024 * 1024)\n # loading time in ms\n load_ms = total_dram / dram_bw + total_l2 / l2_bw\n\n # estimate storing time\n store_bw = dram_bw * 0.6 # :o\n store_c_dram = M * N * dtsize * SPLIT_K / (1024 * 1024) # MB\n if SPLIT_K == 1:\n store_ms = store_c_dram / store_bw\n else:\n reduce_bw = store_bw\n store_ms = store_c_dram / reduce_bw\n # c.zero_()\n zero_ms = M * N * 2 / (1024 * 1024) / store_bw\n store_ms += zero_ms\n\n total_time_ms = max(compute_ms, load_ms) + store_ms\n if debug:\n print(\n f\"Total time: {total_time_ms}ms, compute time: {compute_ms}ms, \"\n f\"loading time: {load_ms}ms, store time: {store_ms}ms, \"\n f\"Activate CTAs: {active_cta_ratio * 100}%\"\n )\n return total_time_ms\n\n\ndef early_config_prune(configs, named_args, **kwargs):\n device = torch.cuda.current_device()\n capability = torch.cuda.get_device_capability()\n # BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages\n dtsize = named_args[\"A\"].element_size()\n dtype = named_args[\"A\"].dtype\n\n # 1. make sure we have enough smem\n pruned_configs = []\n for config in configs:\n kw = config.kwargs\n BLOCK_M, BLOCK_N, BLOCK_K, num_stages = (\n kw[\"BLOCK_M\"],\n kw[\"BLOCK_N\"],\n kw[\"BLOCK_K\"],\n config.num_stages,\n )\n\n max_shared_memory = driver.active.utils.get_device_properties(device)[\"max_shared_mem\"]\n required_shared_memory = (BLOCK_M + BLOCK_N) * BLOCK_K * num_stages * dtsize\n if required_shared_memory <= max_shared_memory:\n pruned_configs.append(config)\n configs = pruned_configs\n\n # Some dtypes do not allow atomic_add\n if dtype not in [torch.float16, torch.float32]:\n configs = [config for config in configs if config.kwargs[\"SPLIT_K\"] == 1]\n\n # group configs by (BLOCK_M,_N,_K, SPLIT_K, num_warps)\n configs_map = {}\n for config in configs:\n kw = config.kwargs\n BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages = (\n kw[\"BLOCK_M\"],\n kw[\"BLOCK_N\"],\n kw[\"BLOCK_K\"],\n kw[\"SPLIT_K\"],\n config.num_warps,\n config.num_stages,\n )\n\n key = (BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps)\n if key in configs_map:\n configs_map[key].append((config, num_stages))\n else:\n configs_map[key] = [(config, num_stages)]\n\n pruned_configs = []\n for k, v in configs_map.items():\n BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps = k\n if capability[0] >= 8:\n # compute cycles (only works for ampere GPUs)\n mmas = BLOCK_M * BLOCK_N * BLOCK_K / (16 * 8 * 16)\n mma_cycles = mmas / min(4, num_warps) * 8\n\n ldgsts_latency = 300 # Does this matter?\n optimal_num_stages = ldgsts_latency / mma_cycles\n\n # nearest stages, prefer large #stages\n nearest = heapq.nsmallest(\n 2,\n v,\n key=lambda x: (\n 10 + abs(x[1] - optimal_num_stages)\n if (x[1] - optimal_num_stages) < 0\n else x[1] - optimal_num_stages\n ),\n )\n\n for n in nearest:\n pruned_configs.append(n[0])\n else: # Volta & Turing only supports num_stages <= 2\n random_config = v[0][0]\n random_config.num_stages = 2\n pruned_configs.append(random_config)\n return 
pruned_configs","source_hash":"bbbdc6a7678a663347d0f1396e2a80a92d9c572b49c6196e2fffa2a954110371","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.matmul_perf_model.get_clock_rate_in_khz","uri":"program://bitsandbytes/function/bitsandbytes.triton.matmul_perf_model.get_clock_rate_in_khz#L20-L28","kind":"function","name":"get_clock_rate_in_khz","path":"bitsandbytes/triton/matmul_perf_model.py","language":"python","start_line":20,"end_line":28,"context_start_line":1,"context_end_line":48,"code":"# Adapted from https://github.com/triton-lang/kernels/blob/eeeebdd8be7d13629de22d600621e6234057eed3/kernels/matmul_perf_model.py\n# https://github.com/triton-lang/kernels is licensed under the MIT License.\n\nimport functools\nimport heapq\n\nimport torch\n\nfrom triton import cdiv\nfrom triton.runtime import driver\nfrom triton.testing import (\n get_dram_gbps,\n get_max_simd_tflops,\n get_max_tensorcore_tflops,\n nvsmi,\n)\n\n\n@functools.lru_cache\ndef get_clock_rate_in_khz():\n try:\n return nvsmi([\"clocks.max.sm\"])[0] * 1e3\n except FileNotFoundError:\n import pynvml\n\n pynvml.nvmlInit()\n handle = pynvml.nvmlDeviceGetHandleByIndex(0)\n return pynvml.nvmlDeviceGetMaxClockInfo(handle, pynvml.NVML_CLOCK_SM) * 1e3\n\n\ndef get_tensorcore_tflops(device, num_ctas, num_warps, dtype):\n \"\"\"return compute throughput in TOPS\"\"\"\n total_warps = num_ctas * min(num_warps, 4)\n num_subcores = driver.active.utils.get_device_properties(device)[\"multiprocessor_count\"] * 4 # on recent GPUs\n tflops = (\n min(num_subcores, total_warps)\n / num_subcores\n * get_max_tensorcore_tflops(dtype, get_clock_rate_in_khz(), device)\n )\n return tflops\n\n\ndef get_simd_tflops(device, num_ctas, num_warps, dtype):\n \"\"\"return compute throughput in TOPS\"\"\"\n total_warps = num_ctas * min(num_warps, 4)\n num_subcores = driver.active.utils.get_device_properties(device)[\"multiprocessor_count\"] * 4 # on recent GPUs\n tflops = (\n min(num_subcores, total_warps) / num_subcores * get_max_simd_tflops(dtype, get_clock_rate_in_khz(), device)","source_hash":"bbbdc6a7678a663347d0f1396e2a80a92d9c572b49c6196e2fffa2a954110371","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.matmul_perf_model.get_tensorcore_tflops","uri":"program://bitsandbytes/function/bitsandbytes.triton.matmul_perf_model.get_tensorcore_tflops#L31-L40","kind":"function","name":"get_tensorcore_tflops","path":"bitsandbytes/triton/matmul_perf_model.py","language":"python","start_line":31,"end_line":40,"context_start_line":11,"context_end_line":60,"code":"from triton.testing import (\n get_dram_gbps,\n get_max_simd_tflops,\n get_max_tensorcore_tflops,\n nvsmi,\n)\n\n\n@functools.lru_cache\ndef get_clock_rate_in_khz():\n try:\n return nvsmi([\"clocks.max.sm\"])[0] * 1e3\n except FileNotFoundError:\n import pynvml\n\n pynvml.nvmlInit()\n handle = pynvml.nvmlDeviceGetHandleByIndex(0)\n return pynvml.nvmlDeviceGetMaxClockInfo(handle, pynvml.NVML_CLOCK_SM) * 1e3\n\n\ndef get_tensorcore_tflops(device, num_ctas, num_warps, dtype):\n \"\"\"return compute throughput in TOPS\"\"\"\n total_warps = num_ctas * min(num_warps, 4)\n num_subcores = driver.active.utils.get_device_properties(device)[\"multiprocessor_count\"] * 4 # on recent GPUs\n tflops = (\n min(num_subcores, total_warps)\n / num_subcores\n * get_max_tensorcore_tflops(dtype, get_clock_rate_in_khz(), device)\n )\n return tflops\n\n\ndef get_simd_tflops(device, num_ctas, num_warps, dtype):\n \"\"\"return compute throughput in TOPS\"\"\"\n total_warps 
= num_ctas * min(num_warps, 4)\n num_subcores = driver.active.utils.get_device_properties(device)[\"multiprocessor_count\"] * 4 # on recent GPUs\n tflops = (\n min(num_subcores, total_warps) / num_subcores * get_max_simd_tflops(dtype, get_clock_rate_in_khz(), device)\n )\n return tflops\n\n\ndef get_tflops(device, num_ctas, num_warps, dtype):\n capability = torch.cuda.get_device_capability(device)\n if capability[0] < 8 and dtype == torch.float32:\n return get_simd_tflops(device, num_ctas, num_warps, dtype)\n return get_tensorcore_tflops(device, num_ctas, num_warps, dtype)\n\n\ndef estimate_matmul_time(","source_hash":"bbbdc6a7678a663347d0f1396e2a80a92d9c572b49c6196e2fffa2a954110371","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.matmul_perf_model.get_simd_tflops","uri":"program://bitsandbytes/function/bitsandbytes.triton.matmul_perf_model.get_simd_tflops#L43-L50","kind":"function","name":"get_simd_tflops","path":"bitsandbytes/triton/matmul_perf_model.py","language":"python","start_line":43,"end_line":50,"context_start_line":23,"context_end_line":70,"code":" except FileNotFoundError:\n import pynvml\n\n pynvml.nvmlInit()\n handle = pynvml.nvmlDeviceGetHandleByIndex(0)\n return pynvml.nvmlDeviceGetMaxClockInfo(handle, pynvml.NVML_CLOCK_SM) * 1e3\n\n\ndef get_tensorcore_tflops(device, num_ctas, num_warps, dtype):\n \"\"\"return compute throughput in TOPS\"\"\"\n total_warps = num_ctas * min(num_warps, 4)\n num_subcores = driver.active.utils.get_device_properties(device)[\"multiprocessor_count\"] * 4 # on recent GPUs\n tflops = (\n min(num_subcores, total_warps)\n / num_subcores\n * get_max_tensorcore_tflops(dtype, get_clock_rate_in_khz(), device)\n )\n return tflops\n\n\ndef get_simd_tflops(device, num_ctas, num_warps, dtype):\n \"\"\"return compute throughput in TOPS\"\"\"\n total_warps = num_ctas * min(num_warps, 4)\n num_subcores = driver.active.utils.get_device_properties(device)[\"multiprocessor_count\"] * 4 # on recent GPUs\n tflops = (\n min(num_subcores, total_warps) / num_subcores * get_max_simd_tflops(dtype, get_clock_rate_in_khz(), device)\n )\n return tflops\n\n\ndef get_tflops(device, num_ctas, num_warps, dtype):\n capability = torch.cuda.get_device_capability(device)\n if capability[0] < 8 and dtype == torch.float32:\n return get_simd_tflops(device, num_ctas, num_warps, dtype)\n return get_tensorcore_tflops(device, num_ctas, num_warps, dtype)\n\n\ndef estimate_matmul_time(\n # backend, device,\n num_warps,\n num_stages, #\n A,\n B,\n C, #\n M,\n N,\n K, #\n BLOCK_M,","source_hash":"bbbdc6a7678a663347d0f1396e2a80a92d9c572b49c6196e2fffa2a954110371","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.matmul_perf_model.get_tflops","uri":"program://bitsandbytes/function/bitsandbytes.triton.matmul_perf_model.get_tflops#L53-L57","kind":"function","name":"get_tflops","path":"bitsandbytes/triton/matmul_perf_model.py","language":"python","start_line":53,"end_line":57,"context_start_line":33,"context_end_line":77,"code":" total_warps = num_ctas * min(num_warps, 4)\n num_subcores = driver.active.utils.get_device_properties(device)[\"multiprocessor_count\"] * 4 # on recent GPUs\n tflops = (\n min(num_subcores, total_warps)\n / num_subcores\n * get_max_tensorcore_tflops(dtype, get_clock_rate_in_khz(), device)\n )\n return tflops\n\n\ndef get_simd_tflops(device, num_ctas, num_warps, dtype):\n \"\"\"return compute throughput in TOPS\"\"\"\n total_warps = num_ctas * min(num_warps, 4)\n num_subcores = 
driver.active.utils.get_device_properties(device)[\"multiprocessor_count\"] * 4 # on recent GPUs\n tflops = (\n min(num_subcores, total_warps) / num_subcores * get_max_simd_tflops(dtype, get_clock_rate_in_khz(), device)\n )\n return tflops\n\n\ndef get_tflops(device, num_ctas, num_warps, dtype):\n capability = torch.cuda.get_device_capability(device)\n if capability[0] < 8 and dtype == torch.float32:\n return get_simd_tflops(device, num_ctas, num_warps, dtype)\n return get_tensorcore_tflops(device, num_ctas, num_warps, dtype)\n\n\ndef estimate_matmul_time(\n # backend, device,\n num_warps,\n num_stages, #\n A,\n B,\n C, #\n M,\n N,\n K, #\n BLOCK_M,\n BLOCK_N,\n BLOCK_K,\n SPLIT_K, #\n debug=False,\n **kwargs, #\n):\n \"\"\"return estimated running time in ms","source_hash":"bbbdc6a7678a663347d0f1396e2a80a92d9c572b49c6196e2fffa2a954110371","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.matmul_perf_model.estimate_matmul_time","uri":"program://bitsandbytes/function/bitsandbytes.triton.matmul_perf_model.estimate_matmul_time#L60-L133","kind":"function","name":"estimate_matmul_time","path":"bitsandbytes/triton/matmul_perf_model.py","language":"python","start_line":60,"end_line":133,"context_start_line":40,"context_end_line":153,"code":" return tflops\n\n\ndef get_simd_tflops(device, num_ctas, num_warps, dtype):\n \"\"\"return compute throughput in TOPS\"\"\"\n total_warps = num_ctas * min(num_warps, 4)\n num_subcores = driver.active.utils.get_device_properties(device)[\"multiprocessor_count\"] * 4 # on recent GPUs\n tflops = (\n min(num_subcores, total_warps) / num_subcores * get_max_simd_tflops(dtype, get_clock_rate_in_khz(), device)\n )\n return tflops\n\n\ndef get_tflops(device, num_ctas, num_warps, dtype):\n capability = torch.cuda.get_device_capability(device)\n if capability[0] < 8 and dtype == torch.float32:\n return get_simd_tflops(device, num_ctas, num_warps, dtype)\n return get_tensorcore_tflops(device, num_ctas, num_warps, dtype)\n\n\ndef estimate_matmul_time(\n # backend, device,\n num_warps,\n num_stages, #\n A,\n B,\n C, #\n M,\n N,\n K, #\n BLOCK_M,\n BLOCK_N,\n BLOCK_K,\n SPLIT_K, #\n debug=False,\n **kwargs, #\n):\n \"\"\"return estimated running time in ms\n = max(compute, loading) + store\"\"\"\n device = torch.cuda.current_device()\n dtype = A.dtype\n dtsize = A.element_size()\n\n num_cta_m = cdiv(M, BLOCK_M)\n num_cta_n = cdiv(N, BLOCK_N)\n num_cta_k = SPLIT_K\n num_ctas = num_cta_m * num_cta_n * num_cta_k\n\n # If the input is smaller than the block size\n M, N = max(M, BLOCK_M), max(N, BLOCK_N)\n\n # time to compute\n total_ops = 2 * M * N * K / (1024 * 1024 * 1024) # GOPS\n tput = get_tflops(device, num_ctas, num_warps, dtype)\n compute_ms = total_ops / tput\n\n # time to load data\n num_sm = driver.active.utils.get_device_properties(device)[\"multiprocessor_count\"]\n active_cta_ratio = min(1, num_ctas / num_sm)\n active_cta_ratio_bw1 = min(1, num_ctas / 32) # 32 active ctas are enough to saturate\n active_cta_ratio_bw2 = max(min(1, (num_ctas - 32) / (108 - 32)), 0) # 32-108, remaining 5%\n dram_bw = get_dram_gbps(device) * (active_cta_ratio_bw1 * 0.95 + active_cta_ratio_bw2 * 0.05) # in GB/s\n l2_bw = dram_bw * 4 # rough estimation (should be 4.7 for A100?)\n # assume 80% of (following) loads are in L2 cache\n load_a_dram = M * K * dtsize * (1 + 0.2 * (num_cta_n - 1))\n load_a_l2 = M * K * dtsize * 0.8 * (num_cta_n - 1)\n load_b_dram = N * K * dtsize * (1 + 0.2 * (num_cta_m - 1))\n load_b_l2 = N * K * dtsize * 0.8 * (num_cta_m - 1)\n # 
total\n total_dram = (load_a_dram + load_b_dram) / (1024 * 1024) # MB\n total_l2 = (load_a_l2 + load_b_l2) / (1024 * 1024)\n # loading time in ms\n load_ms = total_dram / dram_bw + total_l2 / l2_bw\n\n # estimate storing time\n store_bw = dram_bw * 0.6 # :o\n store_c_dram = M * N * dtsize * SPLIT_K / (1024 * 1024) # MB\n if SPLIT_K == 1:\n store_ms = store_c_dram / store_bw\n else:\n reduce_bw = store_bw\n store_ms = store_c_dram / reduce_bw\n # c.zero_()\n zero_ms = M * N * 2 / (1024 * 1024) / store_bw\n store_ms += zero_ms\n\n total_time_ms = max(compute_ms, load_ms) + store_ms\n if debug:\n print(\n f\"Total time: {total_time_ms}ms, compute time: {compute_ms}ms, \"\n f\"loading time: {load_ms}ms, store time: {store_ms}ms, \"\n f\"Activate CTAs: {active_cta_ratio * 100}%\"\n )\n return total_time_ms\n\n\ndef early_config_prune(configs, named_args, **kwargs):\n device = torch.cuda.current_device()\n capability = torch.cuda.get_device_capability()\n # BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages\n dtsize = named_args[\"A\"].element_size()\n dtype = named_args[\"A\"].dtype\n\n # 1. make sure we have enough smem\n pruned_configs = []\n for config in configs:\n kw = config.kwargs\n BLOCK_M, BLOCK_N, BLOCK_K, num_stages = (\n kw[\"BLOCK_M\"],\n kw[\"BLOCK_N\"],\n kw[\"BLOCK_K\"],\n config.num_stages,\n )\n","source_hash":"bbbdc6a7678a663347d0f1396e2a80a92d9c572b49c6196e2fffa2a954110371","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.triton.matmul_perf_model.early_config_prune","uri":"program://bitsandbytes/function/bitsandbytes.triton.matmul_perf_model.early_config_prune#L136-L211","kind":"function","name":"early_config_prune","path":"bitsandbytes/triton/matmul_perf_model.py","language":"python","start_line":136,"end_line":211,"context_start_line":116,"context_end_line":211,"code":" store_c_dram = M * N * dtsize * SPLIT_K / (1024 * 1024) # MB\n if SPLIT_K == 1:\n store_ms = store_c_dram / store_bw\n else:\n reduce_bw = store_bw\n store_ms = store_c_dram / reduce_bw\n # c.zero_()\n zero_ms = M * N * 2 / (1024 * 1024) / store_bw\n store_ms += zero_ms\n\n total_time_ms = max(compute_ms, load_ms) + store_ms\n if debug:\n print(\n f\"Total time: {total_time_ms}ms, compute time: {compute_ms}ms, \"\n f\"loading time: {load_ms}ms, store time: {store_ms}ms, \"\n f\"Activate CTAs: {active_cta_ratio * 100}%\"\n )\n return total_time_ms\n\n\ndef early_config_prune(configs, named_args, **kwargs):\n device = torch.cuda.current_device()\n capability = torch.cuda.get_device_capability()\n # BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages\n dtsize = named_args[\"A\"].element_size()\n dtype = named_args[\"A\"].dtype\n\n # 1. 
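`estimate_matmul_time` is a roofline-style model: compute and loads are assumed to overlap, so the estimate is `max(compute, load) + store`, with stores reaching only ~60% of DRAM bandwidth. A back-of-the-envelope version with assumed sustained rates (the real function derives these from device properties, the active-CTA ratio, and an L2 hit-rate model):

```python
# Roofline-style estimate matching the shape of estimate_matmul_time:
# time = max(compute, load) + store. All rates below are assumptions.
M, N, K, dtsize = 4096, 4096, 4096, 2    # fp16 GEMM
tflops, dram_gbps = 250.0, 1500.0        # assumed sustained rates

compute_ms = (2 * M * N * K) / (tflops * 1e12) * 1e3
bytes_loaded = (M * K + N * K) * dtsize  # first-touch loads of A and B
load_ms = bytes_loaded / (dram_gbps * 1e9) * 1e3
# Stores run at ~60% of DRAM bandwidth, as assumed in the model above.
store_ms = (M * N * dtsize) / (0.6 * dram_gbps * 1e9) * 1e3

total_ms = max(compute_ms, load_ms) + store_ms
print(f"compute={compute_ms:.3f}ms load={load_ms:.3f}ms "
      f"store={store_ms:.3f}ms total={total_ms:.3f}ms")
```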
make sure we have enough smem\n pruned_configs = []\n for config in configs:\n kw = config.kwargs\n BLOCK_M, BLOCK_N, BLOCK_K, num_stages = (\n kw[\"BLOCK_M\"],\n kw[\"BLOCK_N\"],\n kw[\"BLOCK_K\"],\n config.num_stages,\n )\n\n max_shared_memory = driver.active.utils.get_device_properties(device)[\"max_shared_mem\"]\n required_shared_memory = (BLOCK_M + BLOCK_N) * BLOCK_K * num_stages * dtsize\n if required_shared_memory <= max_shared_memory:\n pruned_configs.append(config)\n configs = pruned_configs\n\n # Some dtypes do not allow atomic_add\n if dtype not in [torch.float16, torch.float32]:\n configs = [config for config in configs if config.kwargs[\"SPLIT_K\"] == 1]\n\n # group configs by (BLOCK_M,_N,_K, SPLIT_K, num_warps)\n configs_map = {}\n for config in configs:\n kw = config.kwargs\n BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages = (\n kw[\"BLOCK_M\"],\n kw[\"BLOCK_N\"],\n kw[\"BLOCK_K\"],\n kw[\"SPLIT_K\"],\n config.num_warps,\n config.num_stages,\n )\n\n key = (BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps)\n if key in configs_map:\n configs_map[key].append((config, num_stages))\n else:\n configs_map[key] = [(config, num_stages)]\n\n pruned_configs = []\n for k, v in configs_map.items():\n BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps = k\n if capability[0] >= 8:\n # compute cycles (only works for ampere GPUs)\n mmas = BLOCK_M * BLOCK_N * BLOCK_K / (16 * 8 * 16)\n mma_cycles = mmas / min(4, num_warps) * 8\n\n ldgsts_latency = 300 # Does this matter?\n optimal_num_stages = ldgsts_latency / mma_cycles\n\n # nearest stages, prefer large #stages\n nearest = heapq.nsmallest(\n 2,\n v,\n key=lambda x: (\n 10 + abs(x[1] - optimal_num_stages)\n if (x[1] - optimal_num_stages) < 0\n else x[1] - optimal_num_stages\n ),\n )\n\n for n in nearest:\n pruned_configs.append(n[0])\n else: # Volta & Turing only supports num_stages <= 2\n random_config = v[0][0]\n random_config.num_stages = 2\n pruned_configs.append(random_config)\n return pruned_configs","source_hash":"bbbdc6a7678a663347d0f1396e2a80a92d9c572b49c6196e2fffa2a954110371","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.research.nn.modules","uri":"program://bitsandbytes/module/bitsandbytes.research.nn.modules#L1-L76","kind":"module","name":"bitsandbytes.research.nn.modules","path":"bitsandbytes/research/nn/modules.py","language":"python","start_line":1,"end_line":76,"context_start_line":1,"context_end_line":76,"code":"from typing import TypeVar\n\nimport torch\nfrom torch import nn\n\nimport bitsandbytes as bnb\n\nT = TypeVar(\"T\", bound=\"torch.nn.Module\")\n\n\nclass LinearFP8Mixed(nn.Linear):\n def __init__(self, input_features, output_features, bias=True):\n super().__init__(input_features, output_features, bias)\n self.bw_code = None\n self.fw_code = None\n array = [4096, 2048, 1024, 512, 256, 128, 64, 0]\n for i, k in enumerate(array):\n if input_features > array[i + 1]:\n self.bsz = k\n break\n for i, k in enumerate(array):\n if output_features > array[i + 1]:\n self.bsz2 = k\n break\n\n def forward(self, x: torch.Tensor):\n if self.fw_code is None:\n self.bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(x.device)\n self.fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(x.device)\n\n out = bnb.research.matmul_fp8_mixed(\n x,\n self.weight.t(),\n fw_code=self.fw_code,\n bw_code=self.bw_code,\n bsz=self.bsz,\n bsz2=self.bsz2,\n )\n if self.bias is not None:\n out += self.bias\n\n return out\n\n\nclass LinearFP8Global(nn.Linear):\n def __init__(self, input_features, 
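`early_config_prune` keeps only configs whose software-pipelined tiles fit in shared memory, then, per config group, picks the one or two stage counts nearest an optimum derived from an assumed ~300-cycle load latency, penalizing candidates below the optimum so larger stage counts win ties. A standalone sketch of both rules, decoupled from Triton's config objects (the shared-memory limit is an assumed A100-like value):

```python
import heapq

MAX_SHARED_MEM = 164 * 1024  # assumed per-SM limit (A100-like)

def smem_ok(BLOCK_M, BLOCK_N, BLOCK_K, num_stages, dtsize=2):
    # Each pipeline stage keeps one A tile and one B tile in shared memory.
    return (BLOCK_M + BLOCK_N) * BLOCK_K * num_stages * dtsize <= MAX_SHARED_MEM

def pick_stages(candidate_stages, BLOCK_M, BLOCK_N, BLOCK_K, num_warps):
    # Same asymmetric distance as above: stage counts *below* the optimum
    # get a +10 penalty, so larger stage counts are preferred.
    mmas = BLOCK_M * BLOCK_N * BLOCK_K / (16 * 8 * 16)
    mma_cycles = mmas / min(4, num_warps) * 8
    optimal = 300 / mma_cycles  # assumed 300-cycle load (LDGSTS) latency
    key = lambda s: (10 + abs(s - optimal)) if s < optimal else s - optimal
    return heapq.nsmallest(2, candidate_stages, key=key)

print(smem_ok(128, 128, 64, 3))            # True under the assumed limit
print(pick_stages([2, 3, 4, 5], 128, 128, 64, 8))  # [2, 3]
```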
output_features, bias=True):\n super().__init__(input_features, output_features, bias)\n self.bw_code = None\n self.fw_code = None\n array = [4096, 2048, 1024, 512, 256, 128, 64, 0]\n for i, k in enumerate(array):\n if input_features > array[i + 1]:\n self.bsz = k\n break\n for i, k in enumerate(array):\n if output_features > array[i + 1]:\n self.bsz2 = k\n break\n\n def forward(self, x: torch.Tensor):\n if self.fw_code is None:\n self.bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(x.device)\n self.fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(x.device)\n\n out = bnb.matmul_fp8_global(\n x,\n self.weight.t(),\n fw_code=self.fw_code,\n bw_code=self.bw_code,\n bsz=self.bsz,\n bsz2=self.bsz2,\n )\n if self.bias is not None:\n out += self.bias\n\n return out","source_hash":"d6e228ea96e0bf873160de9367ca61a5e525ffb1bb0308c2665e3568e1bb8a53","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.research.nn.modules.LinearFP8Mixed","uri":"program://bitsandbytes/class/bitsandbytes.research.nn.modules.LinearFP8Mixed#L11-L42","kind":"class","name":"LinearFP8Mixed","path":"bitsandbytes/research/nn/modules.py","language":"python","start_line":11,"end_line":42,"context_start_line":1,"context_end_line":62,"code":"from typing import TypeVar\n\nimport torch\nfrom torch import nn\n\nimport bitsandbytes as bnb\n\nT = TypeVar(\"T\", bound=\"torch.nn.Module\")\n\n\nclass LinearFP8Mixed(nn.Linear):\n def __init__(self, input_features, output_features, bias=True):\n super().__init__(input_features, output_features, bias)\n self.bw_code = None\n self.fw_code = None\n array = [4096, 2048, 1024, 512, 256, 128, 64, 0]\n for i, k in enumerate(array):\n if input_features > array[i + 1]:\n self.bsz = k\n break\n for i, k in enumerate(array):\n if output_features > array[i + 1]:\n self.bsz2 = k\n break\n\n def forward(self, x: torch.Tensor):\n if self.fw_code is None:\n self.bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(x.device)\n self.fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(x.device)\n\n out = bnb.research.matmul_fp8_mixed(\n x,\n self.weight.t(),\n fw_code=self.fw_code,\n bw_code=self.bw_code,\n bsz=self.bsz,\n bsz2=self.bsz2,\n )\n if self.bias is not None:\n out += self.bias\n\n return out\n\n\nclass LinearFP8Global(nn.Linear):\n def __init__(self, input_features, output_features, bias=True):\n super().__init__(input_features, output_features, bias)\n self.bw_code = None\n self.fw_code = None\n array = [4096, 2048, 1024, 512, 256, 128, 64, 0]\n for i, k in enumerate(array):\n if input_features > array[i + 1]:\n self.bsz = k\n break\n for i, k in enumerate(array):\n if output_features > array[i + 1]:\n self.bsz2 = k\n break\n\n def forward(self, x: torch.Tensor):\n if self.fw_code is None:\n self.bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(x.device)","source_hash":"d6e228ea96e0bf873160de9367ca61a5e525ffb1bb0308c2665e3568e1bb8a53","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.research.nn.modules.LinearFP8Global","uri":"program://bitsandbytes/class/bitsandbytes.research.nn.modules.LinearFP8Global#L45-L76","kind":"class","name":"LinearFP8Global","path":"bitsandbytes/research/nn/modules.py","language":"python","start_line":45,"end_line":76,"context_start_line":25,"context_end_line":76,"code":"\n def forward(self, x: torch.Tensor):\n if self.fw_code is None:\n self.bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(x.device)\n self.fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(x.device)\n\n 
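The `__init__` loops above (and `get_block_sizes` later in this section) bucket a feature count into a quantization block size by walking a descending threshold list and taking the first bucket whose successor threshold is exceeded, which amounts to rounding up to the nearest listed size, floored at 64 and capped at 4096. A standalone version with a few checks:

```python
# Standalone sketch of the block-size bucketing used in __init__ above.
def pick_blocksize(features: int) -> int:
    buckets = [4096, 2048, 1024, 512, 256, 128, 64, 0]
    for i, k in enumerate(buckets[:-1]):
        # Take the first bucket k whose *next* threshold is exceeded.
        if features > buckets[i + 1]:
            return k
    return 0  # unreachable for features > 0

assert pick_blocksize(1024) == 1024   # exact power of two maps to itself
assert pick_blocksize(1025) == 2048   # anything above rounds up
assert pick_blocksize(50) == 64       # floored at 64
assert pick_blocksize(8192) == 4096   # capped at 4096
```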
out = bnb.research.matmul_fp8_mixed(\n x,\n self.weight.t(),\n fw_code=self.fw_code,\n bw_code=self.bw_code,\n bsz=self.bsz,\n bsz2=self.bsz2,\n )\n if self.bias is not None:\n out += self.bias\n\n return out\n\n\nclass LinearFP8Global(nn.Linear):\n def __init__(self, input_features, output_features, bias=True):\n super().__init__(input_features, output_features, bias)\n self.bw_code = None\n self.fw_code = None\n array = [4096, 2048, 1024, 512, 256, 128, 64, 0]\n for i, k in enumerate(array):\n if input_features > array[i + 1]:\n self.bsz = k\n break\n for i, k in enumerate(array):\n if output_features > array[i + 1]:\n self.bsz2 = k\n break\n\n def forward(self, x: torch.Tensor):\n if self.fw_code is None:\n self.bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(x.device)\n self.fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(x.device)\n\n out = bnb.matmul_fp8_global(\n x,\n self.weight.t(),\n fw_code=self.fw_code,\n bw_code=self.bw_code,\n bsz=self.bsz,\n bsz2=self.bsz2,\n )\n if self.bias is not None:\n out += self.bias\n\n return out","source_hash":"d6e228ea96e0bf873160de9367ca61a5e525ffb1bb0308c2665e3568e1bb8a53","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.research.nn.modules.__init__","uri":"program://bitsandbytes/function/bitsandbytes.research.nn.modules.__init__#L46-L58","kind":"function","name":"__init__","path":"bitsandbytes/research/nn/modules.py","language":"python","start_line":46,"end_line":58,"context_start_line":26,"context_end_line":76,"code":" def forward(self, x: torch.Tensor):\n if self.fw_code is None:\n self.bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(x.device)\n self.fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(x.device)\n\n out = bnb.research.matmul_fp8_mixed(\n x,\n self.weight.t(),\n fw_code=self.fw_code,\n bw_code=self.bw_code,\n bsz=self.bsz,\n bsz2=self.bsz2,\n )\n if self.bias is not None:\n out += self.bias\n\n return out\n\n\nclass LinearFP8Global(nn.Linear):\n def __init__(self, input_features, output_features, bias=True):\n super().__init__(input_features, output_features, bias)\n self.bw_code = None\n self.fw_code = None\n array = [4096, 2048, 1024, 512, 256, 128, 64, 0]\n for i, k in enumerate(array):\n if input_features > array[i + 1]:\n self.bsz = k\n break\n for i, k in enumerate(array):\n if output_features > array[i + 1]:\n self.bsz2 = k\n break\n\n def forward(self, x: torch.Tensor):\n if self.fw_code is None:\n self.bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(x.device)\n self.fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(x.device)\n\n out = bnb.matmul_fp8_global(\n x,\n self.weight.t(),\n fw_code=self.fw_code,\n bw_code=self.bw_code,\n bsz=self.bsz,\n bsz2=self.bsz2,\n )\n if self.bias is not None:\n out += self.bias\n\n return out","source_hash":"d6e228ea96e0bf873160de9367ca61a5e525ffb1bb0308c2665e3568e1bb8a53","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.research.nn.modules.forward","uri":"program://bitsandbytes/function/bitsandbytes.research.nn.modules.forward#L60-L76","kind":"function","name":"forward","path":"bitsandbytes/research/nn/modules.py","language":"python","start_line":60,"end_line":76,"context_start_line":40,"context_end_line":76,"code":" out += self.bias\n\n return out\n\n\nclass LinearFP8Global(nn.Linear):\n def __init__(self, input_features, output_features, bias=True):\n super().__init__(input_features, output_features, bias)\n self.bw_code = None\n self.fw_code = None\n array = [4096, 2048, 1024, 
512, 256, 128, 64, 0]\n for i, k in enumerate(array):\n if input_features > array[i + 1]:\n self.bsz = k\n break\n for i, k in enumerate(array):\n if output_features > array[i + 1]:\n self.bsz2 = k\n break\n\n def forward(self, x: torch.Tensor):\n if self.fw_code is None:\n self.bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(x.device)\n self.fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(x.device)\n\n out = bnb.matmul_fp8_global(\n x,\n self.weight.t(),\n fw_code=self.fw_code,\n bw_code=self.bw_code,\n bsz=self.bsz,\n bsz2=self.bsz2,\n )\n if self.bias is not None:\n out += self.bias\n\n return out","source_hash":"d6e228ea96e0bf873160de9367ca61a5e525ffb1bb0308c2665e3568e1bb8a53","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.research.autograd._functions","uri":"program://bitsandbytes/module/bitsandbytes.research.autograd._functions#L1-L396","kind":"module","name":"bitsandbytes.research.autograd._functions","path":"bitsandbytes/research/autograd/_functions.py","language":"python","start_line":1,"end_line":396,"context_start_line":1,"context_end_line":396,"code":"from functools import reduce # Required in Python 3\nimport operator\nfrom typing import Optional\nimport warnings\n\nimport torch\n\nfrom bitsandbytes.autograd._functions import GlobalOutlierPooler, MatmulLtState\nimport bitsandbytes.functional as F\n\n\n# math.prod not compatible with python < 3.8\ndef prod(iterable):\n return reduce(operator.mul, iterable, 1)\n\n\nclass MatMulFP8Mixed(torch.autograd.Function):\n # forward is the same, but we added the fallback for pre-turing GPUs\n # backward is mostly the same, but adds one extra clause (see \"elif state.CxB is not None\")\n\n @staticmethod\n def forward(ctx, A, B, out=None, fw_code=None, bw_code=None, bsz=1024, bsz2=1024):\n # default of pytorch behavior if inputs are empty\n ctx.is_empty = False\n if prod(A.shape) == 0:\n ctx.is_empty = True\n ctx.A = A\n ctx.B = B\n\n B_shape = B.shape\n if A.shape[-1] == B_shape[0]:\n return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)\n else:\n return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)\n\n # 1. Dequantize\n # 2. MatmulnN\n cA, state = F.quantize_blockwise(A, code=fw_code, blocksize=bsz)\n fp8A = F.dequantize_blockwise(cA, state, blocksize=bsz).to(A.dtype)\n\n cB, state = F.quantize(B.float(), code=fw_code)\n fp8B = F.dequantize(cB, state).to(B.dtype)\n\n output = torch.matmul(fp8A, fp8B)\n\n # output is half\n\n # 3. 
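All three autograd functions in this module start with the same empty-input fast path: if `A` has zero elements, they return an empty tensor shaped like the matmul result instead of running any kernels. A small sketch of just that shape logic:

```python
from functools import reduce
import operator

import torch

def prod(shape):
    return reduce(operator.mul, shape, 1)

def empty_matmul_result(A: torch.Tensor, B: torch.Tensor) -> torch.Tensor:
    # Mirrors the fast path above: pick the output shape by checking
    # whether B is oriented as (K, N) or was passed already transposed.
    assert prod(A.shape) == 0
    if A.shape[-1] == B.shape[0]:                  # standard (..., K) @ (K, N)
        out_shape = A.shape[:-1] + B.shape[1:]
    else:                                          # B passed transposed
        out_shape = A.shape[:-1] + B.shape[:1]
    return torch.empty(out_shape, dtype=A.dtype, device=A.device)

A = torch.empty(0, 8)
B = torch.empty(8, 4)
print(empty_matmul_result(A, B).shape)  # torch.Size([0, 4])
```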
Save state\n ctx.fw_code = fw_code\n ctx.bw_code = bw_code\n ctx.bsz = bsz\n ctx.bsz2 = bsz2\n ctx.dtype_A, ctx.dtype_B = A.dtype, B.dtype\n\n if any(ctx.needs_input_grad[:2]):\n # NOTE: we send back A, and re-quant.\n ctx.tensors = (A, fp8B)\n else:\n ctx.tensors = (None, None)\n\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n if ctx.is_empty:\n return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, None, None, None, None\n\n req_gradA, req_gradB, _, _, _, _, _ = ctx.needs_input_grad\n A, B = ctx.tensors\n\n grad_A, grad_B = None, None\n\n # TODO: Fix blocksize to be output_dim\n cgrad_out, state = F.quantize_blockwise(grad_output, code=ctx.bw_code, blocksize=ctx.bsz2)\n fp8out = F.dequantize_blockwise(cgrad_out, state, blocksize=ctx.bsz2).to(grad_output.dtype)\n\n # cgrad_output_2, state_2 = F.quantize(grad_output.float(), code=ctx.bw_code)\n # fp8out_2 = F.dequantize(cgrad_output_2, state_2).to(grad_output.dtype)\n\n # grad_output_reshape = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()\n # fp8grad_transpose, stategrad_transpose = F.vectorwise_quant(grad_output_reshape, dim=0, quant_type='vector')\n # fp8out_transpose = (fp8grad_transpose / 7) * stategrad_transpose\n # fp8out_transpose = fp8out_transpose.view(grad_output.shape[0], grad_output.shape[1], grad_output.shape[2])\n\n # not supported by PyTorch. TODO: create work-around\n if req_gradA:\n grad_A = torch.matmul(fp8out, B.t().to(fp8out.dtype)).to(A.dtype)\n\n if req_gradB:\n if len(A.shape) == 3:\n At = A.transpose(2, 1).contiguous()\n else:\n At = A.transpose(1, 0).contiguous()\n # cA, state = F.quantize(At.float(), code=ctx.fw_code)\n # fp8At = F.dequantize(cA, state).to(A.dtype)\n grad_B = torch.matmul(At.to(grad_output.dtype), grad_output).to(B.dtype)\n\n return grad_A, grad_B, None, None, None, None, None\n\n\nclass MatMulFP8Global(torch.autograd.Function):\n # forward is the same, but we added the fallback for pre-turing GPUs\n # backward is mostly the same, but adds one extra clause (see \"elif state.CxB is not None\")\n\n @staticmethod\n def forward(ctx, A, B, out=None, fw_code=None, bw_code=None, bsz=1024, bsz2=1024):\n # default of pytorch behavior if inputs are empty\n ctx.is_empty = False\n if prod(A.shape) == 0:\n ctx.is_empty = True\n ctx.A = A\n ctx.B = B\n\n B_shape = B.shape\n if A.shape[-1] == B_shape[0]:\n return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)\n else:\n return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)\n\n # 1. Dequantize\n # 2. MatmulnN\n cA, state = F.quantize(A.float(), code=fw_code)\n fp8A = F.dequantize(cA, state).to(A.dtype)\n\n cB, state = F.quantize(B.float(), code=fw_code)\n fp8B = F.dequantize(cB, state).to(B.dtype)\n\n output = torch.matmul(fp8A, fp8B)\n\n # output is half\n\n # 3. 
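The FP8 forward above is fake quantization: the inputs take a round trip through an 8-bit codebook built by `create_fp8_map` and the matmul itself runs in the original dtype. A minimal round-trip sketch; it assumes a CUDA-enabled bitsandbytes build and uses a small illustrative blocksize:

```python
import torch
import bitsandbytes.functional as F

# Fake-quantization round trip as in MatMulFP8Mixed.forward above:
# quantize to an 8-bit FP8 codebook, dequantize back, matmul in fp16.
A = torch.randn(16, 64, device="cuda", dtype=torch.float16)
fw_code = F.create_fp8_map(True, 4, 3, 8).to(A.device)  # forward codebook, as above

cA, state = F.quantize_blockwise(A, code=fw_code, blocksize=64)
fp8A = F.dequantize_blockwise(cA, state, blocksize=64).to(A.dtype)

print((A - fp8A).abs().max())  # error introduced by the FP8 round trip
```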
Save state\n ctx.fw_code = fw_code\n ctx.bw_code = bw_code\n ctx.bsz = bsz\n ctx.bsz2 = bsz2\n ctx.dtype_A, ctx.dtype_B = A.dtype, B.dtype\n\n if any(ctx.needs_input_grad[:2]):\n # NOTE: we send back A, and re-quant.\n ctx.tensors = (A, fp8B)\n else:\n ctx.tensors = (None, None)\n\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n if ctx.is_empty:\n return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, None, None, None, None\n\n req_gradA, req_gradB, _, _, _, _, _ = ctx.needs_input_grad\n A, B = ctx.tensors\n\n grad_A, grad_B = None, None\n\n # TODO: Fix blocksize to be output_dim\n cgrad_out, state = F.quantize(grad_output.float(), code=ctx.bw_code)\n fp8out = F.dequantize(cgrad_out, state).to(grad_output.dtype)\n\n # cgrad_output_2, state_2 = F.quantize(grad_output.float(), code=ctx.bw_code)\n # fp8out_2 = F.dequantize(cgrad_output_2, state_2).to(grad_output.dtype)\n\n # grad_output_reshape = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()\n # fp8grad_transpose, stategrad_transpose = F.vectorwise_quant(grad_output_reshape, dim=0, quant_type='vector')\n # fp8out_transpose = (fp8grad_transpose / 7) * stategrad_transpose\n # fp8out_transpose = fp8out_transpose.view(grad_output.shape[0], grad_output.shape[1], grad_output.shape[2])\n\n # not supported by PyTorch. TODO: create work-around\n if req_gradA:\n grad_A = torch.matmul(fp8out, B.t().to(fp8out.dtype)).to(A.dtype)\n\n if req_gradB:\n if len(A.shape) == 3:\n At = A.transpose(2, 1).contiguous()\n else:\n At = A.transpose(1, 0).contiguous()\n cA, state = F.quantize(At.float(), code=ctx.fw_code)\n fp8At = F.dequantize(cA, state).to(A.dtype)\n grad_B = torch.matmul(fp8At.to(fp8out.dtype), fp8out).to(B.dtype)\n\n return grad_A, grad_B, None, None, None, None, None\n\n\nclass SwitchBackBnb(torch.autograd.Function):\n @staticmethod\n def forward(ctx, A, B, out=None, bias=None, state: Optional[MatmulLtState] = None):\n state = state or MatmulLtState()\n\n # default to pytorch behavior if inputs are empty\n ctx.is_empty = False\n if prod(A.shape) == 0:\n ctx.is_empty = True\n ctx.A = A\n ctx.B = B\n ctx.bias = bias\n if A.shape[-1] == B.shape[0]:\n return torch.empty(A.shape[:-1] + B.shape[1:], dtype=A.dtype, device=A.device)\n else:\n return torch.empty(A.shape[:-1] + B.shape[:1], dtype=A.dtype, device=A.device)\n\n # 1. Quantize A\n # 2. Quantize B\n # 3. Matmul\n # 4. Mixed-precision decomposition matmul\n # 5. Save state\n input_shape = A.shape\n if state.outlier_pool is None:\n state.outlier_pool = GlobalOutlierPooler.get_instance()\n\n # Cast A to fp16\n if A.dtype != torch.float16:\n warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n\n # 1. Quantize A\n if len(A.shape) == 3:\n A = A.view(-1, A.shape[-1]).contiguous()\n CA, CAt, SCA, SCAt, outlier_cols = F.int8_double_quant(A.to(torch.float16), threshold=state.threshold)\n\n if state.threshold > 0.0 and outlier_cols is not None:\n if state.has_fp16_weights:\n idx = outlier_cols\n CA[:, idx] = 0\n subA = A[:, idx]\n state.subB = B[:, idx].t().contiguous()\n state.idx = idx\n else:\n if state.SB is None:\n state.SB = (state.CB.shape, \"row\")\n else:\n if not state.has_fp16_weights and state.SB is None:\n state.SB = (state.CB.shape, \"row\")\n subA = None\n\n # 2. 
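Both backward passes branch on the rank of `A` before forming `grad_B`, transposing the last two dims of a batched activation and the whole matrix otherwise. A tiny standalone check of that branch:

```python
import torch

# Sketch of the rank-dependent transpose used in backward above.
def transpose_for_grad(A: torch.Tensor) -> torch.Tensor:
    if A.dim() == 3:
        return A.transpose(2, 1).contiguous()  # batched: swap last two dims
    return A.transpose(1, 0).contiguous()      # 2D: plain transpose

print(transpose_for_grad(torch.randn(2, 3, 4)).shape)  # torch.Size([2, 4, 3])
print(transpose_for_grad(torch.randn(3, 4)).shape)     # torch.Size([4, 3])
```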
Quantize B\n if state.has_fp16_weights:\n # print('B shape', B.shape)\n has_grad = getattr(B, \"grad\", None) is not None\n is_transposed = not B.is_contiguous() and B.shape[0] == B.stride(1)\n if is_transposed:\n B = B.contiguous()\n\n if (state.is_training and not has_grad) or state.SB is None:\n state.reset_grads()\n (\n state.CB,\n state.CBt,\n state.SCB,\n state.SCBt,\n _,\n ) = F.int8_double_quant(B.to(torch.float16))\n state.SB = (state.CB.shape, \"row\")\n else:\n has_grad = False\n\n if outlier_cols is not None and not state.has_fp16_weights:\n # extract outliers\n state.idx = outlier_cols\n outliers = state.CB[:, state.idx.long()].clone()\n state.subB = (outliers * state.SCB.view(-1, 1) / 127.0).t().contiguous().to(A.dtype)\n CA[:, state.idx.long()] = 0\n\n subA = A[:, state.idx.long()]\n\n shapeB = state.SB[0]\n\n if len(input_shape) == 3:\n output_shape = (input_shape[0], input_shape[1], shapeB[0])\n else:\n output_shape = (input_shape[0], shapeB[0])\n\n # 3. Matmul\n out32 = F.int8_linear_matmul(CA, state.CB)\n # we apply the fused bias here\n\n if bias is None or bias.dtype == torch.float16:\n output = F.int8_mm_dequant(out32, SCA, state.SCB, bias=bias).to(A.dtype)\n else: # apply bias separately\n output = F.int8_mm_dequant(out32, SCA, state.SCB, bias=None).to(A.dtype)\n output.add_(bias)\n\n # 4. Mixed-precision decomposition matmul\n if outlier_cols is not None and subA is not None:\n output += torch.matmul(subA, state.subB)\n\n # 5. Save state\n ctx.state = state\n\n ctx.grad_shape = input_shape\n ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype\n\n if any(ctx.needs_input_grad[:2]):\n ctx.tensors = (CAt, subA, A)\n ctx.tensor_states = (SCAt, state.idx)\n else:\n ctx.tensors = [None, None, None]\n ctx.tensor_states = (None, None)\n ctx.save_for_backward(None, None)\n\n clone_func = torch.clone if len(output_shape) == 3 else lambda x: x\n return clone_func(output.view(output_shape))\n\n @staticmethod\n def backward(ctx, grad_output):\n if ctx.is_empty:\n bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)\n return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None\n\n req_gradA, req_gradB, _, req_gradBias, _ = ctx.needs_input_grad\n CAt, subA, A = ctx.tensors\n SCAt, idx = ctx.tensor_states\n state = ctx.state\n grad_A = grad_B = grad_bias = None\n\n if req_gradBias:\n # compute grad_bias first before changing grad_output dtype\n grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)\n\n # Cast grad_output to fp16\n if len(grad_output.shape) == 3:\n grad_output = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()\n\n Cgrad, Cgradt, SCgrad, SCgradt, outlier_cols = F.int8_double_quant(grad_output.to(torch.float16))\n\n if req_gradB:\n # print('back A shape', A.shape)\n # print('grad output t shape', grad_output.t().shape)\n grad_B = torch.matmul(grad_output.t(), A)\n\n if req_gradA:\n if state.CB is not None:\n CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))\n grad_A = torch.matmul(grad_output, CB).view(ctx.grad_shape).to(ctx.dtype_A)\n else:\n raise Exception(\"State must contain either CBt or CB matrix for backward\")\n\n return grad_A, grad_B, None, grad_bias, None\n\n\ndef get_block_sizes(input_matrix, weight_matrix):\n input_features = input_matrix.shape[-1]\n output_features = weight_matrix.shape[0] if weight_matrix.shape[1] == input_features else weight_matrix.shape[1]\n array = [4096, 2048, 1024, 512, 256, 128, 64, 0]\n bsz, bsz2 = 1024, 
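The mixed-precision decomposition in `SwitchBackBnb.forward` zeroes outlier columns before int8 quantization and adds their contribution back from a higher-precision matmul. A CPU sketch of the idea using per-tensor scales (the real code uses row-wise scales via `int8_double_quant` and a fused int8 kernel):

```python
import torch

torch.manual_seed(0)
A = torch.randn(8, 32)
B = torch.randn(16, 32)          # row-major weight; output = A @ B.t()
threshold = 4.0                  # illustrative outlier threshold

# Columns of A containing any value above the threshold are outliers.
outlier_cols = (A.abs() > threshold).any(dim=0).nonzero().flatten()
A_base = A.clone()
A_base[:, outlier_cols] = 0      # the int8 path sees zeros here

# Symmetric per-tensor int8 quantization of the non-outlier part.
scale_a = A_base.abs().max() / 127.0
scale_b = B.abs().max() / 127.0
CA = torch.round(A_base / scale_a).to(torch.int8)
CB = torch.round(B / scale_b).to(torch.int8)

# int8 matmul with int32 accumulation, then dequantize.
out = (CA.to(torch.int32) @ CB.to(torch.int32).t()).float() * scale_a * scale_b
# Recompute the outlier columns' contribution in full precision.
out += A[:, outlier_cols] @ B[:, outlier_cols].t()

print((out - A @ B.t()).abs().max())  # small vs. the exact product
```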
1024\n for i, k in enumerate(array):\n if input_features > array[i + 1]:\n bsz = k\n break\n for i, k in enumerate(array):\n if output_features > array[i + 1]:\n bsz2 = k\n break\n\n return bsz, bsz2\n\n\ndef matmul_fp8_global(\n A: torch.Tensor,\n B: torch.Tensor,\n fw_code: torch.Tensor,\n bw_code: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n bsz: int = -1,\n bsz2: int = -1,\n):\n if bsz == -1 or bsz2 == -1:\n bsz, bsz2 = get_block_sizes(A, B)\n return MatMulFP8Global.apply(A, B, out, fw_code, bw_code, bsz, bsz2)\n\n\ndef matmul_fp8_mixed(\n A: torch.Tensor,\n B: torch.Tensor,\n fw_code: torch.Tensor,\n bw_code: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n bsz: int = -1,\n bsz2: int = -1,\n):\n if bsz == -1 or bsz2 == -1:\n bsz, bsz2 = get_block_sizes(A, B)\n return MatMulFP8Mixed.apply(A, B, out, fw_code, bw_code, bsz, bsz2)\n\n\ndef switchback_bnb(\n A: torch.Tensor,\n B: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n state: Optional[MatmulLtState] = None,\n threshold=0.0,\n bias=None,\n):\n state = state or MatmulLtState()\n if threshold > 0.0:\n state.threshold = threshold\n return SwitchBackBnb.apply(A, B, out, bias, state)","source_hash":"8c5047bbe36a6b6d5b6837dd58ed06f214467941a8499227b81ba673117413bc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.research.autograd._functions.prod","uri":"program://bitsandbytes/function/bitsandbytes.research.autograd._functions.prod#L13-L14","kind":"function","name":"prod","path":"bitsandbytes/research/autograd/_functions.py","language":"python","start_line":13,"end_line":14,"context_start_line":1,"context_end_line":34,"code":"from functools import reduce # Required in Python 3\nimport operator\nfrom typing import Optional\nimport warnings\n\nimport torch\n\nfrom bitsandbytes.autograd._functions import GlobalOutlierPooler, MatmulLtState\nimport bitsandbytes.functional as F\n\n\n# math.prod not compatible with python < 3.8\ndef prod(iterable):\n return reduce(operator.mul, iterable, 1)\n\n\nclass MatMulFP8Mixed(torch.autograd.Function):\n # forward is the same, but we added the fallback for pre-turing GPUs\n # backward is mostly the same, but adds one extra clause (see \"elif state.CxB is not None\")\n\n @staticmethod\n def forward(ctx, A, B, out=None, fw_code=None, bw_code=None, bsz=1024, bsz2=1024):\n # default of pytorch behavior if inputs are empty\n ctx.is_empty = False\n if prod(A.shape) == 0:\n ctx.is_empty = True\n ctx.A = A\n ctx.B = B\n\n B_shape = B.shape\n if A.shape[-1] == B_shape[0]:\n return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)\n else:\n return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)","source_hash":"8c5047bbe36a6b6d5b6837dd58ed06f214467941a8499227b81ba673117413bc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.research.autograd._functions.MatMulFP8Mixed","uri":"program://bitsandbytes/class/bitsandbytes.research.autograd._functions.MatMulFP8Mixed#L17-L98","kind":"class","name":"MatMulFP8Mixed","path":"bitsandbytes/research/autograd/_functions.py","language":"python","start_line":17,"end_line":98,"context_start_line":1,"context_end_line":118,"code":"from functools import reduce # Required in Python 3\nimport operator\nfrom typing import Optional\nimport warnings\n\nimport torch\n\nfrom bitsandbytes.autograd._functions import GlobalOutlierPooler, MatmulLtState\nimport bitsandbytes.functional as F\n\n\n# math.prod not compatible with python < 3.8\ndef prod(iterable):\n return 
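The wrappers above treat `bsz == -1` as "derive from the shapes", so callers can omit block sizes entirely. A usage sketch with hypothetical shapes (note `get_block_sizes` lives in the private module shown above):

```python
import torch
from bitsandbytes.research.autograd._functions import get_block_sizes

A = torch.randn(4, 1025)     # 1025 input features -> buckets up to 2048
W = torch.randn(512, 1025)   # weight laid out as nn.Linear stores it

bsz, bsz2 = get_block_sizes(A, W)
print(bsz, bsz2)             # 2048 512
```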
reduce(operator.mul, iterable, 1)\n\n\nclass MatMulFP8Mixed(torch.autograd.Function):\n # forward is the same, but we added the fallback for pre-turing GPUs\n # backward is mostly the same, but adds one extra clause (see \"elif state.CxB is not None\")\n\n @staticmethod\n def forward(ctx, A, B, out=None, fw_code=None, bw_code=None, bsz=1024, bsz2=1024):\n # default of pytorch behavior if inputs are empty\n ctx.is_empty = False\n if prod(A.shape) == 0:\n ctx.is_empty = True\n ctx.A = A\n ctx.B = B\n\n B_shape = B.shape\n if A.shape[-1] == B_shape[0]:\n return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)\n else:\n return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)\n\n # 1. Dequantize\n # 2. MatmulnN\n cA, state = F.quantize_blockwise(A, code=fw_code, blocksize=bsz)\n fp8A = F.dequantize_blockwise(cA, state, blocksize=bsz).to(A.dtype)\n\n cB, state = F.quantize(B.float(), code=fw_code)\n fp8B = F.dequantize(cB, state).to(B.dtype)\n\n output = torch.matmul(fp8A, fp8B)\n\n # output is half\n\n # 3. Save state\n ctx.fw_code = fw_code\n ctx.bw_code = bw_code\n ctx.bsz = bsz\n ctx.bsz2 = bsz2\n ctx.dtype_A, ctx.dtype_B = A.dtype, B.dtype\n\n if any(ctx.needs_input_grad[:2]):\n # NOTE: we send back A, and re-quant.\n ctx.tensors = (A, fp8B)\n else:\n ctx.tensors = (None, None)\n\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n if ctx.is_empty:\n return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, None, None, None, None\n\n req_gradA, req_gradB, _, _, _, _, _ = ctx.needs_input_grad\n A, B = ctx.tensors\n\n grad_A, grad_B = None, None\n\n # TODO: Fix blocksize to be output_dim\n cgrad_out, state = F.quantize_blockwise(grad_output, code=ctx.bw_code, blocksize=ctx.bsz2)\n fp8out = F.dequantize_blockwise(cgrad_out, state, blocksize=ctx.bsz2).to(grad_output.dtype)\n\n # cgrad_output_2, state_2 = F.quantize(grad_output.float(), code=ctx.bw_code)\n # fp8out_2 = F.dequantize(cgrad_output_2, state_2).to(grad_output.dtype)\n\n # grad_output_reshape = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()\n # fp8grad_transpose, stategrad_transpose = F.vectorwise_quant(grad_output_reshape, dim=0, quant_type='vector')\n # fp8out_transpose = (fp8grad_transpose / 7) * stategrad_transpose\n # fp8out_transpose = fp8out_transpose.view(grad_output.shape[0], grad_output.shape[1], grad_output.shape[2])\n\n # not supported by PyTorch. 
TODO: create work-around\n if req_gradA:\n grad_A = torch.matmul(fp8out, B.t().to(fp8out.dtype)).to(A.dtype)\n\n if req_gradB:\n if len(A.shape) == 3:\n At = A.transpose(2, 1).contiguous()\n else:\n At = A.transpose(1, 0).contiguous()\n # cA, state = F.quantize(At.float(), code=ctx.fw_code)\n # fp8At = F.dequantize(cA, state).to(A.dtype)\n grad_B = torch.matmul(At.to(grad_output.dtype), grad_output).to(B.dtype)\n\n return grad_A, grad_B, None, None, None, None, None\n\n\nclass MatMulFP8Global(torch.autograd.Function):\n # forward is the same, but we added the fallback for pre-turing GPUs\n # backward is mostly the same, but adds one extra clause (see \"elif state.CxB is not None\")\n\n @staticmethod\n def forward(ctx, A, B, out=None, fw_code=None, bw_code=None, bsz=1024, bsz2=1024):\n # default of pytorch behavior if inputs are empty\n ctx.is_empty = False\n if prod(A.shape) == 0:\n ctx.is_empty = True\n ctx.A = A\n ctx.B = B\n\n B_shape = B.shape\n if A.shape[-1] == B_shape[0]:\n return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)\n else:\n return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)","source_hash":"8c5047bbe36a6b6d5b6837dd58ed06f214467941a8499227b81ba673117413bc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.research.autograd._functions.MatMulFP8Global","uri":"program://bitsandbytes/class/bitsandbytes.research.autograd._functions.MatMulFP8Global#L101-L182","kind":"class","name":"MatMulFP8Global","path":"bitsandbytes/research/autograd/_functions.py","language":"python","start_line":101,"end_line":182,"context_start_line":81,"context_end_line":202,"code":" # fp8grad_transpose, stategrad_transpose = F.vectorwise_quant(grad_output_reshape, dim=0, quant_type='vector')\n # fp8out_transpose = (fp8grad_transpose / 7) * stategrad_transpose\n # fp8out_transpose = fp8out_transpose.view(grad_output.shape[0], grad_output.shape[1], grad_output.shape[2])\n\n # not supported by PyTorch. TODO: create work-around\n if req_gradA:\n grad_A = torch.matmul(fp8out, B.t().to(fp8out.dtype)).to(A.dtype)\n\n if req_gradB:\n if len(A.shape) == 3:\n At = A.transpose(2, 1).contiguous()\n else:\n At = A.transpose(1, 0).contiguous()\n # cA, state = F.quantize(At.float(), code=ctx.fw_code)\n # fp8At = F.dequantize(cA, state).to(A.dtype)\n grad_B = torch.matmul(At.to(grad_output.dtype), grad_output).to(B.dtype)\n\n return grad_A, grad_B, None, None, None, None, None\n\n\nclass MatMulFP8Global(torch.autograd.Function):\n # forward is the same, but we added the fallback for pre-turing GPUs\n # backward is mostly the same, but adds one extra clause (see \"elif state.CxB is not None\")\n\n @staticmethod\n def forward(ctx, A, B, out=None, fw_code=None, bw_code=None, bsz=1024, bsz2=1024):\n # default of pytorch behavior if inputs are empty\n ctx.is_empty = False\n if prod(A.shape) == 0:\n ctx.is_empty = True\n ctx.A = A\n ctx.B = B\n\n B_shape = B.shape\n if A.shape[-1] == B_shape[0]:\n return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)\n else:\n return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)\n\n # 1. Dequantize\n # 2. MatmulnN\n cA, state = F.quantize(A.float(), code=fw_code)\n fp8A = F.dequantize(cA, state).to(A.dtype)\n\n cB, state = F.quantize(B.float(), code=fw_code)\n fp8B = F.dequantize(cB, state).to(B.dtype)\n\n output = torch.matmul(fp8A, fp8B)\n\n # output is half\n\n # 3. 
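The backward methods above return one gradient per `forward` argument, with `None` in the slots of the non-tensor inputs (`out`, the codes, and the block sizes). A minimal `torch.autograd.Function` illustrating that contract and the `ctx.needs_input_grad` gating:

```python
import torch

class ScaledMatmul(torch.autograd.Function):
    @staticmethod
    def forward(ctx, A, B, scale: float):
        ctx.save_for_backward(A, B)
        ctx.scale = scale
        return A @ B * scale

    @staticmethod
    def backward(ctx, grad_output):
        A, B = ctx.saved_tensors
        # Only compute gradients that are actually requested.
        grad_A = grad_output @ B.t() * ctx.scale if ctx.needs_input_grad[0] else None
        grad_B = A.t() @ grad_output * ctx.scale if ctx.needs_input_grad[1] else None
        return grad_A, grad_B, None  # None for the non-tensor argument

A = torch.randn(3, 4, requires_grad=True)
B = torch.randn(4, 2, requires_grad=True)
ScaledMatmul.apply(A, B, 0.5).sum().backward()
print(A.grad.shape, B.grad.shape)
```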
Save state\n ctx.fw_code = fw_code\n ctx.bw_code = bw_code\n ctx.bsz = bsz\n ctx.bsz2 = bsz2\n ctx.dtype_A, ctx.dtype_B = A.dtype, B.dtype\n\n if any(ctx.needs_input_grad[:2]):\n # NOTE: we send back A, and re-quant.\n ctx.tensors = (A, fp8B)\n else:\n ctx.tensors = (None, None)\n\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n if ctx.is_empty:\n return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, None, None, None, None\n\n req_gradA, req_gradB, _, _, _, _, _ = ctx.needs_input_grad\n A, B = ctx.tensors\n\n grad_A, grad_B = None, None\n\n # TODO: Fix blocksize to be output_dim\n cgrad_out, state = F.quantize(grad_output.float(), code=ctx.bw_code)\n fp8out = F.dequantize(cgrad_out, state).to(grad_output.dtype)\n\n # cgrad_output_2, state_2 = F.quantize(grad_output.float(), code=ctx.bw_code)\n # fp8out_2 = F.dequantize(cgrad_output_2, state_2).to(grad_output.dtype)\n\n # grad_output_reshape = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()\n # fp8grad_transpose, stategrad_transpose = F.vectorwise_quant(grad_output_reshape, dim=0, quant_type='vector')\n # fp8out_transpose = (fp8grad_transpose / 7) * stategrad_transpose\n # fp8out_transpose = fp8out_transpose.view(grad_output.shape[0], grad_output.shape[1], grad_output.shape[2])\n\n # not supported by PyTorch. TODO: create work-around\n if req_gradA:\n grad_A = torch.matmul(fp8out, B.t().to(fp8out.dtype)).to(A.dtype)\n\n if req_gradB:\n if len(A.shape) == 3:\n At = A.transpose(2, 1).contiguous()\n else:\n At = A.transpose(1, 0).contiguous()\n cA, state = F.quantize(At.float(), code=ctx.fw_code)\n fp8At = F.dequantize(cA, state).to(A.dtype)\n grad_B = torch.matmul(fp8At.to(fp8out.dtype), fp8out).to(B.dtype)\n\n return grad_A, grad_B, None, None, None, None, None\n\n\nclass SwitchBackBnb(torch.autograd.Function):\n @staticmethod\n def forward(ctx, A, B, out=None, bias=None, state: Optional[MatmulLtState] = None):\n state = state or MatmulLtState()\n\n # default to pytorch behavior if inputs are empty\n ctx.is_empty = False\n if prod(A.shape) == 0:\n ctx.is_empty = True\n ctx.A = A\n ctx.B = B\n ctx.bias = bias\n if A.shape[-1] == B.shape[0]:\n return torch.empty(A.shape[:-1] + B.shape[1:], dtype=A.dtype, device=A.device)\n else:\n return torch.empty(A.shape[:-1] + B.shape[:1], dtype=A.dtype, device=A.device)\n\n # 1. Quantize A","source_hash":"8c5047bbe36a6b6d5b6837dd58ed06f214467941a8499227b81ba673117413bc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.research.autograd._functions.SwitchBackBnb","uri":"program://bitsandbytes/class/bitsandbytes.research.autograd._functions.SwitchBackBnb#L185-L337","kind":"class","name":"SwitchBackBnb","path":"bitsandbytes/research/autograd/_functions.py","language":"python","start_line":185,"end_line":337,"context_start_line":165,"context_end_line":357,"code":" # fp8grad_transpose, stategrad_transpose = F.vectorwise_quant(grad_output_reshape, dim=0, quant_type='vector')\n # fp8out_transpose = (fp8grad_transpose / 7) * stategrad_transpose\n # fp8out_transpose = fp8out_transpose.view(grad_output.shape[0], grad_output.shape[1], grad_output.shape[2])\n\n # not supported by PyTorch. 
TODO: create work-around\n if req_gradA:\n grad_A = torch.matmul(fp8out, B.t().to(fp8out.dtype)).to(A.dtype)\n\n if req_gradB:\n if len(A.shape) == 3:\n At = A.transpose(2, 1).contiguous()\n else:\n At = A.transpose(1, 0).contiguous()\n cA, state = F.quantize(At.float(), code=ctx.fw_code)\n fp8At = F.dequantize(cA, state).to(A.dtype)\n grad_B = torch.matmul(fp8At.to(fp8out.dtype), fp8out).to(B.dtype)\n\n return grad_A, grad_B, None, None, None, None, None\n\n\nclass SwitchBackBnb(torch.autograd.Function):\n @staticmethod\n def forward(ctx, A, B, out=None, bias=None, state: Optional[MatmulLtState] = None):\n state = state or MatmulLtState()\n\n # default to pytorch behavior if inputs are empty\n ctx.is_empty = False\n if prod(A.shape) == 0:\n ctx.is_empty = True\n ctx.A = A\n ctx.B = B\n ctx.bias = bias\n if A.shape[-1] == B.shape[0]:\n return torch.empty(A.shape[:-1] + B.shape[1:], dtype=A.dtype, device=A.device)\n else:\n return torch.empty(A.shape[:-1] + B.shape[:1], dtype=A.dtype, device=A.device)\n\n # 1. Quantize A\n # 2. Quantize B\n # 3. Matmul\n # 4. Mixed-precision decomposition matmul\n # 5. Save state\n input_shape = A.shape\n if state.outlier_pool is None:\n state.outlier_pool = GlobalOutlierPooler.get_instance()\n\n # Cast A to fp16\n if A.dtype != torch.float16:\n warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n\n # 1. Quantize A\n if len(A.shape) == 3:\n A = A.view(-1, A.shape[-1]).contiguous()\n CA, CAt, SCA, SCAt, outlier_cols = F.int8_double_quant(A.to(torch.float16), threshold=state.threshold)\n\n if state.threshold > 0.0 and outlier_cols is not None:\n if state.has_fp16_weights:\n idx = outlier_cols\n CA[:, idx] = 0\n subA = A[:, idx]\n state.subB = B[:, idx].t().contiguous()\n state.idx = idx\n else:\n if state.SB is None:\n state.SB = (state.CB.shape, \"row\")\n else:\n if not state.has_fp16_weights and state.SB is None:\n state.SB = (state.CB.shape, \"row\")\n subA = None\n\n # 2. Quantize B\n if state.has_fp16_weights:\n # print('B shape', B.shape)\n has_grad = getattr(B, \"grad\", None) is not None\n is_transposed = not B.is_contiguous() and B.shape[0] == B.stride(1)\n if is_transposed:\n B = B.contiguous()\n\n if (state.is_training and not has_grad) or state.SB is None:\n state.reset_grads()\n (\n state.CB,\n state.CBt,\n state.SCB,\n state.SCBt,\n _,\n ) = F.int8_double_quant(B.to(torch.float16))\n state.SB = (state.CB.shape, \"row\")\n else:\n has_grad = False\n\n if outlier_cols is not None and not state.has_fp16_weights:\n # extract outliers\n state.idx = outlier_cols\n outliers = state.CB[:, state.idx.long()].clone()\n state.subB = (outliers * state.SCB.view(-1, 1) / 127.0).t().contiguous().to(A.dtype)\n CA[:, state.idx.long()] = 0\n\n subA = A[:, state.idx.long()]\n\n shapeB = state.SB[0]\n\n if len(input_shape) == 3:\n output_shape = (input_shape[0], input_shape[1], shapeB[0])\n else:\n output_shape = (input_shape[0], shapeB[0])\n\n # 3. Matmul\n out32 = F.int8_linear_matmul(CA, state.CB)\n # we apply the fused bias here\n\n if bias is None or bias.dtype == torch.float16:\n output = F.int8_mm_dequant(out32, SCA, state.SCB, bias=bias).to(A.dtype)\n else: # apply bias separately\n output = F.int8_mm_dequant(out32, SCA, state.SCB, bias=None).to(A.dtype)\n output.add_(bias)\n\n # 4. Mixed-precision decomposition matmul\n if outlier_cols is not None and subA is not None:\n output += torch.matmul(subA, state.subB)\n\n # 5. 
Save state\n ctx.state = state\n\n ctx.grad_shape = input_shape\n ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype\n\n if any(ctx.needs_input_grad[:2]):\n ctx.tensors = (CAt, subA, A)\n ctx.tensor_states = (SCAt, state.idx)\n else:\n ctx.tensors = [None, None, None]\n ctx.tensor_states = (None, None)\n ctx.save_for_backward(None, None)\n\n clone_func = torch.clone if len(output_shape) == 3 else lambda x: x\n return clone_func(output.view(output_shape))\n\n @staticmethod\n def backward(ctx, grad_output):\n if ctx.is_empty:\n bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)\n return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None\n\n req_gradA, req_gradB, _, req_gradBias, _ = ctx.needs_input_grad\n CAt, subA, A = ctx.tensors\n SCAt, idx = ctx.tensor_states\n state = ctx.state\n grad_A = grad_B = grad_bias = None\n\n if req_gradBias:\n # compute grad_bias first before changing grad_output dtype\n grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)\n\n # Cast grad_output to fp16\n if len(grad_output.shape) == 3:\n grad_output = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()\n\n Cgrad, Cgradt, SCgrad, SCgradt, outlier_cols = F.int8_double_quant(grad_output.to(torch.float16))\n\n if req_gradB:\n # print('back A shape', A.shape)\n # print('grad output t shape', grad_output.t().shape)\n grad_B = torch.matmul(grad_output.t(), A)\n\n if req_gradA:\n if state.CB is not None:\n CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))\n grad_A = torch.matmul(grad_output, CB).view(ctx.grad_shape).to(ctx.dtype_A)\n else:\n raise Exception(\"State must contain either CBt or CB matrix for backward\")\n\n return grad_A, grad_B, None, grad_bias, None\n\n\ndef get_block_sizes(input_matrix, weight_matrix):\n input_features = input_matrix.shape[-1]\n output_features = weight_matrix.shape[0] if weight_matrix.shape[1] == input_features else weight_matrix.shape[1]\n array = [4096, 2048, 1024, 512, 256, 128, 64, 0]\n bsz, bsz2 = 1024, 1024\n for i, k in enumerate(array):\n if input_features > array[i + 1]:\n bsz = k\n break\n for i, k in enumerate(array):\n if output_features > array[i + 1]:\n bsz2 = k\n break\n\n return bsz, bsz2\n\n\ndef matmul_fp8_global(","source_hash":"8c5047bbe36a6b6d5b6837dd58ed06f214467941a8499227b81ba673117413bc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.research.autograd._functions.get_block_sizes","uri":"program://bitsandbytes/function/bitsandbytes.research.autograd._functions.get_block_sizes#L340-L354","kind":"function","name":"get_block_sizes","path":"bitsandbytes/research/autograd/_functions.py","language":"python","start_line":340,"end_line":354,"context_start_line":320,"context_end_line":374,"code":" if len(grad_output.shape) == 3:\n grad_output = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()\n\n Cgrad, Cgradt, SCgrad, SCgradt, outlier_cols = F.int8_double_quant(grad_output.to(torch.float16))\n\n if req_gradB:\n # print('back A shape', A.shape)\n # print('grad output t shape', grad_output.t().shape)\n grad_B = torch.matmul(grad_output.t(), A)\n\n if req_gradA:\n if state.CB is not None:\n CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))\n grad_A = torch.matmul(grad_output, CB).view(ctx.grad_shape).to(ctx.dtype_A)\n else:\n raise Exception(\"State must contain either CBt or CB matrix for backward\")\n\n return grad_A, grad_B, None, grad_bias, None\n\n\ndef 
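`SwitchBackBnb.backward` reconstructs a floating-point weight from the int8 copy as `CB * SCB / 127` before the `grad_A` matmul. A standalone sketch of that dequantization with made-up scales:

```python
import torch

# CB holds int8 weight rows, SCB the per-row absmax scales (assumed here),
# so the float weight is approximately CB * SCB / 127.
CB = torch.randint(-127, 128, (16, 32), dtype=torch.int8)
SCB = torch.rand(16) * 3

W = CB.to(torch.float32, copy=True).mul_(SCB.unsqueeze(1) / 127.0)
grad_output = torch.randn(8, 16)
grad_A = grad_output @ W      # same contraction as in backward above
print(grad_A.shape)           # torch.Size([8, 32])
```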
get_block_sizes(input_matrix, weight_matrix):\n input_features = input_matrix.shape[-1]\n output_features = weight_matrix.shape[0] if weight_matrix.shape[1] == input_features else weight_matrix.shape[1]\n array = [4096, 2048, 1024, 512, 256, 128, 64, 0]\n bsz, bsz2 = 1024, 1024\n for i, k in enumerate(array):\n if input_features > array[i + 1]:\n bsz = k\n break\n for i, k in enumerate(array):\n if output_features > array[i + 1]:\n bsz2 = k\n break\n\n return bsz, bsz2\n\n\ndef matmul_fp8_global(\n A: torch.Tensor,\n B: torch.Tensor,\n fw_code: torch.Tensor,\n bw_code: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n bsz: int = -1,\n bsz2: int = -1,\n):\n if bsz == -1 or bsz2 == -1:\n bsz, bsz2 = get_block_sizes(A, B)\n return MatMulFP8Global.apply(A, B, out, fw_code, bw_code, bsz, bsz2)\n\n\ndef matmul_fp8_mixed(\n A: torch.Tensor,\n B: torch.Tensor,\n fw_code: torch.Tensor,","source_hash":"8c5047bbe36a6b6d5b6837dd58ed06f214467941a8499227b81ba673117413bc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.research.autograd._functions.matmul_fp8_global","uri":"program://bitsandbytes/function/bitsandbytes.research.autograd._functions.matmul_fp8_global#L357-L368","kind":"function","name":"matmul_fp8_global","path":"bitsandbytes/research/autograd/_functions.py","language":"python","start_line":357,"end_line":368,"context_start_line":337,"context_end_line":388,"code":" return grad_A, grad_B, None, grad_bias, None\n\n\ndef get_block_sizes(input_matrix, weight_matrix):\n input_features = input_matrix.shape[-1]\n output_features = weight_matrix.shape[0] if weight_matrix.shape[1] == input_features else weight_matrix.shape[1]\n array = [4096, 2048, 1024, 512, 256, 128, 64, 0]\n bsz, bsz2 = 1024, 1024\n for i, k in enumerate(array):\n if input_features > array[i + 1]:\n bsz = k\n break\n for i, k in enumerate(array):\n if output_features > array[i + 1]:\n bsz2 = k\n break\n\n return bsz, bsz2\n\n\ndef matmul_fp8_global(\n A: torch.Tensor,\n B: torch.Tensor,\n fw_code: torch.Tensor,\n bw_code: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n bsz: int = -1,\n bsz2: int = -1,\n):\n if bsz == -1 or bsz2 == -1:\n bsz, bsz2 = get_block_sizes(A, B)\n return MatMulFP8Global.apply(A, B, out, fw_code, bw_code, bsz, bsz2)\n\n\ndef matmul_fp8_mixed(\n A: torch.Tensor,\n B: torch.Tensor,\n fw_code: torch.Tensor,\n bw_code: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n bsz: int = -1,\n bsz2: int = -1,\n):\n if bsz == -1 or bsz2 == -1:\n bsz, bsz2 = get_block_sizes(A, B)\n return MatMulFP8Mixed.apply(A, B, out, fw_code, bw_code, bsz, bsz2)\n\n\ndef switchback_bnb(\n A: torch.Tensor,\n B: torch.Tensor,\n out: Optional[torch.Tensor] = None,","source_hash":"8c5047bbe36a6b6d5b6837dd58ed06f214467941a8499227b81ba673117413bc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.research.autograd._functions.matmul_fp8_mixed","uri":"program://bitsandbytes/function/bitsandbytes.research.autograd._functions.matmul_fp8_mixed#L371-L382","kind":"function","name":"matmul_fp8_mixed","path":"bitsandbytes/research/autograd/_functions.py","language":"python","start_line":371,"end_line":382,"context_start_line":351,"context_end_line":396,"code":" bsz2 = k\n break\n\n return bsz, bsz2\n\n\ndef matmul_fp8_global(\n A: torch.Tensor,\n B: torch.Tensor,\n fw_code: torch.Tensor,\n bw_code: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n bsz: int = -1,\n bsz2: int = -1,\n):\n if bsz == -1 or bsz2 == -1:\n bsz, bsz2 = get_block_sizes(A, B)\n return MatMulFP8Global.apply(A, 
B, out, fw_code, bw_code, bsz, bsz2)\n\n\ndef matmul_fp8_mixed(\n A: torch.Tensor,\n B: torch.Tensor,\n fw_code: torch.Tensor,\n bw_code: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n bsz: int = -1,\n bsz2: int = -1,\n):\n if bsz == -1 or bsz2 == -1:\n bsz, bsz2 = get_block_sizes(A, B)\n return MatMulFP8Mixed.apply(A, B, out, fw_code, bw_code, bsz, bsz2)\n\n\ndef switchback_bnb(\n A: torch.Tensor,\n B: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n state: Optional[MatmulLtState] = None,\n threshold=0.0,\n bias=None,\n):\n state = state or MatmulLtState()\n if threshold > 0.0:\n state.threshold = threshold\n return SwitchBackBnb.apply(A, B, out, bias, state)","source_hash":"8c5047bbe36a6b6d5b6837dd58ed06f214467941a8499227b81ba673117413bc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.research.autograd._functions.switchback_bnb","uri":"program://bitsandbytes/function/bitsandbytes.research.autograd._functions.switchback_bnb#L385-L396","kind":"function","name":"switchback_bnb","path":"bitsandbytes/research/autograd/_functions.py","language":"python","start_line":385,"end_line":396,"context_start_line":365,"context_end_line":396,"code":"):\n if bsz == -1 or bsz2 == -1:\n bsz, bsz2 = get_block_sizes(A, B)\n return MatMulFP8Global.apply(A, B, out, fw_code, bw_code, bsz, bsz2)\n\n\ndef matmul_fp8_mixed(\n A: torch.Tensor,\n B: torch.Tensor,\n fw_code: torch.Tensor,\n bw_code: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n bsz: int = -1,\n bsz2: int = -1,\n):\n if bsz == -1 or bsz2 == -1:\n bsz, bsz2 = get_block_sizes(A, B)\n return MatMulFP8Mixed.apply(A, B, out, fw_code, bw_code, bsz, bsz2)\n\n\ndef switchback_bnb(\n A: torch.Tensor,\n B: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n state: Optional[MatmulLtState] = None,\n threshold=0.0,\n bias=None,\n):\n state = state or MatmulLtState()\n if threshold > 0.0:\n state.threshold = threshold\n return SwitchBackBnb.apply(A, B, out, bias, state)","source_hash":"8c5047bbe36a6b6d5b6837dd58ed06f214467941a8499227b81ba673117413bc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.research.autograd._functions.forward","uri":"program://bitsandbytes/function/bitsandbytes.research.autograd._functions.forward#L187-L301","kind":"function","name":"forward","path":"bitsandbytes/research/autograd/_functions.py","language":"python","start_line":187,"end_line":301,"context_start_line":167,"context_end_line":321,"code":" # fp8out_transpose = fp8out_transpose.view(grad_output.shape[0], grad_output.shape[1], grad_output.shape[2])\n\n # not supported by PyTorch. 
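An end-to-end usage sketch for the FP8 wrappers above, assuming a CUDA bitsandbytes build; block sizes are left at their `-1` defaults so `get_block_sizes` derives them, and the codebooks mirror the ones built by the `LinearFP8*` modules:

```python
import torch
import bitsandbytes as bnb

A = torch.randn(8, 1024, device="cuda", dtype=torch.float16, requires_grad=True)
W = torch.randn(512, 1024, device="cuda", dtype=torch.float16, requires_grad=True)

# Same codebooks as the LinearFP8* modules build lazily in forward.
fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(A.device)
bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(A.device)

out = bnb.research.matmul_fp8_mixed(A, W.t(), fw_code=fw_code, bw_code=bw_code)
out.sum().backward()
print(out.shape, A.grad.shape, W.grad.shape)
```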
TODO: create work-around\n if req_gradA:\n grad_A = torch.matmul(fp8out, B.t().to(fp8out.dtype)).to(A.dtype)\n\n if req_gradB:\n if len(A.shape) == 3:\n At = A.transpose(2, 1).contiguous()\n else:\n At = A.transpose(1, 0).contiguous()\n cA, state = F.quantize(At.float(), code=ctx.fw_code)\n fp8At = F.dequantize(cA, state).to(A.dtype)\n grad_B = torch.matmul(fp8At.to(fp8out.dtype), fp8out).to(B.dtype)\n\n return grad_A, grad_B, None, None, None, None, None\n\n\nclass SwitchBackBnb(torch.autograd.Function):\n @staticmethod\n def forward(ctx, A, B, out=None, bias=None, state: Optional[MatmulLtState] = None):\n state = state or MatmulLtState()\n\n # default to pytorch behavior if inputs are empty\n ctx.is_empty = False\n if prod(A.shape) == 0:\n ctx.is_empty = True\n ctx.A = A\n ctx.B = B\n ctx.bias = bias\n if A.shape[-1] == B.shape[0]:\n return torch.empty(A.shape[:-1] + B.shape[1:], dtype=A.dtype, device=A.device)\n else:\n return torch.empty(A.shape[:-1] + B.shape[:1], dtype=A.dtype, device=A.device)\n\n # 1. Quantize A\n # 2. Quantize B\n # 3. Matmul\n # 4. Mixed-precision decomposition matmul\n # 5. Save state\n input_shape = A.shape\n if state.outlier_pool is None:\n state.outlier_pool = GlobalOutlierPooler.get_instance()\n\n # Cast A to fp16\n if A.dtype != torch.float16:\n warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n\n # 1. Quantize A\n if len(A.shape) == 3:\n A = A.view(-1, A.shape[-1]).contiguous()\n CA, CAt, SCA, SCAt, outlier_cols = F.int8_double_quant(A.to(torch.float16), threshold=state.threshold)\n\n if state.threshold > 0.0 and outlier_cols is not None:\n if state.has_fp16_weights:\n idx = outlier_cols\n CA[:, idx] = 0\n subA = A[:, idx]\n state.subB = B[:, idx].t().contiguous()\n state.idx = idx\n else:\n if state.SB is None:\n state.SB = (state.CB.shape, \"row\")\n else:\n if not state.has_fp16_weights and state.SB is None:\n state.SB = (state.CB.shape, \"row\")\n subA = None\n\n # 2. Quantize B\n if state.has_fp16_weights:\n # print('B shape', B.shape)\n has_grad = getattr(B, \"grad\", None) is not None\n is_transposed = not B.is_contiguous() and B.shape[0] == B.stride(1)\n if is_transposed:\n B = B.contiguous()\n\n if (state.is_training and not has_grad) or state.SB is None:\n state.reset_grads()\n (\n state.CB,\n state.CBt,\n state.SCB,\n state.SCBt,\n _,\n ) = F.int8_double_quant(B.to(torch.float16))\n state.SB = (state.CB.shape, \"row\")\n else:\n has_grad = False\n\n if outlier_cols is not None and not state.has_fp16_weights:\n # extract outliers\n state.idx = outlier_cols\n outliers = state.CB[:, state.idx.long()].clone()\n state.subB = (outliers * state.SCB.view(-1, 1) / 127.0).t().contiguous().to(A.dtype)\n CA[:, state.idx.long()] = 0\n\n subA = A[:, state.idx.long()]\n\n shapeB = state.SB[0]\n\n if len(input_shape) == 3:\n output_shape = (input_shape[0], input_shape[1], shapeB[0])\n else:\n output_shape = (input_shape[0], shapeB[0])\n\n # 3. Matmul\n out32 = F.int8_linear_matmul(CA, state.CB)\n # we apply the fused bias here\n\n if bias is None or bias.dtype == torch.float16:\n output = F.int8_mm_dequant(out32, SCA, state.SCB, bias=bias).to(A.dtype)\n else: # apply bias separately\n output = F.int8_mm_dequant(out32, SCA, state.SCB, bias=None).to(A.dtype)\n output.add_(bias)\n\n # 4. Mixed-precision decomposition matmul\n if outlier_cols is not None and subA is not None:\n output += torch.matmul(subA, state.subB)\n\n # 5. 
Save state\n ctx.state = state\n\n ctx.grad_shape = input_shape\n ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype\n\n if any(ctx.needs_input_grad[:2]):\n ctx.tensors = (CAt, subA, A)\n ctx.tensor_states = (SCAt, state.idx)\n else:\n ctx.tensors = [None, None, None]\n ctx.tensor_states = (None, None)\n ctx.save_for_backward(None, None)\n\n clone_func = torch.clone if len(output_shape) == 3 else lambda x: x\n return clone_func(output.view(output_shape))\n\n @staticmethod\n def backward(ctx, grad_output):\n if ctx.is_empty:\n bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)\n return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None\n\n req_gradA, req_gradB, _, req_gradBias, _ = ctx.needs_input_grad\n CAt, subA, A = ctx.tensors\n SCAt, idx = ctx.tensor_states\n state = ctx.state\n grad_A = grad_B = grad_bias = None\n\n if req_gradBias:\n # compute grad_bias first before changing grad_output dtype\n grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)\n\n # Cast grad_output to fp16\n if len(grad_output.shape) == 3:\n grad_output = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()","source_hash":"8c5047bbe36a6b6d5b6837dd58ed06f214467941a8499227b81ba673117413bc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.research.autograd._functions.backward","uri":"program://bitsandbytes/function/bitsandbytes.research.autograd._functions.backward#L304-L337","kind":"function","name":"backward","path":"bitsandbytes/research/autograd/_functions.py","language":"python","start_line":304,"end_line":337,"context_start_line":284,"context_end_line":357,"code":" output += torch.matmul(subA, state.subB)\n\n # 5. Save state\n ctx.state = state\n\n ctx.grad_shape = input_shape\n ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype\n\n if any(ctx.needs_input_grad[:2]):\n ctx.tensors = (CAt, subA, A)\n ctx.tensor_states = (SCAt, state.idx)\n else:\n ctx.tensors = [None, None, None]\n ctx.tensor_states = (None, None)\n ctx.save_for_backward(None, None)\n\n clone_func = torch.clone if len(output_shape) == 3 else lambda x: x\n return clone_func(output.view(output_shape))\n\n @staticmethod\n def backward(ctx, grad_output):\n if ctx.is_empty:\n bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)\n return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None\n\n req_gradA, req_gradB, _, req_gradBias, _ = ctx.needs_input_grad\n CAt, subA, A = ctx.tensors\n SCAt, idx = ctx.tensor_states\n state = ctx.state\n grad_A = grad_B = grad_bias = None\n\n if req_gradBias:\n # compute grad_bias first before changing grad_output dtype\n grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)\n\n # Cast grad_output to fp16\n if len(grad_output.shape) == 3:\n grad_output = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()\n\n Cgrad, Cgradt, SCgrad, SCgradt, outlier_cols = F.int8_double_quant(grad_output.to(torch.float16))\n\n if req_gradB:\n # print('back A shape', A.shape)\n # print('grad output t shape', grad_output.t().shape)\n grad_B = torch.matmul(grad_output.t(), A)\n\n if req_gradA:\n if state.CB is not None:\n CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))\n grad_A = torch.matmul(grad_output, CB).view(ctx.grad_shape).to(ctx.dtype_A)\n else:\n raise Exception(\"State must contain either CBt or CB matrix for backward\")\n\n return grad_A, grad_B, None, grad_bias, None\n\n\ndef 
get_block_sizes(input_matrix, weight_matrix):\n input_features = input_matrix.shape[-1]\n output_features = weight_matrix.shape[0] if weight_matrix.shape[1] == input_features else weight_matrix.shape[1]\n array = [4096, 2048, 1024, 512, 256, 128, 64, 0]\n bsz, bsz2 = 1024, 1024\n for i, k in enumerate(array):\n if input_features > array[i + 1]:\n bsz = k\n break\n for i, k in enumerate(array):\n if output_features > array[i + 1]:\n bsz2 = k\n break\n\n return bsz, bsz2\n\n\ndef matmul_fp8_global(","source_hash":"8c5047bbe36a6b6d5b6837dd58ed06f214467941a8499227b81ba673117413bc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.rmsprop","uri":"program://bitsandbytes/module/bitsandbytes.optim.rmsprop#L1-L196","kind":"module","name":"bitsandbytes.optim.rmsprop","path":"bitsandbytes/optim/rmsprop.py","language":"python","start_line":1,"end_line":196,"context_start_line":1,"context_end_line":196,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom bitsandbytes.optim.optimizer import Optimizer1State\n\n\nclass RMSprop(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-2,\n alpha=0.99,\n eps=1e-8,\n weight_decay=0,\n momentum=0,\n centered=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Base RMSprop optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-2):\n The learning rate.\n alpha (`float`, defaults to 0.99):\n The alpha value is the decay rate of the squared gradients of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n momentum (`float`, defaults to 0):\n The momentum value speeds up the optimizer by taking bigger steps.\n centered (`bool`, defaults to `False`):\n Whether the gradients are normalized by the variance. 
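The get_block_sizes helper above scans the descending bucket list [4096, 2048, 1024, 512, 256, 128, 64, 0] and returns, for each dimension, the first bucket whose next-smaller threshold the feature count exceeds. A small worked example (shapes chosen only to exercise the logic):

```python
import torch

from bitsandbytes.research.autograd._functions import get_block_sizes

x = torch.empty(16, 100)    # input_features = 100
w = torch.empty(1536, 100)  # w.shape[1] matches input, so output_features = 1536

# input_features  = 100:  first bucket with 100  > next threshold is 128  (100 > 64)
# output_features = 1536: first bucket with 1536 > next threshold is 2048 (1536 > 1024)
bsz, bsz2 = get_block_sizes(x, w)
assert (bsz, bsz2) == (128, 2048)
```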
If `True`, it can help training at the expense of additional compute.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if alpha == 0:\n raise NotImplementedError(\"RMSprop with alpha==0.0 is not supported!\")\n if centered:\n raise NotImplementedError(\"Centered RMSprop is not supported!\")\n super().__init__(\n \"rmsprop\",\n params,\n lr,\n (alpha, momentum),\n eps,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass RMSprop8bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-2,\n alpha=0.99,\n eps=1e-8,\n weight_decay=0,\n momentum=0,\n centered=False,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n 8-bit RMSprop optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-2):\n The learning rate.\n alpha (`float`, defaults to 0.99):\n The alpha value is the decay rate of the squared gradients of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n momentum (`float`, defaults to 0):\n The momentum value speeds up the optimizer by taking bigger steps.\n centered (`bool`, defaults to `False`):\n Whether the gradients are normalized by the variance. 
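For orientation, these RMSprop classes are intended as drop-in replacements for torch.optim.RMSprop. A minimal usage sketch, assuming a CUDA device and placeholder model and hyperparameters:

```python
import torch
import bitsandbytes as bnb

model = torch.nn.Linear(4096, 4096).cuda()

# 32-bit state by default; optim_bits=8 quantizes the optimizer state instead.
opt = bnb.optim.RMSprop(model.parameters(), lr=1e-2, alpha=0.99, momentum=0.9)

loss = model(torch.randn(8, 4096, device="cuda")).pow(2).mean()
loss.backward()
opt.step()
opt.zero_grad()
```

Note the guards above: alpha=0 and centered=True both raise NotImplementedError at construction time.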
If `True`, it can help training at the expense of additional compute.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if alpha == 0:\n raise NotImplementedError(\"RMSprop with alpha==0.0 is not supported!\")\n if centered:\n raise NotImplementedError(\"Centered RMSprop is not supported!\")\n super().__init__(\n \"rmsprop\",\n params,\n lr,\n (alpha, momentum),\n eps,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass RMSprop32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-2,\n alpha=0.99,\n eps=1e-8,\n weight_decay=0,\n momentum=0,\n centered=False,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n 32-bit RMSprop optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-2):\n The learning rate.\n alpha (`float`, defaults to 0.99):\n The alpha value is the decay rate of the squared gradients of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n momentum (`float`, defaults to 0):\n The momentum value speeds up the optimizer by taking bigger steps.\n centered (`bool`, defaults to `False`):\n Whether the gradients are normalized by the variance. If `True`, it can help training at the expense of additional compute.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n\n if alpha == 0:\n raise NotImplementedError(\"RMSprop with alpha==0.0 is not supported!\")\n if centered:\n raise NotImplementedError(\"Centered RMSprop is not supported!\")\n super().__init__(\n \"rmsprop\",\n params,\n lr,\n (alpha, momentum),\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )","source_hash":"d9111e77e0669cd46be0938bde2d6bda5689c7726a8d047ff3950081cc597631","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.rmsprop.RMSprop","uri":"program://bitsandbytes/class/bitsandbytes.optim.rmsprop.RMSprop#L8-L69","kind":"class","name":"RMSprop","path":"bitsandbytes/optim/rmsprop.py","language":"python","start_line":8,"end_line":69,"context_start_line":1,"context_end_line":89,"code":"# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom bitsandbytes.optim.optimizer import Optimizer1State\n\n\nclass RMSprop(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-2,\n alpha=0.99,\n eps=1e-8,\n weight_decay=0,\n momentum=0,\n centered=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Base RMSprop optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-2):\n The learning rate.\n alpha (`float`, defaults to 0.99):\n The alpha value is the decay rate of the squared gradients of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n momentum (`float`, defaults to 0):\n The momentum value speeds up the optimizer by taking bigger steps.\n centered (`bool`, defaults to `False`):\n Whether the gradients are normalized by the variance. If `True`, it can help training at the expense of additional compute.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if alpha == 0:\n raise NotImplementedError(\"RMSprop with alpha==0.0 is not supported!\")\n if centered:\n raise NotImplementedError(\"Centered RMSprop is not supported!\")\n super().__init__(\n \"rmsprop\",\n params,\n lr,\n (alpha, momentum),\n eps,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass RMSprop8bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-2,\n alpha=0.99,\n eps=1e-8,\n weight_decay=0,\n momentum=0,\n centered=False,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n 8-bit RMSprop optimizer.\n","source_hash":"d9111e77e0669cd46be0938bde2d6bda5689c7726a8d047ff3950081cc597631","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.rmsprop.RMSprop8bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.rmsprop.RMSprop8bit#L72-L132","kind":"class","name":"RMSprop8bit","path":"bitsandbytes/optim/rmsprop.py","language":"python","start_line":72,"end_line":132,"context_start_line":52,"context_end_line":152,"code":" \"\"\"\n if alpha == 0:\n raise NotImplementedError(\"RMSprop with alpha==0.0 is not supported!\")\n if centered:\n raise NotImplementedError(\"Centered RMSprop is not supported!\")\n super().__init__(\n \"rmsprop\",\n params,\n lr,\n (alpha, momentum),\n eps,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass RMSprop8bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-2,\n alpha=0.99,\n eps=1e-8,\n weight_decay=0,\n momentum=0,\n centered=False,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n 
\"\"\"\n 8-bit RMSprop optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-2):\n The learning rate.\n alpha (`float`, defaults to 0.99):\n The alpha value is the decay rate of the squared gradients of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n momentum (`float`, defaults to 0):\n The momentum value speeds up the optimizer by taking bigger steps.\n centered (`bool`, defaults to `False`):\n Whether the gradients are normalized by the variance. If `True`, it can help training at the expense of additional compute.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if alpha == 0:\n raise NotImplementedError(\"RMSprop with alpha==0.0 is not supported!\")\n if centered:\n raise NotImplementedError(\"Centered RMSprop is not supported!\")\n super().__init__(\n \"rmsprop\",\n params,\n lr,\n (alpha, momentum),\n eps,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass RMSprop32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-2,\n alpha=0.99,\n eps=1e-8,\n weight_decay=0,\n momentum=0,\n centered=False,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n 32-bit RMSprop optimizer.\n","source_hash":"d9111e77e0669cd46be0938bde2d6bda5689c7726a8d047ff3950081cc597631","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.rmsprop.RMSprop32bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.rmsprop.RMSprop32bit#L135-L196","kind":"class","name":"RMSprop32bit","path":"bitsandbytes/optim/rmsprop.py","language":"python","start_line":135,"end_line":196,"context_start_line":115,"context_end_line":196,"code":" \"\"\"\n if alpha == 0:\n raise NotImplementedError(\"RMSprop with alpha==0.0 is not supported!\")\n if centered:\n raise NotImplementedError(\"Centered RMSprop is not supported!\")\n super().__init__(\n \"rmsprop\",\n params,\n lr,\n (alpha, momentum),\n eps,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass RMSprop32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-2,\n alpha=0.99,\n eps=1e-8,\n weight_decay=0,\n momentum=0,\n centered=False,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n 32-bit RMSprop optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-2):\n The learning rate.\n alpha (`float`, defaults to 0.99):\n The alpha value is the decay rate of the squared gradients of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n momentum 
(`float`, defaults to 0):\n The momentum value speeds up the optimizer by taking bigger steps.\n centered (`bool`, defaults to `False`):\n Whether the gradients are normalized by the variance. If `True`, it can help training at the expense of additional compute.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n\n if alpha == 0:\n raise NotImplementedError(\"RMSprop with alpha==0.0 is not supported!\")\n if centered:\n raise NotImplementedError(\"Centered RMSprop is not supported!\")\n super().__init__(\n \"rmsprop\",\n params,\n lr,\n (alpha, momentum),\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )","source_hash":"d9111e77e0669cd46be0938bde2d6bda5689c7726a8d047ff3950081cc597631","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.rmsprop.__init__","uri":"program://bitsandbytes/function/bitsandbytes.optim.rmsprop.__init__#L136-L196","kind":"function","name":"__init__","path":"bitsandbytes/optim/rmsprop.py","language":"python","start_line":136,"end_line":196,"context_start_line":116,"context_end_line":196,"code":" if alpha == 0:\n raise NotImplementedError(\"RMSprop with alpha==0.0 is not supported!\")\n if centered:\n raise NotImplementedError(\"Centered RMSprop is not supported!\")\n super().__init__(\n \"rmsprop\",\n params,\n lr,\n (alpha, momentum),\n eps,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass RMSprop32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-2,\n alpha=0.99,\n eps=1e-8,\n weight_decay=0,\n momentum=0,\n centered=False,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n 32-bit RMSprop optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-2):\n The learning rate.\n alpha (`float`, defaults to 0.99):\n The alpha value is the decay rate of the squared gradients of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n momentum (`float`, defaults to 0):\n The momentum value speeds up the optimizer by taking bigger steps.\n centered (`bool`, defaults to `False`):\n Whether the gradients are normalized by the variance. 
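The three variants differ only in the optim_bits value forwarded to Optimizer1State: RMSprop8bit hardcodes 8 and RMSprop32bit hardcodes 32. A sketch of the equivalence (the model is a placeholder):

```python
import torch
import bitsandbytes as bnb

params = list(torch.nn.Linear(4096, 4096).parameters())

# Equivalent constructions: the subclass merely pins optim_bits.
opt_a = bnb.optim.RMSprop(params, lr=1e-2, optim_bits=8)
opt_b = bnb.optim.RMSprop8bit(params, lr=1e-2)
```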
If `True`, it can help training at the expense of additional compute.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n\n if alpha == 0:\n raise NotImplementedError(\"RMSprop with alpha==0.0 is not supported!\")\n if centered:\n raise NotImplementedError(\"Centered RMSprop is not supported!\")\n super().__init__(\n \"rmsprop\",\n params,\n lr,\n (alpha, momentum),\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )","source_hash":"d9111e77e0669cd46be0938bde2d6bda5689c7726a8d047ff3950081cc597631","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.sgd","uri":"program://bitsandbytes/module/bitsandbytes.optim.sgd#L1-L176","kind":"module","name":"bitsandbytes.optim.sgd","path":"bitsandbytes/optim/sgd.py","language":"python","start_line":1,"end_line":176,"context_start_line":1,"context_end_line":176,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom bitsandbytes.optim.optimizer import Optimizer1State\n\n\nclass SGD(Optimizer1State):\n def __init__(\n self,\n params,\n lr,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Base SGD optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`):\n The learning rate.\n momentum (`float`, defaults to 0):\n The momentum value speeds up the optimizer by taking bigger steps.\n dampening (`float`, defaults to 0):\n The dampening value reduces the momentum of the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n nesterov (`bool`, defaults to `False`):\n Whether to use Nesterov momentum.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if momentum == 0:\n raise NotImplementedError(\"SGD without momentum is not supported!\")\n super().__init__(\n \"momentum\",\n params,\n lr,\n (momentum, dampening),\n 0.0,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass SGD8bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n args=None,\n 
min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n 8-bit SGD optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`):\n The learning rate.\n momentum (`float`, defaults to 0):\n The momentum value speeds up the optimizer by taking bigger steps.\n dampening (`float`, defaults to 0):\n The dampening value reduces the momentum of the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n nesterov (`bool`, defaults to `False`):\n Whether to use Nesterov momentum.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if momentum == 0:\n raise NotImplementedError(\"SGD without momentum is not supported!\")\n super().__init__(\n \"momentum\",\n params,\n lr,\n (momentum, dampening),\n 0.0,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass SGD32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n 32-bit SGD optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`):\n The learning rate.\n momentum (`float`, defaults to 0):\n The momentum value speeds up the optimizer by taking bigger steps.\n dampening (`float`, defaults to 0):\n The dampening value reduces the momentum of the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n nesterov (`bool`, defaults to `False`):\n Whether to use Nesterov momentum.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if momentum == 0:\n raise NotImplementedError(\"SGD without momentum is not supported!\")\n super().__init__(\n \"momentum\",\n params,\n lr,\n (momentum, dampening),\n 0.0,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )","source_hash":"a88ba2bbbb5e98b90463fb0886a382c1e6c66005e9a0910da5404c74f2371e1d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.sgd.SGD","uri":"program://bitsandbytes/class/bitsandbytes.optim.sgd.SGD#L8-L64","kind":"class","name":"SGD","path":"bitsandbytes/optim/sgd.py","language":"python","start_line":8,"end_line":64,"context_start_line":1,"context_end_line":84,"code":"# Copyright (c) Facebook, Inc. 
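Unlike torch.optim.SGD, every variant above registers itself as a "momentum" optimizer and therefore rejects momentum=0. A short sketch of both the happy path and the guard (model is a placeholder):

```python
import torch
import bitsandbytes as bnb

params = list(torch.nn.Linear(4096, 4096).parameters())

opt = bnb.optim.SGD8bit(params, lr=0.1, momentum=0.9)  # momentum must be nonzero

try:
    bnb.optim.SGD8bit(params, lr=0.1)  # momentum defaults to 0
except NotImplementedError as err:
    print(err)  # SGD without momentum is not supported!
```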
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom bitsandbytes.optim.optimizer import Optimizer1State\n\n\nclass SGD(Optimizer1State):\n def __init__(\n self,\n params,\n lr,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Base SGD optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`):\n The learning rate.\n momentum (`float`, defaults to 0):\n The momentum value speeds up the optimizer by taking bigger steps.\n dampening (`float`, defaults to 0):\n The dampening value reduces the momentum of the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n nesterov (`bool`, defaults to `False`):\n Whether to use Nesterov momentum.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if momentum == 0:\n raise NotImplementedError(\"SGD without momentum is not supported!\")\n super().__init__(\n \"momentum\",\n params,\n lr,\n (momentum, dampening),\n 0.0,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass SGD8bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n 8-bit SGD optimizer.\n\n Arguments:","source_hash":"a88ba2bbbb5e98b90463fb0886a382c1e6c66005e9a0910da5404c74f2371e1d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.sgd.SGD8bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.sgd.SGD8bit#L67-L120","kind":"class","name":"SGD8bit","path":"bitsandbytes/optim/sgd.py","language":"python","start_line":67,"end_line":120,"context_start_line":47,"context_end_line":140,"code":" block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if momentum == 0:\n raise NotImplementedError(\"SGD without momentum is not supported!\")\n super().__init__(\n \"momentum\",\n params,\n lr,\n (momentum, dampening),\n 0.0,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass SGD8bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n 8-bit SGD optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`):\n The learning rate.\n momentum (`float`, defaults to 0):\n The momentum value speeds up the optimizer by taking bigger steps.\n dampening (`float`, defaults to 0):\n The dampening value 
reduces the momentum of the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n nesterov (`bool`, defaults to `False`):\n Whether to use Nesterov momentum.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if momentum == 0:\n raise NotImplementedError(\"SGD without momentum is not supported!\")\n super().__init__(\n \"momentum\",\n params,\n lr,\n (momentum, dampening),\n 0.0,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass SGD32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n 32-bit SGD optimizer.\n\n Arguments:","source_hash":"a88ba2bbbb5e98b90463fb0886a382c1e6c66005e9a0910da5404c74f2371e1d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.sgd.SGD32bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.sgd.SGD32bit#L123-L176","kind":"class","name":"SGD32bit","path":"bitsandbytes/optim/sgd.py","language":"python","start_line":123,"end_line":176,"context_start_line":103,"context_end_line":176,"code":" block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if momentum == 0:\n raise NotImplementedError(\"SGD without momentum is not supported!\")\n super().__init__(\n \"momentum\",\n params,\n lr,\n (momentum, dampening),\n 0.0,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass SGD32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n 32-bit SGD optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`):\n The learning rate.\n momentum (`float`, defaults to 0):\n The momentum value speeds up the optimizer by taking bigger steps.\n dampening (`float`, defaults to 0):\n The dampening value reduces the momentum of the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n nesterov (`bool`, defaults to `False`):\n Whether to use Nesterov momentum.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if momentum == 0:\n raise NotImplementedError(\"SGD without momentum is not 
supported!\")\n super().__init__(\n \"momentum\",\n params,\n lr,\n (momentum, dampening),\n 0.0,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )","source_hash":"a88ba2bbbb5e98b90463fb0886a382c1e6c66005e9a0910da5404c74f2371e1d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.sgd.__init__","uri":"program://bitsandbytes/function/bitsandbytes.optim.sgd.__init__#L124-L176","kind":"function","name":"__init__","path":"bitsandbytes/optim/sgd.py","language":"python","start_line":124,"end_line":176,"context_start_line":104,"context_end_line":176,"code":" Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if momentum == 0:\n raise NotImplementedError(\"SGD without momentum is not supported!\")\n super().__init__(\n \"momentum\",\n params,\n lr,\n (momentum, dampening),\n 0.0,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass SGD32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n 32-bit SGD optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`):\n The learning rate.\n momentum (`float`, defaults to 0):\n The momentum value speeds up the optimizer by taking bigger steps.\n dampening (`float`, defaults to 0):\n The dampening value reduces the momentum of the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n nesterov (`bool`, defaults to `False`):\n Whether to use Nesterov momentum.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if momentum == 0:\n raise NotImplementedError(\"SGD without momentum is not supported!\")\n super().__init__(\n \"momentum\",\n params,\n lr,\n (momentum, dampening),\n 0.0,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )","source_hash":"a88ba2bbbb5e98b90463fb0886a382c1e6c66005e9a0910da5404c74f2371e1d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adamw","uri":"program://bitsandbytes/module/bitsandbytes.optim.adamw#L1-L385","kind":"module","name":"bitsandbytes.optim.adamw","path":"bitsandbytes/optim/adamw.py","language":"python","start_line":1,"end_line":385,"context_start_line":1,"context_end_line":385,"code":"# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom bitsandbytes.optim.optimizer import Optimizer2State\n\n\nclass AdamW(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=1e-2,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n Base AdamW optimizer.\n\n Arguments:\n params (`torch.Tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass AdamW8bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=1e-2,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 8-bit AdamW optimizer.\n\n Arguments:\n params (`torch.Tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n Note: This parameter is not supported in AdamW8bit and must be False.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n Note: This parameter is not used in AdamW8bit as it always uses 8-bit optimization.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts 
clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n # Validate unsupported parameters\n if amsgrad:\n raise ValueError(\"AdamW8bit does not support amsgrad=True\")\n\n if optim_bits != 32:\n # We allow the default value of 32 to maintain compatibility with the function signature,\n # but any other value is invalid since AdamW8bit always uses 8-bit optimization\n raise ValueError(\"AdamW8bit only supports optim_bits=32 (default value for compatibility)\")\n\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 8, # Hardcoded to 8 bits\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass AdamW32bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=1e-2,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 32-bit AdamW optimizer.\n\n Arguments:\n params (`torch.Tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass PagedAdamW(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=1e-2,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Paged AdamW optimizer.\n\n Arguments:\n params (`torch.Tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the 
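As the validation block above shows, AdamW8bit keeps amsgrad and optim_bits in its signature purely for drop-in compatibility and rejects any non-default value eagerly. Sketch (model is a placeholder):

```python
import torch
import bitsandbytes as bnb

params = list(torch.nn.Linear(4096, 4096).parameters())

opt = bnb.optim.AdamW8bit(params, lr=1e-3, betas=(0.9, 0.999), weight_decay=1e-2)

try:
    bnb.optim.AdamW8bit(params, amsgrad=True)  # rejected at construction
except ValueError as err:
    print(err)  # AdamW8bit does not support amsgrad=True
```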
optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedAdamW8bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=1e-2,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Paged 8-bit AdamW optimizer.\n\n Arguments:\n params (`torch.Tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n Note: This parameter is not supported in PagedAdamW8bit and must be False.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n Note: This parameter is not used in PagedAdamW8bit as it always uses 8-bit optimization.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n # Validate unsupported parameters\n if amsgrad:\n raise ValueError(\"PagedAdamW8bit does not support amsgrad=True\")\n\n if optim_bits != 32:\n # We allow the default value of 32 to maintain compatibility with the function signature,\n # but any other value is invalid since PagedAdamW8bit always uses 8-bit optimization\n raise ValueError(\"PagedAdamW8bit only supports optim_bits=32 (default value for compatibility)\")\n\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 8, # Hardcoded to 8 bits\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedAdamW32bit(Optimizer2State):\n def __init__(\n 
self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=1e-2,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Paged 32-bit AdamW optimizer.\n\n Arguments:\n params (`torch.Tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )","source_hash":"58cb811e36c96edf9e931427662c697abdbfbada58848c8edc61a3ac455e3583","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adamw.AdamW","uri":"program://bitsandbytes/class/bitsandbytes.optim.adamw.AdamW#L9-L67","kind":"class","name":"AdamW","path":"bitsandbytes/optim/adamw.py","language":"python","start_line":9,"end_line":67,"context_start_line":1,"context_end_line":87,"code":"# Copyright (c) Facebook, Inc. 
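The Paged* classes differ from their non-paged counterparts only in forwarding is_paged=True, so the two constructions below configure the optimizer identically; that paged state can be evicted to CPU memory under GPU pressure is stated here as background on the library, not shown in this file:

```python
import torch
import bitsandbytes as bnb

params = list(torch.nn.Linear(4096, 4096).parameters())

# Equivalent pair: the paged class pins is_paged=True.
opt_a = bnb.optim.AdamW(params, lr=1e-3, is_paged=True)
opt_b = bnb.optim.PagedAdamW(params, lr=1e-3)
```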
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom bitsandbytes.optim.optimizer import Optimizer2State\n\n\nclass AdamW(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=1e-2,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n Base AdamW optimizer.\n\n Arguments:\n params (`torch.Tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass AdamW8bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=1e-2,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 8-bit AdamW optimizer.","source_hash":"58cb811e36c96edf9e931427662c697abdbfbada58848c8edc61a3ac455e3583","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adamw.AdamW8bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.adamw.AdamW8bit#L70-L139","kind":"class","name":"AdamW8bit","path":"bitsandbytes/optim/adamw.py","language":"python","start_line":70,"end_line":139,"context_start_line":50,"context_end_line":159,"code":" Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass AdamW8bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=1e-2,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 8-bit AdamW optimizer.\n\n Arguments:\n 
params (`torch.Tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n Note: This parameter is not supported in AdamW8bit and must be False.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n Note: This parameter is not used in AdamW8bit as it always uses 8-bit optimization.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n # Validate unsupported parameters\n if amsgrad:\n raise ValueError(\"AdamW8bit does not support amsgrad=True\")\n\n if optim_bits != 32:\n # We allow the default value of 32 to maintain compatibility with the function signature,\n # but any other value is invalid since AdamW8bit always uses 8-bit optimization\n raise ValueError(\"AdamW8bit only supports optim_bits=32 (default value for compatibility)\")\n\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 8, # Hardcoded to 8 bits\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass AdamW32bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=1e-2,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 32-bit AdamW optimizer.","source_hash":"58cb811e36c96edf9e931427662c697abdbfbada58848c8edc61a3ac455e3583","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adamw.AdamW32bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.adamw.AdamW32bit#L142-L200","kind":"class","name":"AdamW32bit","path":"bitsandbytes/optim/adamw.py","language":"python","start_line":142,"end_line":200,"context_start_line":122,"context_end_line":220,"code":" # We allow the default value of 32 to maintain compatibility with the function signature,\n # but any other value is invalid since AdamW8bit always uses 8-bit optimization\n raise ValueError(\"AdamW8bit only supports optim_bits=32 (default value for compatibility)\")\n\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 8, # Hardcoded to 8 bits\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass AdamW32bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=1e-2,\n amsgrad=False,\n optim_bits=32,\n args=None,\n 
min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 32-bit AdamW optimizer.\n\n Arguments:\n params (`torch.Tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass PagedAdamW(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=1e-2,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Paged AdamW optimizer.\n","source_hash":"58cb811e36c96edf9e931427662c697abdbfbada58848c8edc61a3ac455e3583","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adamw.PagedAdamW","uri":"program://bitsandbytes/class/bitsandbytes.optim.adamw.PagedAdamW#L203-L258","kind":"class","name":"PagedAdamW","path":"bitsandbytes/optim/adamw.py","language":"python","start_line":203,"end_line":258,"context_start_line":183,"context_end_line":278,"code":" Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass PagedAdamW(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=1e-2,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Paged AdamW optimizer.\n\n Arguments:\n params (`torch.Tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay 
value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedAdamW8bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=1e-2,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Paged 8-bit AdamW optimizer.\n","source_hash":"58cb811e36c96edf9e931427662c697abdbfbada58848c8edc61a3ac455e3583","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adamw.PagedAdamW8bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.adamw.PagedAdamW8bit#L261-L327","kind":"class","name":"PagedAdamW8bit","path":"bitsandbytes/optim/adamw.py","language":"python","start_line":261,"end_line":327,"context_start_line":241,"context_end_line":347,"code":" Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedAdamW8bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=1e-2,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Paged 8-bit AdamW optimizer.\n\n Arguments:\n params (`torch.Tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n Note: This parameter is not supported in PagedAdamW8bit and must be False.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n Note: This parameter is not used in PagedAdamW8bit as it always uses 8-bit optimization.\n args (`object`, defaults to `None`):\n An object with additional 
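PagedAdamW only pins `is_paged=True`; the optimizer state is then allocated as paged tensors that the library can evict to host memory under GPU memory pressure. A usage sketch, assuming a CUDA build of bitsandbytes and a toy model:

```python
import torch
import bitsandbytes as bnb

model = torch.nn.Linear(8192, 8192).cuda()

# Same hyperparameters as AdamW; only the state placement differs.
opt = bnb.optim.PagedAdamW(model.parameters(), lr=1e-3, weight_decay=1e-2)

model(torch.randn(4, 8192, device="cuda")).sum().backward()
opt.step()  # moment buffers live in paged memory managed by the library
opt.zero_grad()
```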
arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n # Validate unsupported parameters\n if amsgrad:\n raise ValueError(\"PagedAdamW8bit does not support amsgrad=True\")\n\n if optim_bits != 32:\n # We allow the default value of 32 to maintain compatibility with the function signature,\n # but any other value is invalid since PagedAdamW8bit always uses 8-bit optimization\n raise ValueError(\"PagedAdamW8bit only supports optim_bits=32 (default value for compatibility)\")\n\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 8, # Hardcoded to 8 bits\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedAdamW32bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=1e-2,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Paged 32-bit AdamW optimizer.\n","source_hash":"58cb811e36c96edf9e931427662c697abdbfbada58848c8edc61a3ac455e3583","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adamw.PagedAdamW32bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.adamw.PagedAdamW32bit#L330-L385","kind":"class","name":"PagedAdamW32bit","path":"bitsandbytes/optim/adamw.py","language":"python","start_line":330,"end_line":385,"context_start_line":310,"context_end_line":385,"code":" # We allow the default value of 32 to maintain compatibility with the function signature,\n # but any other value is invalid since PagedAdamW8bit always uses 8-bit optimization\n raise ValueError(\"PagedAdamW8bit only supports optim_bits=32 (default value for compatibility)\")\n\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 8, # Hardcoded to 8 bits\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedAdamW32bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=1e-2,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Paged 32-bit AdamW optimizer.\n\n Arguments:\n params (`torch.Tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the 
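`min_8bit_size` is a per-tensor threshold: parameters with fewer elements than it keep 32-bit state even under an 8-bit optimizer. A hedged way to observe this, assuming the `state1`/`state2` state-dict layout that the bitsandbytes optimizers use internally (the exact state keys are an assumption here, matching the `init_state` shown later in this section):

```python
import torch
import bitsandbytes as bnb

model = torch.nn.Sequential(
    torch.nn.Linear(4096, 4096),  # weight: ~16.8M elements -> 8-bit state
    torch.nn.Linear(4096, 10),    # its bias: 10 elements   -> fp32 fallback
).cuda()

opt = bnb.optim.PagedAdamW8bit(model.parameters(), min_8bit_size=4096)

model(torch.randn(2, 4096, device="cuda")).sum().backward()
opt.step()  # state is initialized lazily on the first step

# Parameters below the threshold get float32 state; the rest get uint8.
for p in model.parameters():
    print(p.numel(), opt.state[p]["state1"].dtype)
```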
parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )","source_hash":"58cb811e36c96edf9e931427662c697abdbfbada58848c8edc61a3ac455e3583","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adamw.__init__","uri":"program://bitsandbytes/function/bitsandbytes.optim.adamw.__init__#L331-L385","kind":"function","name":"__init__","path":"bitsandbytes/optim/adamw.py","language":"python","start_line":331,"end_line":385,"context_start_line":311,"context_end_line":385,"code":" # but any other value is invalid since PagedAdamW8bit always uses 8-bit optimization\n raise ValueError(\"PagedAdamW8bit only supports optim_bits=32 (default value for compatibility)\")\n\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 8, # Hardcoded to 8 bits\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedAdamW32bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=1e-2,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Paged 32-bit AdamW optimizer.\n\n Arguments:\n params (`torch.Tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )","source_hash":"58cb811e36c96edf9e931427662c697abdbfbada58848c8edc61a3ac455e3583","truncated":false} 
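Taken together, the AdamW family in this file is a single Optimizer2State configuration with two switches: state precision and state placement. A compact summary of how each subclass pins them, matching the constructors above:

```python
# How each subclass above pins the two switches it forwards to
# Optimizer2State; "caller" means the constructor argument is passed through.
ADAMW_VARIANTS = {
    #                  state bits  paged state
    "AdamW8bit":       (8,         "caller"),
    "AdamW32bit":      (32,        "caller"),
    "PagedAdamW":      ("caller",  True),
    "PagedAdamW8bit":  (8,         True),
    "PagedAdamW32bit": (32,        True),
}

for name, (bits, paged) in ADAMW_VARIANTS.items():
    print(f"{name:16s} bits={bits!s:7s} paged={paged}")
```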
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lion","uri":"program://bitsandbytes/module/bitsandbytes.optim.lion#L1-L318","kind":"module","name":"bitsandbytes.optim.lion","path":"bitsandbytes/optim/lion.py","language":"python","start_line":1,"end_line":318,"context_start_line":1,"context_end_line":318,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom bitsandbytes.optim.optimizer import Optimizer1State\n\n\nclass Lion(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-4,\n betas=(0.9, 0.99),\n weight_decay=0,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n Base Lion optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-4):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n weight_decay (`float`, defaults to 0):\n The weight decay value for the optimizer.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"lion\",\n params,\n lr,\n betas,\n 0.0,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass Lion8bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-4,\n betas=(0.9, 0.99),\n weight_decay=0,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 8-bit Lion optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-4):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n weight_decay (`float`, defaults to 0):\n The weight decay value for the optimizer.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"lion\",\n params,\n lr,\n betas,\n 0.0,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n 
block_wise,\n is_paged=is_paged,\n )\n\n\nclass Lion32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-4,\n betas=(0.9, 0.99),\n weight_decay=0,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 32-bit Lion optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-4):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n weight_decay (`float`, defaults to 0):\n The weight decay value for the optimizer.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"lion\",\n params,\n lr,\n betas,\n 0.0,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass PagedLion(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-4,\n betas=(0.9, 0.99),\n weight_decay=0,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Paged Lion optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-4):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n weight_decay (`float`, defaults to 0):\n The weight decay value for the optimizer.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n super().__init__(\n \"lion\",\n params,\n lr,\n betas,\n 0.0,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedLion8bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-4,\n betas=(0.9, 0.99),\n weight_decay=0,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Paged 8-bit Lion optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-4):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n weight_decay (`float`, defaults to 0):\n The weight decay 
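Usage is uniform across the Lion variants; only the state precision and paging differ. A minimal sketch (the toy model and hyperparameters are illustrative; the Lion paper suggests a smaller learning rate and larger weight decay than AdamW):

```python
import torch
import bitsandbytes as bnb

model = torch.nn.Linear(4096, 4096).cuda()

# Defaults here (lr=1e-4, weight_decay=0) leave the Lion-specific tuning
# to the caller; a nonzero weight decay is common in practice.
opt = bnb.optim.Lion8bit(model.parameters(), lr=1e-4, weight_decay=0.1)

model(torch.randn(8, 4096, device="cuda")).sum().backward()
opt.step()
opt.zero_grad()
```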
value for the optimizer.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n super().__init__(\n \"lion\",\n params,\n lr,\n betas,\n 0.0,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedLion32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-4,\n betas=(0.9, 0.99),\n weight_decay=0,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Paged 32-bit Lion optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-4):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n weight_decay (`float`, defaults to 0):\n The weight decay value for the optimizer.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n super().__init__(\n \"lion\",\n params,\n lr,\n betas,\n 0.0,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )","source_hash":"6ad8e22396d988cb9903a33cbb393807e2fc5cc23158486ea2e6ba0664bac5e9","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lion.Lion","uri":"program://bitsandbytes/class/bitsandbytes.optim.lion.Lion#L8-L60","kind":"class","name":"Lion","path":"bitsandbytes/optim/lion.py","language":"python","start_line":8,"end_line":60,"context_start_line":1,"context_end_line":80,"code":"# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom bitsandbytes.optim.optimizer import Optimizer1State\n\n\nclass Lion(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-4,\n betas=(0.9, 0.99),\n weight_decay=0,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n Base Lion optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-4):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n weight_decay (`float`, defaults to 0):\n The weight decay value for the optimizer.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"lion\",\n params,\n lr,\n betas,\n 0.0,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass Lion8bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-4,\n betas=(0.9, 0.99),\n weight_decay=0,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 8-bit Lion optimizer.\n\n Arguments:\n params (`torch.tensor`):","source_hash":"6ad8e22396d988cb9903a33cbb393807e2fc5cc23158486ea2e6ba0664bac5e9","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lion.Lion8bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.lion.Lion8bit#L63-L112","kind":"class","name":"Lion8bit","path":"bitsandbytes/optim/lion.py","language":"python","start_line":63,"end_line":112,"context_start_line":43,"context_end_line":132,"code":" Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"lion\",\n params,\n lr,\n betas,\n 0.0,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass Lion8bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-4,\n betas=(0.9, 0.99),\n weight_decay=0,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 8-bit Lion optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-4):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n weight_decay (`float`, defaults to 0):\n The weight decay value for the optimizer.\n args (`object`, defaults to 
`None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"lion\",\n params,\n lr,\n betas,\n 0.0,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass Lion32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-4,\n betas=(0.9, 0.99),\n weight_decay=0,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 32-bit Lion optimizer.\n\n Arguments:\n params (`torch.tensor`):","source_hash":"6ad8e22396d988cb9903a33cbb393807e2fc5cc23158486ea2e6ba0664bac5e9","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lion.Lion32bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.lion.Lion32bit#L115-L164","kind":"class","name":"Lion32bit","path":"bitsandbytes/optim/lion.py","language":"python","start_line":115,"end_line":164,"context_start_line":95,"context_end_line":184,"code":" Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"lion\",\n params,\n lr,\n betas,\n 0.0,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass Lion32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-4,\n betas=(0.9, 0.99),\n weight_decay=0,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 32-bit Lion optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-4):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n weight_decay (`float`, defaults to 0):\n The weight decay value for the optimizer.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"lion\",\n params,\n lr,\n betas,\n 0.0,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass PagedLion(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-4,\n betas=(0.9, 0.99),\n weight_decay=0,\n optim_bits=32,\n args=None,\n 
min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Paged Lion optimizer.\n\n Arguments:\n params (`torch.tensor`):","source_hash":"6ad8e22396d988cb9903a33cbb393807e2fc5cc23158486ea2e6ba0664bac5e9","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lion.PagedLion","uri":"program://bitsandbytes/class/bitsandbytes.optim.lion.PagedLion#L167-L216","kind":"class","name":"PagedLion","path":"bitsandbytes/optim/lion.py","language":"python","start_line":167,"end_line":216,"context_start_line":147,"context_end_line":236,"code":" Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"lion\",\n params,\n lr,\n betas,\n 0.0,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass PagedLion(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-4,\n betas=(0.9, 0.99),\n weight_decay=0,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Paged Lion optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-4):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n weight_decay (`float`, defaults to 0):\n The weight decay value for the optimizer.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n super().__init__(\n \"lion\",\n params,\n lr,\n betas,\n 0.0,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedLion8bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-4,\n betas=(0.9, 0.99),\n weight_decay=0,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Paged 8-bit Lion optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.","source_hash":"6ad8e22396d988cb9903a33cbb393807e2fc5cc23158486ea2e6ba0664bac5e9","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lion.PagedLion8bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.lion.PagedLion8bit#L219-L267","kind":"class","name":"PagedLion8bit","path":"bitsandbytes/optim/lion.py","language":"python","start_line":219,"end_line":267,"context_start_line":199,"context_end_line":287,"code":" Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n super().__init__(\n \"lion\",\n params,\n 
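All six Lion classes funnel into Optimizer1State with a literal `0.0` in the epsilon slot, since Lion's update has no epsilon term; the only arguments that vary are the state bits and the paging flag. A compact summary matching the constructors in this file ("caller" marks values forwarded from the constructor argument):

```python
# How each Lion subclass pins (state bits, paged state) when calling
# Optimizer1State; the eps positional argument is always the literal 0.0.
LION_VARIANTS = {
    "Lion":           ("caller", "caller"),
    "Lion8bit":       (8,        "caller"),
    "Lion32bit":      (32,       "caller"),
    "PagedLion":      ("caller", True),
    "PagedLion8bit":  (8,        True),
    "PagedLion32bit": (32,       True),
}

for name, (bits, paged) in LION_VARIANTS.items():
    print(f"{name:15s} bits={bits!s:7s} paged={paged}")
```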
lr,\n betas,\n 0.0,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedLion8bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-4,\n betas=(0.9, 0.99),\n weight_decay=0,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Paged 8-bit Lion optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-4):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n weight_decay (`float`, defaults to 0):\n The weight decay value for the optimizer.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n super().__init__(\n \"lion\",\n params,\n lr,\n betas,\n 0.0,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedLion32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-4,\n betas=(0.9, 0.99),\n weight_decay=0,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Paged 32-bit Lion optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.","source_hash":"6ad8e22396d988cb9903a33cbb393807e2fc5cc23158486ea2e6ba0664bac5e9","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lion.PagedLion32bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.lion.PagedLion32bit#L270-L318","kind":"class","name":"PagedLion32bit","path":"bitsandbytes/optim/lion.py","language":"python","start_line":270,"end_line":318,"context_start_line":250,"context_end_line":318,"code":" Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n super().__init__(\n \"lion\",\n params,\n lr,\n betas,\n 0.0,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedLion32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-4,\n betas=(0.9, 0.99),\n weight_decay=0,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Paged 32-bit Lion optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-4):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n weight_decay (`float`, defaults to 0):\n The weight decay value for the optimizer.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args 
(`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n super().__init__(\n \"lion\",\n params,\n lr,\n betas,\n 0.0,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )","source_hash":"6ad8e22396d988cb9903a33cbb393807e2fc5cc23158486ea2e6ba0664bac5e9","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lion.__init__","uri":"program://bitsandbytes/function/bitsandbytes.optim.lion.__init__#L271-L318","kind":"function","name":"__init__","path":"bitsandbytes/optim/lion.py","language":"python","start_line":271,"end_line":318,"context_start_line":251,"context_end_line":318,"code":" block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n super().__init__(\n \"lion\",\n params,\n lr,\n betas,\n 0.0,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedLion32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-4,\n betas=(0.9, 0.99),\n weight_decay=0,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Paged 32-bit Lion optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-4):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n weight_decay (`float`, defaults to 0):\n The weight decay value for the optimizer.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n super().__init__(\n \"lion\",\n params,\n lr,\n betas,\n 0.0,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )","source_hash":"6ad8e22396d988cb9903a33cbb393807e2fc5cc23158486ea2e6ba0664bac5e9","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.ademamix","uri":"program://bitsandbytes/module/bitsandbytes.optim.ademamix#L1-L416","kind":"module","name":"bitsandbytes.optim.ademamix","path":"bitsandbytes/optim/ademamix.py","language":"python","start_line":1,"end_line":416,"context_start_line":1,"context_end_line":416,"code":"from collections.abc import Iterable\nimport math\nfrom typing import Literal, Optional\n\nimport torch\n\nimport bitsandbytes.functional as F\nfrom bitsandbytes.optim.optimizer 
import Optimizer2State\n\n\nclass _ReferenceAdEMAMix(torch.optim.Optimizer):\n \"\"\"\n Reference: https://hf.co/papers/2409.03137\n \"\"\"\n\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n eps: float = 1e-8,\n weight_decay: float = 1e-2, # default 0.0 or 1e-2?\n t_beta3: Optional[int] = None,\n t_alpha: Optional[int] = None,\n ):\n defaults = dict(\n lr=lr, betas=betas, alpha=alpha, eps=eps, weight_decay=weight_decay, t_beta3=t_beta3, t_alpha=t_alpha\n )\n\n super().__init__(params, defaults)\n\n @torch.no_grad()\n def step(self, closure=None):\n loss = None\n\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n if \"step\" in group:\n group[\"step\"] += 1\n else:\n group[\"step\"] = 1\n\n lr = group[\"lr\"]\n eps = group[\"eps\"]\n beta1, beta2, beta3 = group[\"betas\"]\n alpha = group[\"alpha\"]\n t_alpha = group[\"t_alpha\"]\n t_beta3 = group[\"t_beta3\"]\n weight_decay = group[\"weight_decay\"]\n\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n\n grad = p.grad\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n # For parity with bnb implementation we combine both fast\n # and slow EMA stats into one stacked tensor.\n state[\"m1_m2\"] = p.new_zeros((2, *p.size()))\n state[\"nu\"] = torch.zeros_like(p) # second moment estimate\n\n m1, m2, nu = state[\"m1_m2\"][0], state[\"m1_m2\"][1], state[\"nu\"]\n\n bias_correction1 = 1 - beta1 ** group[\"step\"]\n\n bias_correction2 = 1 - beta2 ** group[\"step\"]\n\n # Apply scheduler for alpha\n if t_alpha is not None:\n alpha = min(group[\"step\"] * alpha / t_alpha, alpha)\n\n # Apply scheduler for beta3\n if t_beta3 is not None:\n ln_beta1 = math.log(beta1)\n ln_beta3 = math.log(beta3)\n step_scale = group[\"step\"] / t_beta3\n beta3 = min(\n math.exp((ln_beta1 * ln_beta3) / (((1 - step_scale) * ln_beta3) + (step_scale * ln_beta1))),\n beta3,\n )\n\n # Update the EMAs\n m1.mul_(beta1).add_(grad, alpha=1 - beta1)\n m2.mul_(beta3).add_(grad, alpha=1 - beta3)\n nu.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n\n # Compute step\n denom = (nu.sqrt() / (bias_correction2**0.5)).add(eps)\n update = (m1.div(bias_correction1) + alpha * m2) / denom\n\n # Add weight decay\n update.add_(p, alpha=weight_decay)\n\n # Apply update scaled by learning rate\n p.add_(-lr * update)\n\n return loss\n\n\nclass AdEMAMix(Optimizer2State):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n optim_bits: Literal[8, 32] = 32,\n min_8bit_size: int = 4096,\n is_paged: bool = False,\n ):\n super().__init__(\n \"ademamix\",\n params=params,\n lr=lr,\n betas=betas,\n eps=eps,\n weight_decay=weight_decay,\n optim_bits=optim_bits,\n args=None,\n min_8bit_size=min_8bit_size,\n percentile_clipping=100,\n block_wise=True,\n is_paged=is_paged,\n alpha=alpha,\n t_alpha=t_alpha,\n t_beta3=t_beta3,\n )\n\n @torch.no_grad()\n def init_state(self, group, p, gindex, pindex):\n # In our AdEMAMix implementation, we use `state` to hold\n # both the fast and slow EMAs. 
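The two schedulers in the reference step are worth isolating: alpha warms up linearly and is capped at `alpha`, while beta3 warms up from `beta1` toward `beta3` through an interpolation of their logarithms (the AdEMAMix paper motivates this as keeping the growth of the slow EMA's effective half-life well behaved). A standalone sketch matching the expressions above:

```python
import math

def ademamix_schedules(step, alpha, beta1, beta3, t_alpha=None, t_beta3=None):
    """Scheduled (alpha_t, beta3_t), as computed in the reference step above."""
    # Linear warmup of alpha over t_alpha steps, then constant.
    alpha_t = min(step * alpha / t_alpha, alpha) if t_alpha else alpha
    # beta3 interpolates from beta1 (at step 0) to beta3 (at step t_beta3).
    if t_beta3:
        ln_b1, ln_b3 = math.log(beta1), math.log(beta3)
        s = step / t_beta3
        beta3_t = min(math.exp(ln_b1 * ln_b3 / ((1 - s) * ln_b3 + s * ln_b1)), beta3)
    else:
        beta3_t = beta3
    return alpha_t, beta3_t

for step in (1, 100, 1_000, 10_000):
    print(step, ademamix_schedules(step, alpha=5.0, beta1=0.9, beta3=0.9999,
                                   t_alpha=10_000, t_beta3=10_000))
```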
Here we override the base\n # `Optimizer2State` to allocate a buffer twice as large.\n # Additional consideration: we do not support block_wise=False,\n # percentile clipping, or max_unorm.\n\n config = self.get_config(gindex, pindex, group)\n\n if config[\"optim_bits\"] == 32:\n dtype = torch.float32\n elif config[\"optim_bits\"] == 8:\n dtype = torch.uint8\n else:\n raise NotImplementedError(f\"Amount of optimizer bits not supported: {config['optim_bits']}\")\n\n if p.numel() < config[\"min_8bit_size\"]:\n dtype = torch.float32\n\n state = self.state[p]\n state[\"step\"] = 0\n\n if dtype == torch.uint8:\n if \"dynamic\" not in self.name2qmap:\n self.fill_qmap()\n self.name2qmap[\"dynamic\"] = state[\"qmap1\"] = self.name2qmap[\"dynamic\"].to(p.device)\n self.name2qmap[\"udynamic\"] = state[\"qmap2\"] = self.name2qmap[\"udynamic\"].to(p.device)\n\n blocksize = 256\n n = p.numel()\n blocks = (n // blocksize) + bool(n % blocksize)\n\n state[\"absmax1\"] = torch.zeros((2, blocks), dtype=torch.float32, device=p.device)\n state[\"absmax2\"] = torch.zeros((blocks,), dtype=torch.float32, device=p.device)\n\n state[\"state1\"] = self._get_state_double_buffer(p, dtype=dtype)\n state[\"state2\"] = self.get_state_buffer(p, dtype=dtype)\n\n @torch.no_grad()\n def update_step(self, group, p, gindex, pindex):\n config = self.get_config(gindex, pindex, group)\n\n if config[\"t_alpha\"] is None and config[\"t_beta3\"] is None:\n # Not using alpha/beta3 scheduler; we can fall through.\n super().update_step(group, p, gindex, pindex)\n return\n\n # Ensure contiguous memory layout\n p.data = p.data.contiguous()\n p.grad = p.grad.contiguous()\n\n state = self.state[p]\n grad = p.grad\n\n state[\"step\"] += 1\n step = state[\"step\"]\n\n beta1, beta2, beta3 = config[\"betas\"]\n alpha = config[\"alpha\"]\n t_alpha = config[\"t_alpha\"]\n t_beta3 = config[\"t_beta3\"]\n\n # Apply scheduler for alpha\n if t_alpha is not None:\n alpha_t = min(step * alpha / t_alpha, alpha)\n else:\n alpha_t = alpha\n\n # Apply scheduler for beta3\n if t_beta3 is not None:\n ln_beta1 = math.log(beta1)\n ln_beta3 = math.log(beta3)\n step_scale = step / t_beta3\n beta3_t = min(\n math.exp((ln_beta1 * ln_beta3) / (((1 - step_scale) * ln_beta3) + (step_scale * ln_beta1))), beta3\n )\n else:\n beta3_t = beta3\n\n # Apply updates\n if state[\"state1\"].dtype == torch.float32:\n F.optimizer_update_32bit(\n self.optimizer_name,\n grad,\n p,\n state[\"state1\"],\n beta1,\n config[\"eps\"],\n step,\n config[\"lr\"],\n state[\"state2\"],\n beta2,\n beta3_t,\n alpha_t,\n config[\"weight_decay\"],\n gnorm_scale=1.0,\n unorm_vec=state[\"unorm_vec\"] if config[\"max_unorm\"] > 0.0 else None,\n max_unorm=config[\"max_unorm\"],\n skip_zeros=config[\"skip_zeros\"],\n )\n elif state[\"state1\"].dtype == torch.uint8:\n F.optimizer_update_8bit_blockwise(\n self.optimizer_name,\n grad,\n p,\n state[\"state1\"],\n state[\"state2\"],\n config[\"betas\"][0],\n config[\"betas\"][1],\n beta3_t,\n alpha_t,\n config[\"eps\"],\n step,\n config[\"lr\"],\n state[\"qmap1\"],\n state[\"qmap2\"],\n state[\"absmax1\"],\n state[\"absmax2\"],\n config[\"weight_decay\"],\n gnorm_scale=1.0,\n skip_zeros=config[\"skip_zeros\"],\n )\n\n def _get_state_double_buffer(self, p, dtype=torch.float32):\n if not self.is_paged or p.numel() < 1e5:\n return torch.zeros((2, *p.size()), dtype=dtype, device=p.device)\n else:\n buff = F.get_paged(*(2, *p.size()), dtype=dtype, device=p.device)\n F.fill(buff, 0)\n self.page_mng.paged_tensors.append(buff)\n return buff\n\n\nclass 
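A detail worth calling out in `init_state`: the fast and slow EMAs share one stacked `(2, *p.size())` buffer, which is why `absmax1` carries a leading dimension of 2 while `absmax2`, covering the single second-moment buffer, does not. The block count is plain ceiling division over the 256-element quantization blocks:

```python
blocksize = 256

def n_blocks(n):
    # Same as the (n // blocksize) + bool(n % blocksize) expression above:
    # ceiling division of n elements into 256-element quantization blocks.
    return (n // blocksize) + bool(n % blocksize)

assert n_blocks(256) == 1
assert n_blocks(257) == 2
assert n_blocks(4096 * 4096) == 65536

# Resulting 8-bit AdEMAMix state for a parameter with n elements:
#   state1:  uint8, shape (2, *p.size())   -> fast + slow EMA
#   state2:  uint8, shape p.size()         -> second moment
#   absmax1: fp32,  shape (2, n_blocks(n)) -> scales for both EMAs
#   absmax2: fp32,  shape (n_blocks(n),)   -> scales for the second moment
```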
AdEMAMix8bit(AdEMAMix):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n min_8bit_size: int = 4096,\n is_paged: bool = False,\n ):\n super().__init__(\n params,\n lr=lr,\n betas=betas,\n alpha=alpha,\n t_alpha=t_alpha,\n t_beta3=t_beta3,\n eps=eps,\n weight_decay=weight_decay,\n optim_bits=8,\n min_8bit_size=min_8bit_size,\n is_paged=is_paged,\n )\n\n\nclass PagedAdEMAMix8bit(AdEMAMix8bit):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n min_8bit_size: int = 4096,\n ):\n super().__init__(\n params,\n lr=lr,\n betas=betas,\n alpha=alpha,\n t_alpha=t_alpha,\n t_beta3=t_beta3,\n eps=eps,\n weight_decay=weight_decay,\n min_8bit_size=min_8bit_size,\n is_paged=True,\n )\n\n\nclass PagedAdEMAMix(AdEMAMix):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n optim_bits: Literal[8, 32] = 32,\n min_8bit_size: int = 4096,\n ):\n super().__init__(\n params,\n lr=lr,\n betas=betas,\n alpha=alpha,\n t_alpha=t_alpha,\n t_beta3=t_beta3,\n eps=eps,\n weight_decay=weight_decay,\n optim_bits=optim_bits,\n min_8bit_size=min_8bit_size,\n is_paged=True,\n )\n\n\nclass AdEMAMix32bit(Optimizer2State):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n min_8bit_size: int = 4096,\n is_paged: bool = False,\n ):\n super().__init__(\n \"ademamix\",\n params=params,\n lr=lr,\n betas=betas,\n eps=eps,\n weight_decay=weight_decay,\n optim_bits=32,\n args=None,\n min_8bit_size=min_8bit_size,\n percentile_clipping=100,\n block_wise=True,\n is_paged=is_paged,\n alpha=alpha,\n t_alpha=t_alpha,\n t_beta3=t_beta3,\n )\n\n\nclass PagedAdEMAMix32bit(AdEMAMix32bit):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n min_8bit_size: int = 4096,\n ):\n super().__init__(\n params,\n lr=lr,\n betas=betas,\n alpha=alpha,\n t_alpha=t_alpha,\n t_beta3=t_beta3,\n eps=eps,\n weight_decay=weight_decay,\n min_8bit_size=min_8bit_size,\n is_paged=True,\n )","source_hash":"fbe7d4db179aa35b487b9cc9bec3629791b9f5e7d7fd2a85c5da560e9e3e883d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.ademamix._ReferenceAdEMAMix","uri":"program://bitsandbytes/class/bitsandbytes.optim.ademamix._ReferenceAdEMAMix#L11-L104","kind":"class","name":"_ReferenceAdEMAMix","path":"bitsandbytes/optim/ademamix.py","language":"python","start_line":11,"end_line":104,"context_start_line":1,"context_end_line":124,"code":"from collections.abc import Iterable\nimport math\nfrom typing import 
Literal, Optional\n\nimport torch\n\nimport bitsandbytes.functional as F\nfrom bitsandbytes.optim.optimizer import Optimizer2State\n\n\nclass _ReferenceAdEMAMix(torch.optim.Optimizer):\n \"\"\"\n Reference: https://hf.co/papers/2409.03137\n \"\"\"\n\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n eps: float = 1e-8,\n weight_decay: float = 1e-2, # default 0.0 or 1e-2?\n t_beta3: Optional[int] = None,\n t_alpha: Optional[int] = None,\n ):\n defaults = dict(\n lr=lr, betas=betas, alpha=alpha, eps=eps, weight_decay=weight_decay, t_beta3=t_beta3, t_alpha=t_alpha\n )\n\n super().__init__(params, defaults)\n\n @torch.no_grad()\n def step(self, closure=None):\n loss = None\n\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n if \"step\" in group:\n group[\"step\"] += 1\n else:\n group[\"step\"] = 1\n\n lr = group[\"lr\"]\n eps = group[\"eps\"]\n beta1, beta2, beta3 = group[\"betas\"]\n alpha = group[\"alpha\"]\n t_alpha = group[\"t_alpha\"]\n t_beta3 = group[\"t_beta3\"]\n weight_decay = group[\"weight_decay\"]\n\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n\n grad = p.grad\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n # For parity with bnb implementation we combine both fast\n # and slow EMA stats into one stacked tensor.\n state[\"m1_m2\"] = p.new_zeros((2, *p.size()))\n state[\"nu\"] = torch.zeros_like(p) # second moment estimate\n\n m1, m2, nu = state[\"m1_m2\"][0], state[\"m1_m2\"][1], state[\"nu\"]\n\n bias_correction1 = 1 - beta1 ** group[\"step\"]\n\n bias_correction2 = 1 - beta2 ** group[\"step\"]\n\n # Apply scheduler for alpha\n if t_alpha is not None:\n alpha = min(group[\"step\"] * alpha / t_alpha, alpha)\n\n # Apply scheduler for beta3\n if t_beta3 is not None:\n ln_beta1 = math.log(beta1)\n ln_beta3 = math.log(beta3)\n step_scale = group[\"step\"] / t_beta3\n beta3 = min(\n math.exp((ln_beta1 * ln_beta3) / (((1 - step_scale) * ln_beta3) + (step_scale * ln_beta1))),\n beta3,\n )\n\n # Update the EMAs\n m1.mul_(beta1).add_(grad, alpha=1 - beta1)\n m2.mul_(beta3).add_(grad, alpha=1 - beta3)\n nu.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n\n # Compute step\n denom = (nu.sqrt() / (bias_correction2**0.5)).add(eps)\n update = (m1.div(bias_correction1) + alpha * m2) / denom\n\n # Add weight decay\n update.add_(p, alpha=weight_decay)\n\n # Apply update scaled by learning rate\n p.add_(-lr * update)\n\n return loss\n\n\nclass AdEMAMix(Optimizer2State):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n optim_bits: Literal[8, 32] = 32,\n min_8bit_size: int = 4096,\n is_paged: bool = False,\n ):\n super().__init__(\n \"ademamix\",\n params=params,","source_hash":"fbe7d4db179aa35b487b9cc9bec3629791b9f5e7d7fd2a85c5da560e9e3e883d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.ademamix.AdEMAMix","uri":"program://bitsandbytes/class/bitsandbytes.optim.ademamix.AdEMAMix#L107-L271","kind":"class","name":"AdEMAMix","path":"bitsandbytes/optim/ademamix.py","language":"python","start_line":107,"end_line":271,"context_start_line":87,"context_end_line":291,"code":" )\n\n # Update the EMAs\n 
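In equation form, the reference step above performs the following update, where $g$ is the gradient, $t$ the step count, $\eta$ the learning rate, and $\lambda$ the decoupled weight decay; note that only $m_1$ and $\nu$ are bias-corrected, while the slow EMA $m_2$ enters unscaled:

```latex
\begin{aligned}
m_1 &\leftarrow \beta_1 m_1 + (1-\beta_1)\,g, &
m_2 &\leftarrow \beta_3 m_2 + (1-\beta_3)\,g, &
\nu &\leftarrow \beta_2 \nu + (1-\beta_2)\,g^2,\\
\theta &\leftarrow \theta - \eta\left(
    \frac{m_1/(1-\beta_1^{t}) + \alpha\, m_2}
         {\sqrt{\nu/(1-\beta_2^{t})} + \epsilon}
    + \lambda\,\theta\right). &&&&
\end{aligned}
```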
m1.mul_(beta1).add_(grad, alpha=1 - beta1)\n m2.mul_(beta3).add_(grad, alpha=1 - beta3)\n nu.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n\n # Compute step\n denom = (nu.sqrt() / (bias_correction2**0.5)).add(eps)\n update = (m1.div(bias_correction1) + alpha * m2) / denom\n\n # Add weight decay\n update.add_(p, alpha=weight_decay)\n\n # Apply update scaled by learning rate\n p.add_(-lr * update)\n\n return loss\n\n\nclass AdEMAMix(Optimizer2State):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n optim_bits: Literal[8, 32] = 32,\n min_8bit_size: int = 4096,\n is_paged: bool = False,\n ):\n super().__init__(\n \"ademamix\",\n params=params,\n lr=lr,\n betas=betas,\n eps=eps,\n weight_decay=weight_decay,\n optim_bits=optim_bits,\n args=None,\n min_8bit_size=min_8bit_size,\n percentile_clipping=100,\n block_wise=True,\n is_paged=is_paged,\n alpha=alpha,\n t_alpha=t_alpha,\n t_beta3=t_beta3,\n )\n\n @torch.no_grad()\n def init_state(self, group, p, gindex, pindex):\n # In our AdEMAMix implementation, we use `state` to hold\n # both the fast and slow EMAs. Here we override the base\n # `Optimizer2State` to allocate a buffer twice as large.\n # Additional consideration: we do not support block_wise=False,\n # percentile clipping, or max_unorm.\n\n config = self.get_config(gindex, pindex, group)\n\n if config[\"optim_bits\"] == 32:\n dtype = torch.float32\n elif config[\"optim_bits\"] == 8:\n dtype = torch.uint8\n else:\n raise NotImplementedError(f\"Amount of optimizer bits not supported: {config['optim_bits']}\")\n\n if p.numel() < config[\"min_8bit_size\"]:\n dtype = torch.float32\n\n state = self.state[p]\n state[\"step\"] = 0\n\n if dtype == torch.uint8:\n if \"dynamic\" not in self.name2qmap:\n self.fill_qmap()\n self.name2qmap[\"dynamic\"] = state[\"qmap1\"] = self.name2qmap[\"dynamic\"].to(p.device)\n self.name2qmap[\"udynamic\"] = state[\"qmap2\"] = self.name2qmap[\"udynamic\"].to(p.device)\n\n blocksize = 256\n n = p.numel()\n blocks = (n // blocksize) + bool(n % blocksize)\n\n state[\"absmax1\"] = torch.zeros((2, blocks), dtype=torch.float32, device=p.device)\n state[\"absmax2\"] = torch.zeros((blocks,), dtype=torch.float32, device=p.device)\n\n state[\"state1\"] = self._get_state_double_buffer(p, dtype=dtype)\n state[\"state2\"] = self.get_state_buffer(p, dtype=dtype)\n\n @torch.no_grad()\n def update_step(self, group, p, gindex, pindex):\n config = self.get_config(gindex, pindex, group)\n\n if config[\"t_alpha\"] is None and config[\"t_beta3\"] is None:\n # Not using alpha/beta3 scheduler; we can fall through.\n super().update_step(group, p, gindex, pindex)\n return\n\n # Ensure contiguous memory layout\n p.data = p.data.contiguous()\n p.grad = p.grad.contiguous()\n\n state = self.state[p]\n grad = p.grad\n\n state[\"step\"] += 1\n step = state[\"step\"]\n\n beta1, beta2, beta3 = config[\"betas\"]\n alpha = config[\"alpha\"]\n t_alpha = config[\"t_alpha\"]\n t_beta3 = config[\"t_beta3\"]\n\n # Apply scheduler for alpha\n if t_alpha is not None:\n alpha_t = min(step * alpha / t_alpha, alpha)\n else:\n alpha_t = alpha\n\n # Apply scheduler for beta3\n if t_beta3 is not None:\n ln_beta1 = math.log(beta1)\n ln_beta3 = math.log(beta3)\n step_scale = step / t_beta3\n beta3_t = min(\n math.exp((ln_beta1 * ln_beta3) / (((1 - step_scale) * ln_beta3) + 
(step_scale * ln_beta1))), beta3\n )\n else:\n beta3_t = beta3\n\n # Apply updates\n if state[\"state1\"].dtype == torch.float32:\n F.optimizer_update_32bit(\n self.optimizer_name,\n grad,\n p,\n state[\"state1\"],\n beta1,\n config[\"eps\"],\n step,\n config[\"lr\"],\n state[\"state2\"],\n beta2,\n beta3_t,\n alpha_t,\n config[\"weight_decay\"],\n gnorm_scale=1.0,\n unorm_vec=state[\"unorm_vec\"] if config[\"max_unorm\"] > 0.0 else None,\n max_unorm=config[\"max_unorm\"],\n skip_zeros=config[\"skip_zeros\"],\n )\n elif state[\"state1\"].dtype == torch.uint8:\n F.optimizer_update_8bit_blockwise(\n self.optimizer_name,\n grad,\n p,\n state[\"state1\"],\n state[\"state2\"],\n config[\"betas\"][0],\n config[\"betas\"][1],\n beta3_t,\n alpha_t,\n config[\"eps\"],\n step,\n config[\"lr\"],\n state[\"qmap1\"],\n state[\"qmap2\"],\n state[\"absmax1\"],\n state[\"absmax2\"],\n config[\"weight_decay\"],\n gnorm_scale=1.0,\n skip_zeros=config[\"skip_zeros\"],\n )\n\n def _get_state_double_buffer(self, p, dtype=torch.float32):\n if not self.is_paged or p.numel() < 1e5:\n return torch.zeros((2, *p.size()), dtype=dtype, device=p.device)\n else:\n buff = F.get_paged(*(2, *p.size()), dtype=dtype, device=p.device)\n F.fill(buff, 0)\n self.page_mng.paged_tensors.append(buff)\n return buff\n\n\nclass AdEMAMix8bit(AdEMAMix):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n min_8bit_size: int = 4096,\n is_paged: bool = False,\n ):\n super().__init__(\n params,\n lr=lr,\n betas=betas,","source_hash":"fbe7d4db179aa35b487b9cc9bec3629791b9f5e7d7fd2a85c5da560e9e3e883d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.ademamix.AdEMAMix8bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.ademamix.AdEMAMix8bit#L274-L300","kind":"class","name":"AdEMAMix8bit","path":"bitsandbytes/optim/ademamix.py","language":"python","start_line":274,"end_line":300,"context_start_line":254,"context_end_line":320,"code":" config[\"lr\"],\n state[\"qmap1\"],\n state[\"qmap2\"],\n state[\"absmax1\"],\n state[\"absmax2\"],\n config[\"weight_decay\"],\n gnorm_scale=1.0,\n skip_zeros=config[\"skip_zeros\"],\n )\n\n def _get_state_double_buffer(self, p, dtype=torch.float32):\n if not self.is_paged or p.numel() < 1e5:\n return torch.zeros((2, *p.size()), dtype=dtype, device=p.device)\n else:\n buff = F.get_paged(*(2, *p.size()), dtype=dtype, device=p.device)\n F.fill(buff, 0)\n self.page_mng.paged_tensors.append(buff)\n return buff\n\n\nclass AdEMAMix8bit(AdEMAMix):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n min_8bit_size: int = 4096,\n is_paged: bool = False,\n ):\n super().__init__(\n params,\n lr=lr,\n betas=betas,\n alpha=alpha,\n t_alpha=t_alpha,\n t_beta3=t_beta3,\n eps=eps,\n weight_decay=weight_decay,\n optim_bits=8,\n min_8bit_size=min_8bit_size,\n is_paged=is_paged,\n )\n\n\nclass PagedAdEMAMix8bit(AdEMAMix8bit):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: 
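`update_step` only takes the custom path when a scheduler is active; otherwise it defers to the stock Optimizer2State step. When active, it recomputes `alpha_t` and `beta3_t` every step and feeds them to the fused 32-bit or blockwise 8-bit kernel, depending on the state dtype. A usage sketch with both warmups enabled (the toy model, CUDA device, and 10k-step horizon are illustrative assumptions):

```python
import torch
import bitsandbytes as bnb

model = torch.nn.Linear(4096, 4096).cuda()

# Warm up both the slow-EMA mixing coefficient (alpha) and its decay rate
# (beta3) over the first 10k steps, as handled by update_step above.
opt = bnb.optim.AdEMAMix8bit(
    model.parameters(),
    lr=1e-3,
    betas=(0.9, 0.999, 0.9999),
    alpha=5.0,
    t_alpha=10_000,
    t_beta3=10_000,
)

model(torch.randn(8, 4096, device="cuda")).sum().backward()
opt.step()
opt.zero_grad()
```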
Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n min_8bit_size: int = 4096,\n ):\n super().__init__(\n params,\n lr=lr,\n betas=betas,\n alpha=alpha,","source_hash":"fbe7d4db179aa35b487b9cc9bec3629791b9f5e7d7fd2a85c5da560e9e3e883d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.ademamix.PagedAdEMAMix8bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.ademamix.PagedAdEMAMix8bit#L303-L327","kind":"class","name":"PagedAdEMAMix8bit","path":"bitsandbytes/optim/ademamix.py","language":"python","start_line":303,"end_line":327,"context_start_line":283,"context_end_line":347,"code":" eps: float = 1e-8,\n weight_decay: float = 1e-2,\n min_8bit_size: int = 4096,\n is_paged: bool = False,\n ):\n super().__init__(\n params,\n lr=lr,\n betas=betas,\n alpha=alpha,\n t_alpha=t_alpha,\n t_beta3=t_beta3,\n eps=eps,\n weight_decay=weight_decay,\n optim_bits=8,\n min_8bit_size=min_8bit_size,\n is_paged=is_paged,\n )\n\n\nclass PagedAdEMAMix8bit(AdEMAMix8bit):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n min_8bit_size: int = 4096,\n ):\n super().__init__(\n params,\n lr=lr,\n betas=betas,\n alpha=alpha,\n t_alpha=t_alpha,\n t_beta3=t_beta3,\n eps=eps,\n weight_decay=weight_decay,\n min_8bit_size=min_8bit_size,\n is_paged=True,\n )\n\n\nclass PagedAdEMAMix(AdEMAMix):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n optim_bits: Literal[8, 32] = 32,\n min_8bit_size: int = 4096,\n ):\n super().__init__(\n params,\n lr=lr,\n betas=betas,","source_hash":"fbe7d4db179aa35b487b9cc9bec3629791b9f5e7d7fd2a85c5da560e9e3e883d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.ademamix.PagedAdEMAMix","uri":"program://bitsandbytes/class/bitsandbytes.optim.ademamix.PagedAdEMAMix#L330-L356","kind":"class","name":"PagedAdEMAMix","path":"bitsandbytes/optim/ademamix.py","language":"python","start_line":330,"end_line":356,"context_start_line":310,"context_end_line":376,"code":" t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n min_8bit_size: int = 4096,\n ):\n super().__init__(\n params,\n lr=lr,\n betas=betas,\n alpha=alpha,\n t_alpha=t_alpha,\n t_beta3=t_beta3,\n eps=eps,\n weight_decay=weight_decay,\n min_8bit_size=min_8bit_size,\n is_paged=True,\n )\n\n\nclass PagedAdEMAMix(AdEMAMix):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n optim_bits: Literal[8, 32] = 32,\n min_8bit_size: int = 4096,\n ):\n super().__init__(\n params,\n lr=lr,\n betas=betas,\n alpha=alpha,\n t_alpha=t_alpha,\n t_beta3=t_beta3,\n eps=eps,\n weight_decay=weight_decay,\n optim_bits=optim_bits,\n min_8bit_size=min_8bit_size,\n is_paged=True,\n )\n\n\nclass AdEMAMix32bit(Optimizer2State):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, 
float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n min_8bit_size: int = 4096,\n is_paged: bool = False,\n ):\n super().__init__(\n \"ademamix\",\n params=params,\n lr=lr,","source_hash":"fbe7d4db179aa35b487b9cc9bec3629791b9f5e7d7fd2a85c5da560e9e3e883d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.ademamix.AdEMAMix32bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.ademamix.AdEMAMix32bit#L359-L389","kind":"class","name":"AdEMAMix32bit","path":"bitsandbytes/optim/ademamix.py","language":"python","start_line":359,"end_line":389,"context_start_line":339,"context_end_line":409,"code":" eps: float = 1e-8,\n weight_decay: float = 1e-2,\n optim_bits: Literal[8, 32] = 32,\n min_8bit_size: int = 4096,\n ):\n super().__init__(\n params,\n lr=lr,\n betas=betas,\n alpha=alpha,\n t_alpha=t_alpha,\n t_beta3=t_beta3,\n eps=eps,\n weight_decay=weight_decay,\n optim_bits=optim_bits,\n min_8bit_size=min_8bit_size,\n is_paged=True,\n )\n\n\nclass AdEMAMix32bit(Optimizer2State):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n min_8bit_size: int = 4096,\n is_paged: bool = False,\n ):\n super().__init__(\n \"ademamix\",\n params=params,\n lr=lr,\n betas=betas,\n eps=eps,\n weight_decay=weight_decay,\n optim_bits=32,\n args=None,\n min_8bit_size=min_8bit_size,\n percentile_clipping=100,\n block_wise=True,\n is_paged=is_paged,\n alpha=alpha,\n t_alpha=t_alpha,\n t_beta3=t_beta3,\n )\n\n\nclass PagedAdEMAMix32bit(AdEMAMix32bit):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n min_8bit_size: int = 4096,\n ):\n super().__init__(\n params,\n lr=lr,\n betas=betas,\n alpha=alpha,","source_hash":"fbe7d4db179aa35b487b9cc9bec3629791b9f5e7d7fd2a85c5da560e9e3e883d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.ademamix.PagedAdEMAMix32bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.ademamix.PagedAdEMAMix32bit#L392-L416","kind":"class","name":"PagedAdEMAMix32bit","path":"bitsandbytes/optim/ademamix.py","language":"python","start_line":392,"end_line":416,"context_start_line":372,"context_end_line":416,"code":" ):\n super().__init__(\n \"ademamix\",\n params=params,\n lr=lr,\n betas=betas,\n eps=eps,\n weight_decay=weight_decay,\n optim_bits=32,\n args=None,\n min_8bit_size=min_8bit_size,\n percentile_clipping=100,\n block_wise=True,\n is_paged=is_paged,\n alpha=alpha,\n t_alpha=t_alpha,\n t_beta3=t_beta3,\n )\n\n\nclass PagedAdEMAMix32bit(AdEMAMix32bit):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n min_8bit_size: int = 4096,\n ):\n super().__init__(\n params,\n lr=lr,\n betas=betas,\n alpha=alpha,\n t_alpha=t_alpha,\n t_beta3=t_beta3,\n eps=eps,\n weight_decay=weight_decay,\n min_8bit_size=min_8bit_size,\n 
is_paged=True,\n )","source_hash":"fbe7d4db179aa35b487b9cc9bec3629791b9f5e7d7fd2a85c5da560e9e3e883d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.ademamix.__init__","uri":"program://bitsandbytes/function/bitsandbytes.optim.ademamix.__init__#L393-L416","kind":"function","name":"__init__","path":"bitsandbytes/optim/ademamix.py","language":"python","start_line":393,"end_line":416,"context_start_line":373,"context_end_line":416,"code":" super().__init__(\n \"ademamix\",\n params=params,\n lr=lr,\n betas=betas,\n eps=eps,\n weight_decay=weight_decay,\n optim_bits=32,\n args=None,\n min_8bit_size=min_8bit_size,\n percentile_clipping=100,\n block_wise=True,\n is_paged=is_paged,\n alpha=alpha,\n t_alpha=t_alpha,\n t_beta3=t_beta3,\n )\n\n\nclass PagedAdEMAMix32bit(AdEMAMix32bit):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n min_8bit_size: int = 4096,\n ):\n super().__init__(\n params,\n lr=lr,\n betas=betas,\n alpha=alpha,\n t_alpha=t_alpha,\n t_beta3=t_beta3,\n eps=eps,\n weight_decay=weight_decay,\n min_8bit_size=min_8bit_size,\n is_paged=True,\n )","source_hash":"fbe7d4db179aa35b487b9cc9bec3629791b9f5e7d7fd2a85c5da560e9e3e883d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.ademamix.step","uri":"program://bitsandbytes/function/bitsandbytes.optim.ademamix.step#L34-L104","kind":"function","name":"step","path":"bitsandbytes/optim/ademamix.py","language":"python","start_line":34,"end_line":104,"context_start_line":14,"context_end_line":124,"code":" \"\"\"\n\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n eps: float = 1e-8,\n weight_decay: float = 1e-2, # default 0.0 or 1e-2?\n t_beta3: Optional[int] = None,\n t_alpha: Optional[int] = None,\n ):\n defaults = dict(\n lr=lr, betas=betas, alpha=alpha, eps=eps, weight_decay=weight_decay, t_beta3=t_beta3, t_alpha=t_alpha\n )\n\n super().__init__(params, defaults)\n\n @torch.no_grad()\n def step(self, closure=None):\n loss = None\n\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n if \"step\" in group:\n group[\"step\"] += 1\n else:\n group[\"step\"] = 1\n\n lr = group[\"lr\"]\n eps = group[\"eps\"]\n beta1, beta2, beta3 = group[\"betas\"]\n alpha = group[\"alpha\"]\n t_alpha = group[\"t_alpha\"]\n t_beta3 = group[\"t_beta3\"]\n weight_decay = group[\"weight_decay\"]\n\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n\n grad = p.grad\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n # For parity with bnb implementation we combine both fast\n # and slow EMA stats into one stacked tensor.\n state[\"m1_m2\"] = p.new_zeros((2, *p.size()))\n state[\"nu\"] = torch.zeros_like(p) # second moment estimate\n\n m1, m2, nu = state[\"m1_m2\"][0], state[\"m1_m2\"][1], state[\"nu\"]\n\n bias_correction1 = 1 - beta1 ** group[\"step\"]\n\n bias_correction2 = 1 - beta2 ** group[\"step\"]\n\n # Apply scheduler for alpha\n if t_alpha is not None:\n alpha = min(group[\"step\"] * alpha / t_alpha, alpha)\n\n # Apply scheduler for beta3\n if t_beta3 is not None:\n ln_beta1 = math.log(beta1)\n ln_beta3 = math.log(beta3)\n step_scale = group[\"step\"] / t_beta3\n 
beta3 = min(\n math.exp((ln_beta1 * ln_beta3) / (((1 - step_scale) * ln_beta3) + (step_scale * ln_beta1))),\n beta3,\n )\n\n # Update the EMAs\n m1.mul_(beta1).add_(grad, alpha=1 - beta1)\n m2.mul_(beta3).add_(grad, alpha=1 - beta3)\n nu.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n\n # Compute step\n denom = (nu.sqrt() / (bias_correction2**0.5)).add(eps)\n update = (m1.div(bias_correction1) + alpha * m2) / denom\n\n # Add weight decay\n update.add_(p, alpha=weight_decay)\n\n # Apply update scaled by learning rate\n p.add_(-lr * update)\n\n return loss\n\n\nclass AdEMAMix(Optimizer2State):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n optim_bits: Literal[8, 32] = 32,\n min_8bit_size: int = 4096,\n is_paged: bool = False,\n ):\n super().__init__(\n \"ademamix\",\n params=params,","source_hash":"fbe7d4db179aa35b487b9cc9bec3629791b9f5e7d7fd2a85c5da560e9e3e883d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.ademamix.init_state","uri":"program://bitsandbytes/function/bitsandbytes.optim.ademamix.init_state#L141-L177","kind":"function","name":"init_state","path":"bitsandbytes/optim/ademamix.py","language":"python","start_line":141,"end_line":177,"context_start_line":121,"context_end_line":197,"code":" ):\n super().__init__(\n \"ademamix\",\n params=params,\n lr=lr,\n betas=betas,\n eps=eps,\n weight_decay=weight_decay,\n optim_bits=optim_bits,\n args=None,\n min_8bit_size=min_8bit_size,\n percentile_clipping=100,\n block_wise=True,\n is_paged=is_paged,\n alpha=alpha,\n t_alpha=t_alpha,\n t_beta3=t_beta3,\n )\n\n @torch.no_grad()\n def init_state(self, group, p, gindex, pindex):\n # In our AdEMAMix implementation, we use `state` to hold\n # both the fast and slow EMAs. 
Here we override the base\n # `Optimizer2State` to allocate a buffer twice as large.\n # Additional consideration: we do not support block_wise=False,\n # percentile clipping, or max_unorm.\n\n config = self.get_config(gindex, pindex, group)\n\n if config[\"optim_bits\"] == 32:\n dtype = torch.float32\n elif config[\"optim_bits\"] == 8:\n dtype = torch.uint8\n else:\n raise NotImplementedError(f\"Amount of optimizer bits not supported: {config['optim_bits']}\")\n\n if p.numel() < config[\"min_8bit_size\"]:\n dtype = torch.float32\n\n state = self.state[p]\n state[\"step\"] = 0\n\n if dtype == torch.uint8:\n if \"dynamic\" not in self.name2qmap:\n self.fill_qmap()\n self.name2qmap[\"dynamic\"] = state[\"qmap1\"] = self.name2qmap[\"dynamic\"].to(p.device)\n self.name2qmap[\"udynamic\"] = state[\"qmap2\"] = self.name2qmap[\"udynamic\"].to(p.device)\n\n blocksize = 256\n n = p.numel()\n blocks = (n // blocksize) + bool(n % blocksize)\n\n state[\"absmax1\"] = torch.zeros((2, blocks), dtype=torch.float32, device=p.device)\n state[\"absmax2\"] = torch.zeros((blocks,), dtype=torch.float32, device=p.device)\n\n state[\"state1\"] = self._get_state_double_buffer(p, dtype=dtype)\n state[\"state2\"] = self.get_state_buffer(p, dtype=dtype)\n\n @torch.no_grad()\n def update_step(self, group, p, gindex, pindex):\n config = self.get_config(gindex, pindex, group)\n\n if config[\"t_alpha\"] is None and config[\"t_beta3\"] is None:\n # Not using alpha/beta3 scheduler; we can fall through.\n super().update_step(group, p, gindex, pindex)\n return\n\n # Ensure contiguous memory layout\n p.data = p.data.contiguous()\n p.grad = p.grad.contiguous()\n\n state = self.state[p]\n grad = p.grad\n\n state[\"step\"] += 1\n step = state[\"step\"]\n","source_hash":"fbe7d4db179aa35b487b9cc9bec3629791b9f5e7d7fd2a85c5da560e9e3e883d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.ademamix.update_step","uri":"program://bitsandbytes/function/bitsandbytes.optim.ademamix.update_step#L180-L262","kind":"function","name":"update_step","path":"bitsandbytes/optim/ademamix.py","language":"python","start_line":180,"end_line":262,"context_start_line":160,"context_end_line":282,"code":" state = self.state[p]\n state[\"step\"] = 0\n\n if dtype == torch.uint8:\n if \"dynamic\" not in self.name2qmap:\n self.fill_qmap()\n self.name2qmap[\"dynamic\"] = state[\"qmap1\"] = self.name2qmap[\"dynamic\"].to(p.device)\n self.name2qmap[\"udynamic\"] = state[\"qmap2\"] = self.name2qmap[\"udynamic\"].to(p.device)\n\n blocksize = 256\n n = p.numel()\n blocks = (n // blocksize) + bool(n % blocksize)\n\n state[\"absmax1\"] = torch.zeros((2, blocks), dtype=torch.float32, device=p.device)\n state[\"absmax2\"] = torch.zeros((blocks,), dtype=torch.float32, device=p.device)\n\n state[\"state1\"] = self._get_state_double_buffer(p, dtype=dtype)\n state[\"state2\"] = self.get_state_buffer(p, dtype=dtype)\n\n @torch.no_grad()\n def update_step(self, group, p, gindex, pindex):\n config = self.get_config(gindex, pindex, group)\n\n if config[\"t_alpha\"] is None and config[\"t_beta3\"] is None:\n # Not using alpha/beta3 scheduler; we can fall through.\n super().update_step(group, p, gindex, pindex)\n return\n\n # Ensure contiguous memory layout\n p.data = p.data.contiguous()\n p.grad = p.grad.contiguous()\n\n state = self.state[p]\n grad = p.grad\n\n state[\"step\"] += 1\n step = state[\"step\"]\n\n beta1, beta2, beta3 = config[\"betas\"]\n alpha = config[\"alpha\"]\n t_alpha = config[\"t_alpha\"]\n t_beta3 = config[\"t_beta3\"]\n\n # 
Apply scheduler for alpha\n if t_alpha is not None:\n alpha_t = min(step * alpha / t_alpha, alpha)\n else:\n alpha_t = alpha\n\n # Apply scheduler for beta3\n if t_beta3 is not None:\n ln_beta1 = math.log(beta1)\n ln_beta3 = math.log(beta3)\n step_scale = step / t_beta3\n beta3_t = min(\n math.exp((ln_beta1 * ln_beta3) / (((1 - step_scale) * ln_beta3) + (step_scale * ln_beta1))), beta3\n )\n else:\n beta3_t = beta3\n\n # Apply updates\n if state[\"state1\"].dtype == torch.float32:\n F.optimizer_update_32bit(\n self.optimizer_name,\n grad,\n p,\n state[\"state1\"],\n beta1,\n config[\"eps\"],\n step,\n config[\"lr\"],\n state[\"state2\"],\n beta2,\n beta3_t,\n alpha_t,\n config[\"weight_decay\"],\n gnorm_scale=1.0,\n unorm_vec=state[\"unorm_vec\"] if config[\"max_unorm\"] > 0.0 else None,\n max_unorm=config[\"max_unorm\"],\n skip_zeros=config[\"skip_zeros\"],\n )\n elif state[\"state1\"].dtype == torch.uint8:\n F.optimizer_update_8bit_blockwise(\n self.optimizer_name,\n grad,\n p,\n state[\"state1\"],\n state[\"state2\"],\n config[\"betas\"][0],\n config[\"betas\"][1],\n beta3_t,\n alpha_t,\n config[\"eps\"],\n step,\n config[\"lr\"],\n state[\"qmap1\"],\n state[\"qmap2\"],\n state[\"absmax1\"],\n state[\"absmax2\"],\n config[\"weight_decay\"],\n gnorm_scale=1.0,\n skip_zeros=config[\"skip_zeros\"],\n )\n\n def _get_state_double_buffer(self, p, dtype=torch.float32):\n if not self.is_paged or p.numel() < 1e5:\n return torch.zeros((2, *p.size()), dtype=dtype, device=p.device)\n else:\n buff = F.get_paged(*(2, *p.size()), dtype=dtype, device=p.device)\n F.fill(buff, 0)\n self.page_mng.paged_tensors.append(buff)\n return buff\n\n\nclass AdEMAMix8bit(AdEMAMix):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,","source_hash":"fbe7d4db179aa35b487b9cc9bec3629791b9f5e7d7fd2a85c5da560e9e3e883d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.ademamix._get_state_double_buffer","uri":"program://bitsandbytes/function/bitsandbytes.optim.ademamix._get_state_double_buffer#L264-L271","kind":"function","name":"_get_state_double_buffer","path":"bitsandbytes/optim/ademamix.py","language":"python","start_line":264,"end_line":271,"context_start_line":244,"context_end_line":291,"code":" grad,\n p,\n state[\"state1\"],\n state[\"state2\"],\n config[\"betas\"][0],\n config[\"betas\"][1],\n beta3_t,\n alpha_t,\n config[\"eps\"],\n step,\n config[\"lr\"],\n state[\"qmap1\"],\n state[\"qmap2\"],\n state[\"absmax1\"],\n state[\"absmax2\"],\n config[\"weight_decay\"],\n gnorm_scale=1.0,\n skip_zeros=config[\"skip_zeros\"],\n )\n\n def _get_state_double_buffer(self, p, dtype=torch.float32):\n if not self.is_paged or p.numel() < 1e5:\n return torch.zeros((2, *p.size()), dtype=dtype, device=p.device)\n else:\n buff = F.get_paged(*(2, *p.size()), dtype=dtype, device=p.device)\n F.fill(buff, 0)\n self.page_mng.paged_tensors.append(buff)\n return buff\n\n\nclass AdEMAMix8bit(AdEMAMix):\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n eps: float = 1e-8,\n weight_decay: float = 1e-2,\n min_8bit_size: int = 4096,\n is_paged: bool = False,\n ):\n super().__init__(\n params,\n lr=lr,\n 
betas=betas,","source_hash":"fbe7d4db179aa35b487b9cc9bec3629791b9f5e7d7fd2a85c5da560e9e3e883d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adam","uri":"program://bitsandbytes/module/bitsandbytes.optim.adam#L1-L394","kind":"module","name":"bitsandbytes.optim.adam","path":"bitsandbytes/optim/adam.py","language":"python","start_line":1,"end_line":394,"context_start_line":1,"context_end_line":394,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom bitsandbytes.optim.optimizer import Optimizer2State\n\n\nclass Adam(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n Base Adam optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass Adam8bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 8-bit Adam optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n Note: This parameter is not supported in 
Adam8bit and must be False.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n Note: This parameter is not used in Adam8bit as it always uses 8-bit optimization.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n # Validate unsupported parameters\n if amsgrad:\n raise ValueError(\"Adam8bit does not support amsgrad=True\")\n\n if optim_bits != 32:\n # We allow the default value of 32 to maintain compatibility with the function signature,\n # but any other value is invalid since Adam8bit always uses 8-bit optimization\n raise ValueError(\"Adam8bit only supports optim_bits=32 (default value for compatibility)\")\n\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 8, # Hardcoded to 8 bits\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass Adam32bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 32-bit Adam optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass PagedAdam(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n 
percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n Paged Adam optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedAdam8bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 8-bit paged Adam optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n Note: This parameter is not supported in PagedAdam8bit and must be False.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n Note: This parameter is not used in PagedAdam8bit as it always uses 8-bit optimization.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n # Validate 
unsupported parameters\n if amsgrad:\n raise ValueError(\"PagedAdam8bit does not support amsgrad=True\")\n\n if optim_bits != 32:\n # We allow the default value of 32 to maintain compatibility with the function signature,\n # but any other value is invalid since PagedAdam8bit always uses 8-bit optimization\n raise ValueError(\"PagedAdam8bit only supports optim_bits=32 (default value for compatibility)\")\n\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 8, # Hardcoded to 8 bits\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedAdam32bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n Paged 32-bit Adam optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )","source_hash":"faec04ca0c7dfca630d7dab56f10b4ee847bcc7307e5e9c49ffd7d2016eb0f92","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adam.Adam","uri":"program://bitsandbytes/class/bitsandbytes.optim.adam.Adam#L9-L67","kind":"class","name":"Adam","path":"bitsandbytes/optim/adam.py","language":"python","start_line":9,"end_line":67,"context_start_line":1,"context_end_line":87,"code":"# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom bitsandbytes.optim.optimizer import Optimizer2State\n\n\nclass Adam(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n Base Adam optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass Adam8bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 8-bit Adam optimizer.","source_hash":"faec04ca0c7dfca630d7dab56f10b4ee847bcc7307e5e9c49ffd7d2016eb0f92","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adam.Adam8bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.adam.Adam8bit#L70-L139","kind":"class","name":"Adam8bit","path":"bitsandbytes/optim/adam.py","language":"python","start_line":70,"end_line":139,"context_start_line":50,"context_end_line":159,"code":" Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass Adam8bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 8-bit Adam optimizer.\n\n Arguments:\n params 
(`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n Note: This parameter is not supported in Adam8bit and must be False.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n Note: This parameter is not used in Adam8bit as it always uses 8-bit optimization.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n # Validate unsupported parameters\n if amsgrad:\n raise ValueError(\"Adam8bit does not support amsgrad=True\")\n\n if optim_bits != 32:\n # We allow the default value of 32 to maintain compatibility with the function signature,\n # but any other value is invalid since Adam8bit always uses 8-bit optimization\n raise ValueError(\"Adam8bit only supports optim_bits=32 (default value for compatibility)\")\n\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 8, # Hardcoded to 8 bits\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass Adam32bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 32-bit Adam optimizer.","source_hash":"faec04ca0c7dfca630d7dab56f10b4ee847bcc7307e5e9c49ffd7d2016eb0f92","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adam.Adam32bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.adam.Adam32bit#L142-L200","kind":"class","name":"Adam32bit","path":"bitsandbytes/optim/adam.py","language":"python","start_line":142,"end_line":200,"context_start_line":122,"context_end_line":220,"code":" # We allow the default value of 32 to maintain compatibility with the function signature,\n # but any other value is invalid since Adam8bit always uses 8-bit optimization\n raise ValueError(\"Adam8bit only supports optim_bits=32 (default value for compatibility)\")\n\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 8, # Hardcoded to 8 bits\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass Adam32bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n 
percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 32-bit Adam optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass PagedAdam(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n Paged Adam optimizer.","source_hash":"faec04ca0c7dfca630d7dab56f10b4ee847bcc7307e5e9c49ffd7d2016eb0f92","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adam.PagedAdam","uri":"program://bitsandbytes/class/bitsandbytes.optim.adam.PagedAdam#L203-L261","kind":"class","name":"PagedAdam","path":"bitsandbytes/optim/adam.py","language":"python","start_line":203,"end_line":261,"context_start_line":183,"context_end_line":281,"code":" Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=is_paged,\n )\n\n\nclass PagedAdam(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n Paged Adam optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for 
the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedAdam8bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 8-bit paged Adam optimizer.","source_hash":"faec04ca0c7dfca630d7dab56f10b4ee847bcc7307e5e9c49ffd7d2016eb0f92","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adam.PagedAdam8bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.adam.PagedAdam8bit#L264-L333","kind":"class","name":"PagedAdam8bit","path":"bitsandbytes/optim/adam.py","language":"python","start_line":264,"end_line":333,"context_start_line":244,"context_end_line":353,"code":" Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedAdam8bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n 8-bit paged Adam optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n Note: This parameter is not supported in PagedAdam8bit and must be False.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n Note: This parameter is not used in PagedAdam8bit as it always uses 8-bit optimization.\n args (`object`, defaults to `None`):\n An object with additional 
arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n # Validate unsupported parameters\n if amsgrad:\n raise ValueError(\"PagedAdam8bit does not support amsgrad=True\")\n\n if optim_bits != 32:\n # We allow the default value of 32 to maintain compatibility with the function signature,\n # but any other value is invalid since PagedAdam8bit always uses 8-bit optimization\n raise ValueError(\"PagedAdam8bit only supports optim_bits=32 (default value for compatibility)\")\n\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 8, # Hardcoded to 8 bits\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedAdam32bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n Paged 32-bit Adam optimizer.","source_hash":"faec04ca0c7dfca630d7dab56f10b4ee847bcc7307e5e9c49ffd7d2016eb0f92","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adam.PagedAdam32bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.adam.PagedAdam32bit#L336-L394","kind":"class","name":"PagedAdam32bit","path":"bitsandbytes/optim/adam.py","language":"python","start_line":336,"end_line":394,"context_start_line":316,"context_end_line":394,"code":" # We allow the default value of 32 to maintain compatibility with the function signature,\n # but any other value is invalid since PagedAdam8bit always uses 8-bit optimization\n raise ValueError(\"PagedAdam8bit only supports optim_bits=32 (default value for compatibility)\")\n\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 8, # Hardcoded to 8 bits\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedAdam32bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n Paged 32-bit Adam optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with 
additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )","source_hash":"faec04ca0c7dfca630d7dab56f10b4ee847bcc7307e5e9c49ffd7d2016eb0f92","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adam.__init__","uri":"program://bitsandbytes/function/bitsandbytes.optim.adam.__init__#L337-L394","kind":"function","name":"__init__","path":"bitsandbytes/optim/adam.py","language":"python","start_line":337,"end_line":394,"context_start_line":317,"context_end_line":394,"code":" # but any other value is invalid since PagedAdam8bit always uses 8-bit optimization\n raise ValueError(\"PagedAdam8bit only supports optim_bits=32 (default value for compatibility)\")\n\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 8, # Hardcoded to 8 bits\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n )\n\n\nclass PagedAdam32bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):\n \"\"\"\n Paged 32-bit Adam optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(\n \"adam\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n is_paged=True,\n 
)","source_hash":"faec04ca0c7dfca630d7dab56f10b4ee847bcc7307e5e9c49ffd7d2016eb0f92","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lamb","uri":"program://bitsandbytes/module/bitsandbytes.optim.lamb#L1-L200","kind":"module","name":"bitsandbytes.optim.lamb","path":"bitsandbytes/optim/lamb.py","language":"python","start_line":1,"end_line":200,"context_start_line":1,"context_end_line":200,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom bitsandbytes.optim.optimizer import Optimizer2State\n\n\nclass LAMB(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n bias_correction=True,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n adam_w_mode=True,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=False,\n max_unorm=1.0,\n ):\n \"\"\"\n Base LAMB optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n bias_correction (`bool`, defaults to `True`):\n Whether to apply bias correction to the first and second-order moments.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n adam_w_mode (`bool`, defaults to `True`):\n Whether to use the AdamW variant.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n max_unorm (`float`, defaults to 1.0):\n The maximum gradient norm.\n \"\"\"\n super().__init__(\n \"lamb\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n max_unorm=1.0,\n )\n\n\nclass LAMB8bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n bias_correction=True,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n adam_w_mode=True,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=False,\n max_unorm=1.0,\n ):\n \"\"\"\n 8-bit LAMB optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n bias_correction (`bool`, defaults to `True`):\n Whether to apply bias correction to the first and second-order moments.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value 
prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n adam_w_mode (`bool`, defaults to `True`):\n Whether to use the AdamW variant.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n max_unorm (`float`, defaults to 1.0):\n The maximum gradient norm.\n \"\"\"\n super().__init__(\n \"lamb\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n max_unorm=1.0,\n )\n\n\nclass LAMB32bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n bias_correction=True,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n adam_w_mode=True,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=False,\n max_unorm=1.0,\n ):\n \"\"\"\n 32-bit LAMB optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n bias_correction (`bool`, defaults to `True`):\n Whether to apply bias correction to the first and second-order moments.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n adam_w_mode (`bool`, defaults to `True`):\n Whether to use the AdamW variant.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n max_unorm (`float`, defaults to 1.0):\n The maximum gradient norm.\n \"\"\"\n super().__init__(\n \"lamb\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n max_unorm=1.0,\n )","source_hash":"be62855caa66294b83930a82db74e69bde830af5aee35df479a7ea40c4480d9d","truncated":false} 
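One quirk of the three LAMB classes above is worth flagging: their signatures default to `block_wise=False` and `weight_decay=0`, although the docstrings state `True` and `1e-2`, and each `super().__init__()` call passes the literal `max_unorm=1.0` rather than the `max_unorm` parameter, so a caller-supplied `max_unorm` never reaches the base class. A minimal usage sketch, assuming a bitsandbytes install with a working GPU backend (the model and shapes are illustrative):

```py
import torch

import bitsandbytes as bnb

# A 4096x4096 weight clears min_8bit_size=4096, so its optimizer state is
# held in quantized uint8 buffers rather than fp32.
model = torch.nn.Linear(4096, 4096).cuda()
optimizer = bnb.optim.LAMB8bit(model.parameters(), lr=1e-3, weight_decay=0.01)

out = model(torch.randn(8, 4096, device="cuda"))
out.mean().backward()
optimizer.step()       # state buffers are allocated lazily on the first step
optimizer.zero_grad()
```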
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lamb.LAMB","uri":"program://bitsandbytes/class/bitsandbytes.optim.lamb.LAMB#L8-L72","kind":"class","name":"LAMB","path":"bitsandbytes/optim/lamb.py","language":"python","start_line":8,"end_line":72,"context_start_line":1,"context_end_line":92,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom bitsandbytes.optim.optimizer import Optimizer2State\n\n\nclass LAMB(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n bias_correction=True,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n adam_w_mode=True,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=False,\n max_unorm=1.0,\n ):\n \"\"\"\n Base LAMB optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n bias_correction (`bool`, defaults to `True`):\n Whether to apply bias correction to the first and second-order moments.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n adam_w_mode (`bool`, defaults to `True`):\n Whether to use the AdamW variant.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n max_unorm (`float`, defaults to 1.0):\n The maximum gradient norm.\n \"\"\"\n super().__init__(\n \"lamb\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n max_unorm=1.0,\n )\n\n\nclass LAMB8bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n bias_correction=True,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n adam_w_mode=True,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=False,\n max_unorm=1.0,\n ):\n \"\"\"","source_hash":"be62855caa66294b83930a82db74e69bde830af5aee35df479a7ea40c4480d9d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lamb.LAMB8bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.lamb.LAMB8bit#L75-L136","kind":"class","name":"LAMB8bit","path":"bitsandbytes/optim/lamb.py","language":"python","start_line":75,"end_line":136,"context_start_line":55,"context_end_line":156,"code":" Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n max_unorm (`float`, defaults to 1.0):\n The maximum gradient 
norm.\n \"\"\"\n super().__init__(\n \"lamb\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n max_unorm=1.0,\n )\n\n\nclass LAMB8bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n bias_correction=True,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n adam_w_mode=True,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=False,\n max_unorm=1.0,\n ):\n \"\"\"\n 8-bit LAMB optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n bias_correction (`bool`, defaults to `True`):\n Whether to apply bias correction to the first and second-order moments.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n adam_w_mode (`bool`, defaults to `True`):\n Whether to use the AdamW variant.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n max_unorm (`float`, defaults to 1.0):\n The maximum gradient norm.\n \"\"\"\n super().__init__(\n \"lamb\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n max_unorm=1.0,\n )\n\n\nclass LAMB32bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n bias_correction=True,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n adam_w_mode=True,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=False,\n max_unorm=1.0,\n ):\n \"\"\"","source_hash":"be62855caa66294b83930a82db74e69bde830af5aee35df479a7ea40c4480d9d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lamb.LAMB32bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.lamb.LAMB32bit#L139-L200","kind":"class","name":"LAMB32bit","path":"bitsandbytes/optim/lamb.py","language":"python","start_line":139,"end_line":200,"context_start_line":119,"context_end_line":200,"code":" Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n max_unorm (`float`, defaults to 1.0):\n The maximum gradient norm.\n \"\"\"\n super().__init__(\n \"lamb\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n max_unorm=1.0,\n )\n\n\nclass LAMB32bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n bias_correction=True,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n adam_w_mode=True,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n 
block_wise=False,\n max_unorm=1.0,\n ):\n \"\"\"\n 32-bit LAMB optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n bias_correction (`bool`, defaults to `True`):\n Whether to apply bias correction to the first and second-order moments.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n adam_w_mode (`bool`, defaults to `True`):\n Whether to use the AdamW variant.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n max_unorm (`float`, defaults to 1.0):\n The maximum gradient norm.\n \"\"\"\n super().__init__(\n \"lamb\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n max_unorm=1.0,\n )","source_hash":"be62855caa66294b83930a82db74e69bde830af5aee35df479a7ea40c4480d9d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lamb.__init__","uri":"program://bitsandbytes/function/bitsandbytes.optim.lamb.__init__#L140-L200","kind":"function","name":"__init__","path":"bitsandbytes/optim/lamb.py","language":"python","start_line":140,"end_line":200,"context_start_line":120,"context_end_line":200,"code":" max_unorm (`float`, defaults to 1.0):\n The maximum gradient norm.\n \"\"\"\n super().__init__(\n \"lamb\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n max_unorm=1.0,\n )\n\n\nclass LAMB32bit(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n bias_correction=True,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n adam_w_mode=True,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=False,\n max_unorm=1.0,\n ):\n \"\"\"\n 32-bit LAMB optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n bias_correction (`bool`, defaults to `True`):\n Whether to apply bias correction to the first and second-order moments.\n betas (`tuple(float, float)`, defaults to (0.9, 0.999)):\n The beta values are the decay rates of the first and second-order moment of the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value prevents division by zero in the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n amsgrad (`bool`, defaults to `False`):\n Whether to use the [AMSGrad](https://hf.co/papers/1904.09237) variant of Adam that uses the maximum of past squared gradients instead.\n adam_w_mode (`bool`, defaults to 
`True`):\n Whether to use the AdamW variant.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n max_unorm (`float`, defaults to 1.0):\n The maximum gradient norm.\n \"\"\"\n super().__init__(\n \"lamb\",\n params,\n lr,\n betas,\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n max_unorm=1.0,\n )","source_hash":"be62855caa66294b83930a82db74e69bde830af5aee35df479a7ea40c4480d9d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer","uri":"program://bitsandbytes/module/bitsandbytes.optim.optimizer#L1-L804","kind":"module","name":"bitsandbytes.optim.optimizer","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":1,"end_line":804,"context_start_line":1,"context_end_line":804,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom collections import abc as container_abcs, defaultdict\nfrom copy import deepcopy\nfrom itertools import chain\nfrom typing import Optional\n\nimport torch\n\nimport bitsandbytes.functional as F\nfrom bitsandbytes.utils import sync_gpu\n\n\nclass MockArgs:\n def __init__(self, initial_data):\n for key in initial_data:\n setattr(self, key, initial_data[key])\n\n\nclass GlobalOptimManager:\n \"\"\"\n A global optimizer manager for enabling custom optimizer configs.\n \"\"\"\n\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.pid2config = {}\n self.index2config = {}\n self.optimizer = None\n self.uses_config_override = False\n self.module_weight_config_triple = []\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def register_parameters(self, params):\n param_groups = list(params)\n if not isinstance(param_groups[0], dict):\n param_groups = [{\"params\": param_groups}]\n\n for group_index, group in enumerate(param_groups):\n for p_index, p in enumerate(group[\"params\"]):\n if id(p) in self.pid2config:\n self.index2config[(group_index, p_index)] = self.pid2config[id(p)]\n\n def override_config(self, parameters, key=None, value=None, key_value_dict=None):\n \"\"\"\n Override initial optimizer config with specific hyperparameters.\n\n The key-values of the optimizer config for the input parameters are overridden\n This can be both, optimizer parameters like `betas` or `lr`, or it can be\n 8-bit specific parameters like `optim_bits` or `percentile_clipping`.\n\n Arguments:\n parameters (`torch.Tensor` or `list(torch.Tensors)`):\n The input parameters.\n key (`str`):\n The hyperparameter to override.\n value:\n The hyperparameter value.\n key_value_dict (`dict`):\n A dictionary with multiple key-values to override.\n\n Example:\n\n ```py\n import torch\n import bitsandbytes as bnb\n\n mng = bnb.optim.GlobalOptimManager.get_instance()\n\n model = MyModel()\n 
mng.register_parameters(model.parameters()) # 1. register parameters while still on CPU\n\n model = model.cuda()\n # use 8-bit optimizer states for all parameters\n adam = bnb.optim.Adam(model.parameters(), lr=0.001, optim_bits=8)\n\n # 2. override: the parameter model.fc1.weight now uses 32-bit Adam\n mng.override_config(model.fc1.weight, 'optim_bits', 32)\n ```\n \"\"\"\n self.uses_config_override = True\n if isinstance(parameters, torch.nn.Parameter):\n parameters = [parameters]\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n if key is not None and value is not None:\n assert key_value_dict is None\n key_value_dict = {key: value}\n\n if key_value_dict is not None:\n for p in parameters:\n if id(p) in self.pid2config:\n self.pid2config[id(p)].update(key_value_dict)\n else:\n self.pid2config[id(p)] = key_value_dict\n\n def register_module_override(self, module, param_name, config):\n self.module_weight_config_triple.append((module, param_name, config))\n\n\nclass Optimizer8bit(torch.optim.Optimizer):\n def __init__(self, params, defaults, optim_bits=32, is_paged=False):\n \"\"\"\n Base 8-bit optimizer class.\n\n Arguments:\n params (`torch.Tensor`):\n The input parameters to optimize.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(params, defaults)\n self.initialized = False\n self.name2qmap = {}\n self.is_paged = is_paged\n self.page_mng = F.GlobalPageManager.get_instance()\n\n self.mng = GlobalOptimManager.get_instance()\n self.non_castable_tensor_keys = {\n \"qmap1\",\n \"qmap2\",\n \"max1\",\n \"max2\",\n \"new_max1\",\n \"new_max2\",\n \"state1\",\n \"state2\",\n \"gnorm_vec\",\n \"absmax1\",\n \"absmax2\",\n \"unorm_vec\",\n }\n\n if optim_bits == 8:\n self.fill_qmap()\n\n def fill_qmap(self):\n self.name2qmap[\"dynamic\"] = F.create_dynamic_map(signed=True)\n self.name2qmap[\"udynamic\"] = F.create_dynamic_map(signed=False)\n\n def __setstate__(self, state):\n super().__setstate__(state)\n\n def load_state_dict(self, state_dict, move_to_device=True):\n \"\"\"Load an optimizer state.\n\n Arguments:\n state_dict (`dict`):\n An optimizer state (should be returned from a call to `state_dict`) to load.\n move_to_device (`bool`, defaults to `True`):\n Whether to move the optimizer's state to the device.\n \"\"\"\n # deepcopy, to be consistent with module API\n state_dict = deepcopy(state_dict)\n # Validate the state_dict\n groups = self.param_groups\n saved_groups = state_dict[\"param_groups\"]\n\n if len(groups) != len(saved_groups):\n raise ValueError(\"loaded state dict has a different number of parameter groups\")\n param_lens = (len(g[\"params\"]) for g in groups)\n saved_lens = (len(g[\"params\"]) for g in saved_groups)\n if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):\n raise ValueError(\n \"loaded state dict contains a parameter group that doesn't match the size of optimizer's group\",\n )\n\n # Update the state\n id_map = {\n old_id: p\n for old_id, p in zip(\n chain.from_iterable(g[\"params\"] for g in saved_groups),\n chain.from_iterable(g[\"params\"] for g in groups),\n )\n }\n\n def cast(param, value):\n r\"\"\"Make a deep copy of value, casting all tensors to device of param.\"\"\"\n if isinstance(value, torch.Tensor):\n # Floating-point types are a bit special here. 
They are the only ones\n # that are assumed to always match the type of params.\n if param.is_floating_point() and value.dtype != torch.uint8:\n value = value.to(param.dtype)\n return value\n elif isinstance(value, dict):\n for k, v in value.items():\n if k in self.non_castable_tensor_keys:\n if move_to_device:\n value[k] = v.to(param.device)\n else:\n value[k] = cast(param, v)\n\n return value\n elif isinstance(value, container_abcs.Iterable):\n return type(value)(cast(param, v) for v in value)\n else:\n return value\n\n # Copy state assigned to params (and cast tensors to appropriate types).\n # State that is not assigned to params is copied as is (needed for\n # backward compatibility).\n state = defaultdict(dict)\n for k, v in state_dict[\"state\"].items():\n if k in id_map:\n param = id_map[k]\n state[param] = cast(param, v)\n else:\n state[k] = v\n\n # Update parameter groups, setting their 'params' value\n def update_group(group, new_group):\n new_group[\"params\"] = group[\"params\"]\n return new_group\n\n param_groups = [update_group(g, ng) for g, ng in zip(groups, saved_groups)]\n self.__setstate__({\"state\": state, \"param_groups\": param_groups})\n\n def to_gpu(self):\n for gindex, group in enumerate(self.param_groups):\n for pindex, p in enumerate(group[\"params\"]):\n if p in self.state:\n values = self.state[p]\n for k, v in values.items():\n if isinstance(v, torch.Tensor):\n is_paged = getattr(v, \"is_paged\", False)\n if not is_paged:\n self.state[p][k] = v.to(p.device)\n\n def check_overrides(self):\n for module, attr, config in self.mng.module_weight_config_triple:\n pmodule = getattr(module, attr)\n assert pmodule is not None\n assert isinstance(pmodule, torch.Tensor) or isinstance(pmodule, torch.Parameter)\n found = False\n for gindex, group in enumerate(self.param_groups):\n if found:\n break\n for pindex, p in enumerate(group[\"params\"]):\n if found:\n break\n if id(p) == id(pmodule):\n # found the matching parameter\n # init override\n self.mng.pid2config[id(p)] = config\n self.mng.index2config[(gindex, pindex)] = self.mng.pid2config[id(p)]\n found = True\n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"Perform a single optimization step.\n\n Arguments:\n closure (`Callable`, *optional*, defaults to `None`):\n A closure that reevaluates the model and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n if not self.initialized:\n self.check_overrides()\n self.to_gpu() # needed for fairseq pure fp16 training\n self.initialized = True\n\n # if self.is_paged: self.page_mng.prefetch_all()\n p = None\n for gindex, group in enumerate(self.param_groups):\n for pindex, p in enumerate(group[\"params\"]):\n if p.grad is None:\n continue\n state = self.state[p]\n if len(state) == 0:\n self.init_state(group, p, gindex, pindex)\n\n self.prefetch_state(p)\n self.update_step(group, p, gindex, pindex)\n sync_gpu(p)\n if self.is_paged and p is not None:\n # all paged operations are asynchronous, we need\n # to sync to make sure all tensors are in the right state\n sync_gpu(p)\n\n return loss\n\n def get_config(self, gindex, pindex, group):\n config = {}\n config[\"betas\"] = group[\"betas\"]\n config[\"eps\"] = group[\"eps\"]\n config[\"weight_decay\"] = group[\"weight_decay\"]\n config[\"lr\"] = group[\"lr\"]\n config[\"alpha\"] = group.get(\"alpha\", 0.0)\n config[\"t_alpha\"] = group.get(\"t_alpha\", 0)\n config[\"t_beta3\"] = group.get(\"t_beta3\", 0)\n config[\"optim_bits\"] = self.args.optim_bits\n 
config[\"min_8bit_size\"] = self.args.min_8bit_size\n config[\"percentile_clipping\"] = self.args.percentile_clipping\n config[\"block_wise\"] = self.args.block_wise\n config[\"max_unorm\"] = self.args.max_unorm\n config[\"skip_zeros\"] = self.args.skip_zeros\n\n if (gindex, pindex) in self.mng.index2config:\n config.update(self.mng.index2config[(gindex, pindex)])\n return config\n\n def init_state(self, group, p, gindex, pindex):\n raise NotImplementedError(\"init_state method needs to be overridden\")\n\n def update_step(self, group, p, gindex, pindex):\n raise NotImplementedError(\"The update_step method needs to be overridden\")\n\n def get_state_buffer(self, p, dtype=torch.float32):\n if not self.is_paged or p.numel() < 1e5:\n return torch.zeros_like(p, dtype=dtype, device=p.device)\n else:\n # > 1 MB\n buff = F.get_paged(*p.shape, dtype=dtype, device=p.device)\n F.fill(buff, 0)\n self.page_mng.paged_tensors.append(buff)\n return buff\n\n def prefetch_state(self, p):\n if self.is_paged:\n state = self.state[p]\n s1 = state[\"state1\"]\n is_paged = getattr(s1, \"is_paged\", False)\n if is_paged:\n F.prefetch_tensor(state[\"state1\"])\n if \"state2\" in state:\n F.prefetch_tensor(state[\"state2\"])\n\n\nclass Optimizer2State(Optimizer8bit):\n def __init__(\n self,\n optimizer_name,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0.0,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n max_unorm=0.0,\n skip_zeros=False,\n is_paged=False,\n alpha=0.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n ):\n \"\"\"\n Base 2-state update optimizer class.\n\n Arguments:\n optimizer_name (`str`):\n The name of the optimizer.\n params (`torch.Tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple`, defaults to (0.9, 0.999)):\n The beta values for the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value for the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n max_unorm (`float`, defaults to 0.0):\n The maximum value to normalize each block with.\n skip_zeros (`bool`, defaults to `False`):\n Whether to skip zero values for sparse gradients and models to ensure correct updates.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n alpha (`float`, defaults to 0.0):\n The alpha value for the AdEMAMix optimizer.\n t_alpha (`Optional[int]`, defaults to `None`):\n Number of iterations for alpha scheduling with AdEMAMix.\n t_beta3 (`Optional[int]`, defaults to `None`):\n Number of iterations for beta scheduling with AdEMAMix.\n\n \"\"\"\n if not 0.0 <= lr:\n raise ValueError(f\"Invalid learning rate: {lr}\")\n if not 0.0 <= eps:\n raise ValueError(f\"Invalid epsilon value: {eps}\")\n if isinstance(betas, str):\n # format: 
'(beta1, beta2)'\n betas = betas.replace(\"(\", \"\").replace(\")\", \"\").strip().split(\",\")\n betas = [float(b) for b in betas]\n for i in range(len(betas)):\n if not 0.0 <= betas[i] < 1.0:\n raise ValueError(f\"Invalid beta parameter at index {i}: {betas[i]}\")\n if not 0.0 <= weight_decay:\n raise ValueError(f\"Invalid weight_decay value: {weight_decay}\")\n\n defaults = dict(\n lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, alpha=alpha, t_alpha=t_alpha, t_beta3=t_beta3\n )\n\n super().__init__(params, defaults, optim_bits, is_paged)\n\n if args is None:\n args = {}\n args[\"optim_bits\"] = optim_bits\n args[\"min_8bit_size\"] = min_8bit_size\n args[\"percentile_clipping\"] = percentile_clipping\n args[\"block_wise\"] = block_wise\n args[\"max_unorm\"] = max_unorm\n args[\"skip_zeros\"] = skip_zeros\n\n self.args = MockArgs(args)\n else:\n self.args = args\n\n self.optimizer_name = optimizer_name\n\n @torch.no_grad()\n def init_state(self, group, p, gindex, pindex):\n config = self.get_config(gindex, pindex, group)\n\n if config[\"optim_bits\"] == 32:\n dtype = torch.float32\n elif config[\"optim_bits\"] == 8:\n dtype = torch.uint8\n else:\n raise NotImplementedError(f\"Amount of optimizer bits not supported: {config['optim_bits']}\")\n\n if p.numel() < config[\"min_8bit_size\"]:\n dtype = torch.float32\n\n state = self.state[p]\n state[\"step\"] = 0\n\n if dtype == torch.float32:\n state[\"state1\"] = self.get_state_buffer(p, dtype=torch.float32)\n state[\"state2\"] = self.get_state_buffer(p, dtype=torch.float32)\n elif dtype == torch.uint8:\n if state[\"step\"] == 0:\n if \"dynamic\" not in self.name2qmap:\n self.fill_qmap()\n self.name2qmap[\"dynamic\"] = self.name2qmap[\"dynamic\"].to(p.device)\n self.name2qmap[\"udynamic\"] = self.name2qmap[\"udynamic\"].to(p.device)\n\n state[\"state1\"] = self.get_state_buffer(p, dtype=torch.uint8)\n state[\"qmap1\"] = self.name2qmap[\"dynamic\"]\n\n state[\"state2\"] = self.get_state_buffer(p, dtype=torch.uint8)\n state[\"qmap2\"] = self.name2qmap[\"udynamic\"]\n\n if config[\"block_wise\"]:\n blocksize = 256\n n = p.numel()\n blocks = (n // blocksize) + bool(n % blocksize)\n\n state[\"absmax1\"] = torch.zeros((blocks,), dtype=torch.float32, device=p.device)\n state[\"absmax2\"] = torch.zeros((blocks,), dtype=torch.float32, device=p.device)\n else:\n state[\"max1\"] = torch.zeros((1,), dtype=torch.float32, device=p.device)\n state[\"new_max1\"] = torch.zeros((1,), dtype=torch.float32, device=p.device)\n state[\"max2\"] = torch.zeros((1,), dtype=torch.float32, device=p.device)\n state[\"new_max2\"] = torch.zeros((1,), dtype=torch.float32, device=p.device)\n\n if config[\"percentile_clipping\"] < 100:\n state[\"gnorm_vec\"] = torch.zeros((100,), device=p.device)\n\n if config[\"max_unorm\"] > 0.0:\n state[\"unorm_vec\"] = torch.zeros((1,), device=p.device)\n\n @torch.no_grad()\n def update_step(self, group, p, gindex, pindex):\n # avoid update error from non-contiguous memory layout\n p.data = p.data.contiguous()\n p.grad = p.grad.contiguous()\n\n state = self.state[p]\n grad = p.grad\n\n config = self.get_config(gindex, pindex, group)\n\n state[\"step\"] += 1\n step = state[\"step\"]\n\n if config[\"percentile_clipping\"] < 100:\n current_gnorm, clip_value, gnorm_scale = F.percentile_clipping(\n grad,\n state[\"gnorm_vec\"],\n step,\n config[\"percentile_clipping\"],\n )\n else:\n gnorm_scale = 1.0\n\n if state[\"state1\"].dtype == torch.float:\n F.optimizer_update_32bit(\n self.optimizer_name,\n grad,\n p,\n state[\"state1\"],\n 
config[\"betas\"\n# ... truncated ...","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":true} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.MockArgs","uri":"program://bitsandbytes/class/bitsandbytes.optim.optimizer.MockArgs#L16-L19","kind":"class","name":"MockArgs","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":16,"end_line":19,"context_start_line":1,"context_end_line":39,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom collections import abc as container_abcs, defaultdict\nfrom copy import deepcopy\nfrom itertools import chain\nfrom typing import Optional\n\nimport torch\n\nimport bitsandbytes.functional as F\nfrom bitsandbytes.utils import sync_gpu\n\n\nclass MockArgs:\n def __init__(self, initial_data):\n for key in initial_data:\n setattr(self, key, initial_data[key])\n\n\nclass GlobalOptimManager:\n \"\"\"\n A global optimizer manager for enabling custom optimizer configs.\n \"\"\"\n\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.pid2config = {}\n self.index2config = {}\n self.optimizer = None\n self.uses_config_override = False\n self.module_weight_config_triple = []\n\n @classmethod","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.GlobalOptimManager","uri":"program://bitsandbytes/class/bitsandbytes.optim.optimizer.GlobalOptimManager#L22-L110","kind":"class","name":"GlobalOptimManager","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":22,"end_line":110,"context_start_line":2,"context_end_line":130,"code":"#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom collections import abc as container_abcs, defaultdict\nfrom copy import deepcopy\nfrom itertools import chain\nfrom typing import Optional\n\nimport torch\n\nimport bitsandbytes.functional as F\nfrom bitsandbytes.utils import sync_gpu\n\n\nclass MockArgs:\n def __init__(self, initial_data):\n for key in initial_data:\n setattr(self, key, initial_data[key])\n\n\nclass GlobalOptimManager:\n \"\"\"\n A global optimizer manager for enabling custom optimizer configs.\n \"\"\"\n\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.pid2config = {}\n self.index2config = {}\n self.optimizer = None\n self.uses_config_override = False\n self.module_weight_config_triple = []\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def register_parameters(self, params):\n param_groups = list(params)\n if not isinstance(param_groups[0], dict):\n param_groups = [{\"params\": param_groups}]\n\n for group_index, group in enumerate(param_groups):\n for p_index, p in enumerate(group[\"params\"]):\n if id(p) in self.pid2config:\n self.index2config[(group_index, p_index)] = self.pid2config[id(p)]\n\n def override_config(self, parameters, key=None, value=None, key_value_dict=None):\n \"\"\"\n Override initial optimizer config with specific hyperparameters.\n\n The key-values of the optimizer config for the input parameters are 
overridden\n This can be both, optimizer parameters like `betas` or `lr`, or it can be\n 8-bit specific parameters like `optim_bits` or `percentile_clipping`.\n\n Arguments:\n parameters (`torch.Tensor` or `list(torch.Tensors)`):\n The input parameters.\n key (`str`):\n The hyperparameter to override.\n value:\n The hyperparameter value.\n key_value_dict (`dict`):\n A dictionary with multiple key-values to override.\n\n Example:\n\n ```py\n import torch\n import bitsandbytes as bnb\n\n mng = bnb.optim.GlobalOptimManager.get_instance()\n\n model = MyModel()\n mng.register_parameters(model.parameters()) # 1. register parameters while still on CPU\n\n model = model.cuda()\n # use 8-bit optimizer states for all parameters\n adam = bnb.optim.Adam(model.parameters(), lr=0.001, optim_bits=8)\n\n # 2. override: the parameter model.fc1.weight now uses 32-bit Adam\n mng.override_config(model.fc1.weight, 'optim_bits', 32)\n ```\n \"\"\"\n self.uses_config_override = True\n if isinstance(parameters, torch.nn.Parameter):\n parameters = [parameters]\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n if key is not None and value is not None:\n assert key_value_dict is None\n key_value_dict = {key: value}\n\n if key_value_dict is not None:\n for p in parameters:\n if id(p) in self.pid2config:\n self.pid2config[id(p)].update(key_value_dict)\n else:\n self.pid2config[id(p)] = key_value_dict\n\n def register_module_override(self, module, param_name, config):\n self.module_weight_config_triple.append((module, param_name, config))\n\n\nclass Optimizer8bit(torch.optim.Optimizer):\n def __init__(self, params, defaults, optim_bits=32, is_paged=False):\n \"\"\"\n Base 8-bit optimizer class.\n\n Arguments:\n params (`torch.Tensor`):\n The input parameters to optimize.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(params, defaults)\n self.initialized = False\n self.name2qmap = {}\n self.is_paged = is_paged\n self.page_mng = F.GlobalPageManager.get_instance()","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.Optimizer8bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.optimizer.Optimizer8bit#L113-L344","kind":"class","name":"Optimizer8bit","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":113,"end_line":344,"context_start_line":93,"context_end_line":364,"code":" self.uses_config_override = True\n if isinstance(parameters, torch.nn.Parameter):\n parameters = [parameters]\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n if key is not None and value is not None:\n assert key_value_dict is None\n key_value_dict = {key: value}\n\n if key_value_dict is not None:\n for p in parameters:\n if id(p) in self.pid2config:\n self.pid2config[id(p)].update(key_value_dict)\n else:\n self.pid2config[id(p)] = key_value_dict\n\n def register_module_override(self, module, param_name, config):\n self.module_weight_config_triple.append((module, param_name, config))\n\n\nclass Optimizer8bit(torch.optim.Optimizer):\n def __init__(self, params, defaults, optim_bits=32, is_paged=False):\n \"\"\"\n Base 8-bit optimizer class.\n\n Arguments:\n params (`torch.Tensor`):\n The input parameters to optimize.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n 
is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(params, defaults)\n self.initialized = False\n self.name2qmap = {}\n self.is_paged = is_paged\n self.page_mng = F.GlobalPageManager.get_instance()\n\n self.mng = GlobalOptimManager.get_instance()\n self.non_castable_tensor_keys = {\n \"qmap1\",\n \"qmap2\",\n \"max1\",\n \"max2\",\n \"new_max1\",\n \"new_max2\",\n \"state1\",\n \"state2\",\n \"gnorm_vec\",\n \"absmax1\",\n \"absmax2\",\n \"unorm_vec\",\n }\n\n if optim_bits == 8:\n self.fill_qmap()\n\n def fill_qmap(self):\n self.name2qmap[\"dynamic\"] = F.create_dynamic_map(signed=True)\n self.name2qmap[\"udynamic\"] = F.create_dynamic_map(signed=False)\n\n def __setstate__(self, state):\n super().__setstate__(state)\n\n def load_state_dict(self, state_dict, move_to_device=True):\n \"\"\"Load an optimizer state.\n\n Arguments:\n state_dict (`dict`):\n An optimizer state (should be returned from a call to `state_dict`) to load.\n move_to_device (`bool`, defaults to `True`):\n Whether to move the optimizer's state to the device.\n \"\"\"\n # deepcopy, to be consistent with module API\n state_dict = deepcopy(state_dict)\n # Validate the state_dict\n groups = self.param_groups\n saved_groups = state_dict[\"param_groups\"]\n\n if len(groups) != len(saved_groups):\n raise ValueError(\"loaded state dict has a different number of parameter groups\")\n param_lens = (len(g[\"params\"]) for g in groups)\n saved_lens = (len(g[\"params\"]) for g in saved_groups)\n if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):\n raise ValueError(\n \"loaded state dict contains a parameter group that doesn't match the size of optimizer's group\",\n )\n\n # Update the state\n id_map = {\n old_id: p\n for old_id, p in zip(\n chain.from_iterable(g[\"params\"] for g in saved_groups),\n chain.from_iterable(g[\"params\"] for g in groups),\n )\n }\n\n def cast(param, value):\n r\"\"\"Make a deep copy of value, casting all tensors to device of param.\"\"\"\n if isinstance(value, torch.Tensor):\n # Floating-point types are a bit special here. 
They are the only ones\n # that are assumed to always match the type of params.\n if param.is_floating_point() and value.dtype != torch.uint8:\n value = value.to(param.dtype)\n return value\n elif isinstance(value, dict):\n for k, v in value.items():\n if k in self.non_castable_tensor_keys:\n if move_to_device:\n value[k] = v.to(param.device)\n else:\n value[k] = cast(param, v)\n\n return value\n elif isinstance(value, container_abcs.Iterable):\n return type(value)(cast(param, v) for v in value)\n else:\n return value\n\n # Copy state assigned to params (and cast tensors to appropriate types).\n # State that is not assigned to params is copied as is (needed for\n # backward compatibility).\n state = defaultdict(dict)\n for k, v in state_dict[\"state\"].items():\n if k in id_map:\n param = id_map[k]\n state[param] = cast(param, v)\n else:\n state[k] = v\n\n # Update parameter groups, setting their 'params' value\n def update_group(group, new_group):\n new_group[\"params\"] = group[\"params\"]\n return new_group\n\n param_groups = [update_group(g, ng) for g, ng in zip(groups, saved_groups)]\n self.__setstate__({\"state\": state, \"param_groups\": param_groups})\n\n def to_gpu(self):\n for gindex, group in enumerate(self.param_groups):\n for pindex, p in enumerate(group[\"params\"]):\n if p in self.state:\n values = self.state[p]\n for k, v in values.items():\n if isinstance(v, torch.Tensor):\n is_paged = getattr(v, \"is_paged\", False)\n if not is_paged:\n self.state[p][k] = v.to(p.device)\n\n def check_overrides(self):\n for module, attr, config in self.mng.module_weight_config_triple:\n pmodule = getattr(module, attr)\n assert pmodule is not None\n assert isinstance(pmodule, torch.Tensor) or isinstance(pmodule, torch.Parameter)\n found = False\n for gindex, group in enumerate(self.param_groups):\n if found:\n break\n for pindex, p in enumerate(group[\"params\"]):\n if found:\n break\n if id(p) == id(pmodule):\n # found the matching parameter\n # init override\n self.mng.pid2config[id(p)] = config\n self.mng.index2config[(gindex, pindex)] = self.mng.pid2config[id(p)]\n found = True\n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"Perform a single optimization step.\n\n Arguments:\n closure (`Callable`, *optional*, defaults to `None`):\n A closure that reevaluates the model and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n if not self.initialized:\n self.check_overrides()\n self.to_gpu() # needed for fairseq pure fp16 training\n self.initialized = True\n\n # if self.is_paged: self.page_mng.prefetch_all()\n p = None\n for gindex, group in enumerate(self.param_groups):\n for pindex, p in enumerate(group[\"params\"]):\n if p.grad is None:\n continue\n state = self.state[p]\n if len(state) == 0:\n self.init_state(group, p, gindex, pindex)\n\n self.prefetch_state(p)\n self.update_step(group, p, gindex, pindex)\n sync_gpu(p)\n if self.is_paged and p is not None:\n # all paged operations are asynchronous, we need\n # to sync to make sure all tensors are in the right state\n sync_gpu(p)\n\n return loss\n\n def get_config(self, gindex, pindex, group):\n config = {}\n config[\"betas\"] = group[\"betas\"]\n config[\"eps\"] = group[\"eps\"]\n config[\"weight_decay\"] = group[\"weight_decay\"]\n config[\"lr\"] = group[\"lr\"]\n config[\"alpha\"] = group.get(\"alpha\", 0.0)\n config[\"t_alpha\"] = group.get(\"t_alpha\", 0)\n config[\"t_beta3\"] = group.get(\"t_beta3\", 0)\n config[\"optim_bits\"] = self.args.optim_bits\n 
config[\"min_8bit_size\"] = self.args.min_8bit_size\n config[\"percentile_clipping\"] = self.args.percentile_clipping\n config[\"block_wise\"] = self.args.block_wise\n config[\"max_unorm\"] = self.args.max_unorm\n config[\"skip_zeros\"] = self.args.skip_zeros\n\n if (gindex, pindex) in self.mng.index2config:\n config.update(self.mng.index2config[(gindex, pindex)])\n return config\n\n def init_state(self, group, p, gindex, pindex):\n raise NotImplementedError(\"init_state method needs to be overridden\")\n\n def update_step(self, group, p, gindex, pindex):\n raise NotImplementedError(\"The update_step method needs to be overridden\")\n\n def get_state_buffer(self, p, dtype=torch.float32):\n if not self.is_paged or p.numel() < 1e5:\n return torch.zeros_like(p, dtype=dtype, device=p.device)\n else:\n # > 1 MB\n buff = F.get_paged(*p.shape, dtype=dtype, device=p.device)\n F.fill(buff, 0)\n self.page_mng.paged_tensors.append(buff)\n return buff\n\n def prefetch_state(self, p):\n if self.is_paged:\n state = self.state[p]\n s1 = state[\"state1\"]\n is_paged = getattr(s1, \"is_paged\", False)\n if is_paged:\n F.prefetch_tensor(state[\"state1\"])\n if \"state2\" in state:\n F.prefetch_tensor(state[\"state2\"])\n\n\nclass Optimizer2State(Optimizer8bit):\n def __init__(\n self,\n optimizer_name,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0.0,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n max_unorm=0.0,\n skip_zeros=False,\n is_paged=False,\n alpha=0.0,","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.Optimizer2State","uri":"program://bitsandbytes/class/bitsandbytes.optim.optimizer.Optimizer2State#L347-L588","kind":"class","name":"Optimizer2State","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":347,"end_line":588,"context_start_line":327,"context_end_line":608,"code":" if not self.is_paged or p.numel() < 1e5:\n return torch.zeros_like(p, dtype=dtype, device=p.device)\n else:\n # > 1 MB\n buff = F.get_paged(*p.shape, dtype=dtype, device=p.device)\n F.fill(buff, 0)\n self.page_mng.paged_tensors.append(buff)\n return buff\n\n def prefetch_state(self, p):\n if self.is_paged:\n state = self.state[p]\n s1 = state[\"state1\"]\n is_paged = getattr(s1, \"is_paged\", False)\n if is_paged:\n F.prefetch_tensor(state[\"state1\"])\n if \"state2\" in state:\n F.prefetch_tensor(state[\"state2\"])\n\n\nclass Optimizer2State(Optimizer8bit):\n def __init__(\n self,\n optimizer_name,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0.0,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n max_unorm=0.0,\n skip_zeros=False,\n is_paged=False,\n alpha=0.0,\n t_alpha: Optional[int] = None,\n t_beta3: Optional[int] = None,\n ):\n \"\"\"\n Base 2-state update optimizer class.\n\n Arguments:\n optimizer_name (`str`):\n The name of the optimizer.\n params (`torch.Tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple`, defaults to (0.9, 0.999)):\n The beta values for the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value for the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with 
additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n max_unorm (`float`, defaults to 0.0):\n The maximum value to normalize each block with.\n skip_zeros (`bool`, defaults to `False`):\n Whether to skip zero values for sparse gradients and models to ensure correct updates.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n alpha (`float`, defaults to 0.0):\n The alpha value for the AdEMAMix optimizer.\n t_alpha (`Optional[int]`, defaults to `None`):\n Number of iterations for alpha scheduling with AdEMAMix.\n t_beta3 (`Optional[int]`, defaults to `None`):\n Number of iterations for beta scheduling with AdEMAMix.\n\n \"\"\"\n if not 0.0 <= lr:\n raise ValueError(f\"Invalid learning rate: {lr}\")\n if not 0.0 <= eps:\n raise ValueError(f\"Invalid epsilon value: {eps}\")\n if isinstance(betas, str):\n # format: '(beta1, beta2)'\n betas = betas.replace(\"(\", \"\").replace(\")\", \"\").strip().split(\",\")\n betas = [float(b) for b in betas]\n for i in range(len(betas)):\n if not 0.0 <= betas[i] < 1.0:\n raise ValueError(f\"Invalid beta parameter at index {i}: {betas[i]}\")\n if not 0.0 <= weight_decay:\n raise ValueError(f\"Invalid weight_decay value: {weight_decay}\")\n\n defaults = dict(\n lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, alpha=alpha, t_alpha=t_alpha, t_beta3=t_beta3\n )\n\n super().__init__(params, defaults, optim_bits, is_paged)\n\n if args is None:\n args = {}\n args[\"optim_bits\"] = optim_bits\n args[\"min_8bit_size\"] = min_8bit_size\n args[\"percentile_clipping\"] = percentile_clipping\n args[\"block_wise\"] = block_wise\n args[\"max_unorm\"] = max_unorm\n args[\"skip_zeros\"] = skip_zeros\n\n self.args = MockArgs(args)\n else:\n self.args = args\n\n self.optimizer_name = optimizer_name\n\n @torch.no_grad()\n def init_state(self, group, p, gindex, pindex):\n config = self.get_config(gindex, pindex, group)\n\n if config[\"optim_bits\"] == 32:\n dtype = torch.float32\n elif config[\"optim_bits\"] == 8:\n dtype = torch.uint8\n else:\n raise NotImplementedError(f\"Amount of optimizer bits not supported: {config['optim_bits']}\")\n\n if p.numel() < config[\"min_8bit_size\"]:\n dtype = torch.float32\n\n state = self.state[p]\n state[\"step\"] = 0\n\n if dtype == torch.float32:\n state[\"state1\"] = self.get_state_buffer(p, dtype=torch.float32)\n state[\"state2\"] = self.get_state_buffer(p, dtype=torch.float32)\n elif dtype == torch.uint8:\n if state[\"step\"] == 0:\n if \"dynamic\" not in self.name2qmap:\n self.fill_qmap()\n self.name2qmap[\"dynamic\"] = self.name2qmap[\"dynamic\"].to(p.device)\n self.name2qmap[\"udynamic\"] = self.name2qmap[\"udynamic\"].to(p.device)\n\n state[\"state1\"] = self.get_state_buffer(p, dtype=torch.uint8)\n state[\"qmap1\"] = self.name2qmap[\"dynamic\"]\n\n state[\"state2\"] = self.get_state_buffer(p, dtype=torch.uint8)\n state[\"qmap2\"] = self.name2qmap[\"udynamic\"]\n\n if config[\"block_wise\"]:\n blocksize = 256\n n = p.numel()\n blocks = (n // blocksize) + bool(n % blocksize)\n\n state[\"absmax1\"] = torch.zeros((blocks,), dtype=torch.float32, 
device=p.device)\n state[\"absmax2\"] = torch.zeros((blocks,), dtype=torch.float32, device=p.device)\n else:\n state[\"max1\"] = torch.zeros((1,), dtype=torch.float32, device=p.device)\n state[\"new_max1\"] = torch.zeros((1,), dtype=torch.float32, device=p.device)\n state[\"max2\"] = torch.zeros((1,), dtype=torch.float32, device=p.device)\n state[\"new_max2\"] = torch.zeros((1,), dtype=torch.float32, device=p.device)\n\n if config[\"percentile_clipping\"] < 100:\n state[\"gnorm_vec\"] = torch.zeros((100,), device=p.device)\n\n if config[\"max_unorm\"] > 0.0:\n state[\"unorm_vec\"] = torch.zeros((1,), device=p.device)\n\n @torch.no_grad()\n def update_step(self, group, p, gindex, pindex):\n # avoid update error from non-contiguous memory layout\n p.data = p.data.contiguous()\n p.grad = p.grad.contiguous()\n\n state = self.state[p]\n grad = p.grad\n\n config = self.get_config(gindex, pindex, group)\n\n state[\"step\"] += 1\n step = state[\"step\"]\n\n if config[\"percentile_clipping\"] < 100:\n current_gnorm, clip_value, gnorm_scale = F.percentile_clipping(\n grad,\n state[\"gnorm_vec\"],\n step,\n config[\"percentile_clipping\"],\n )\n else:\n gnorm_scale = 1.0\n\n if state[\"state1\"].dtype == torch.float:\n F.optimizer_update_32bit(\n self.optimizer_name,\n grad,\n p,\n state[\"state1\"],\n config[\"betas\"][0],\n config[\"eps\"],\n step,\n config[\"lr\"],\n state[\"state2\"],\n config[\"betas\"][1],\n config[\"betas\"][2] if len(config[\"betas\"]) >= 3 else 0.0,\n config.get(\"alpha\", 0.0),\n config[\"weight_decay\"],\n gnorm_scale,\n state[\"unorm_vec\"] if config[\"max_unorm\"] > 0.0 else None,\n max_unorm=config[\"max_unorm\"],\n skip_zeros=config[\"skip_zeros\"],\n )\n\n elif state[\"state1\"].dtype == torch.uint8 and not config[\"block_wise\"]:\n F.optimizer_update_8bit(\n self.optimizer_name,\n grad,\n p,\n state[\"state1\"],\n state[\"state2\"],\n config[\"betas\"][0],\n config[\"betas\"][1],\n config[\"eps\"],\n step,\n config[\"lr\"],\n state[\"qmap1\"],\n state[\"qmap2\"],\n state[\"max1\"],\n state[\"max2\"],\n state[\"new_max1\"],\n state[\"new_max2\"],\n config[\"weight_decay\"],\n gnorm_scale=gnorm_scale,\n unorm_vec=state[\"unorm_vec\"] if config[\"max_unorm\"] > 0.0 else None,\n max_unorm=config[\"max_unorm\"],\n )\n\n # swap maxes\n state[\"max1\"], state[\"new_max1\"] = state[\"new_max1\"], state[\"max1\"]\n state[\"max2\"], state[\"new_max2\"] = state[\"new_max2\"], state[\"max2\"]\n elif state[\"state1\"].dtype == torch.uint8 and config[\"block_wise\"]:\n F.optimizer_update_8bit_blockwise(\n self.optimizer_name,\n grad,\n p,\n state[\"state1\"],\n state[\"state2\"],\n config[\"betas\"][0],\n config[\"betas\"][1],\n config[\"betas\"][2] if len(config[\"betas\"]) >= 3 else 0.0,\n config.get(\"alpha\", 0.0),\n config[\"eps\"],\n step,\n config[\"lr\"],\n state[\"qmap1\"],\n state[\"qmap2\"],\n state[\"absmax1\"],\n state[\"absmax2\"],\n config[\"weight_decay\"],\n gnorm_scale=gnorm_scale,\n skip_zeros=config[\"skip_zeros\"],\n )\n\n\nclass Optimizer1State(Optimizer8bit):\n def __init__(\n self,\n optimizer_name,\n params,\n lr=1e-3,\n betas=(0.9, 0.0),\n eps=1e-8,\n weight_decay=0.0,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n max_unorm=0.0,\n skip_zeros=False,\n is_paged=False,\n ):","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} 
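`Optimizer2State` is the base class that the Adam and LAMB wrappers in these records delegate to: it validates hyperparameters, folds `optim_bits`, `min_8bit_size`, `percentile_clipping`, and `block_wise` into a `MockArgs` config, and dispatches each `update_step` to the 32-bit, 8-bit, or 8-bit block-wise kernel according to the dtype of `state1`. (Aside: `check_overrides` in `Optimizer8bit` tests `isinstance(pmodule, torch.Parameter)`, which does not exist in PyTorch; `torch.nn.Parameter` appears intended, and the slip is masked because the preceding `isinstance(pmodule, torch.Tensor)` check short-circuits for real parameters.) Below is a minimal sketch of the subclassing pattern; `MyAdam8bit` is a hypothetical name, and the positional argument order follows the constructor signature above.

```py
from bitsandbytes.optim.optimizer import Optimizer2State


class MyAdam8bit(Optimizer2State):
    # Hypothetical subclass for illustration; mirrors how the library's own
    # Adam/LAMB wrappers call the base constructor.
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0):
        super().__init__(
            "adam",      # update-rule name consumed by the update kernels
            params,
            lr,
            betas,
            eps,
            weight_decay,
            8,           # optim_bits: state1/state2 become uint8 buffers
            None,        # args: None -> the kwargs above are packed into MockArgs
            4096,        # min_8bit_size: smaller tensors keep fp32 state
            100,         # percentile_clipping: 100 disables gradient clipping
            True,        # block_wise: per-256-element absmax quantization
        )
```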
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.Optimizer1State","uri":"program://bitsandbytes/class/bitsandbytes.optim.optimizer.Optimizer1State#L591-L804","kind":"class","name":"Optimizer1State","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":591,"end_line":804,"context_start_line":571,"context_end_line":804,"code":" p,\n state[\"state1\"],\n state[\"state2\"],\n config[\"betas\"][0],\n config[\"betas\"][1],\n config[\"betas\"][2] if len(config[\"betas\"]) >= 3 else 0.0,\n config.get(\"alpha\", 0.0),\n config[\"eps\"],\n step,\n config[\"lr\"],\n state[\"qmap1\"],\n state[\"qmap2\"],\n state[\"absmax1\"],\n state[\"absmax2\"],\n config[\"weight_decay\"],\n gnorm_scale=gnorm_scale,\n skip_zeros=config[\"skip_zeros\"],\n )\n\n\nclass Optimizer1State(Optimizer8bit):\n def __init__(\n self,\n optimizer_name,\n params,\n lr=1e-3,\n betas=(0.9, 0.0),\n eps=1e-8,\n weight_decay=0.0,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n max_unorm=0.0,\n skip_zeros=False,\n is_paged=False,\n ):\n \"\"\"\n Base 1-state update optimizer class.\n\n Arguments:\n optimizer_name (`str`):\n The name of the optimizer.\n params (`torch.Tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple`, defaults to (0.9, 0.0)):\n The beta values for the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value for the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n max_unorm (`float`, defaults to 0.0):\n The maximum value to normalize each block with.\n skip_zeros (`bool`, defaults to `False`):\n Whether to skip zero values for sparse gradients and models to ensure correct updates.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n if not 0.0 <= lr:\n raise ValueError(f\"Invalid learning rate: {lr}\")\n if not 0.0 <= eps:\n raise ValueError(f\"Invalid epsilon value: {eps}\")\n for i in range(len(betas)):\n if not 0.0 <= betas[i] < 1.0:\n raise ValueError(f\"Invalid beta parameter at index {i}: {betas[i]}\")\n if not 0.0 <= weight_decay:\n raise ValueError(f\"Invalid weight_decay value: {weight_decay}\")\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n super().__init__(params, defaults, optim_bits, is_paged)\n\n if args is None:\n args = {}\n args[\"optim_bits\"] = optim_bits\n args[\"min_8bit_size\"] = min_8bit_size\n args[\"percentile_clipping\"] = percentile_clipping\n args[\"block_wise\"] = block_wise\n args[\"max_unorm\"] = max_unorm\n args[\"skip_zeros\"] = skip_zeros\n\n self.args = MockArgs(args)\n else:\n self.args = args\n\n self.optimizer_name = optimizer_name\n\n @torch.no_grad()\n def init_state(self, group, p, gindex, pindex):\n config = self.get_config(gindex, pindex, group)\n\n if 
config[\"optim_bits\"] == 32:\n dtype = torch.float32\n elif config[\"optim_bits\"] == 8:\n dtype = torch.uint8\n else:\n raise NotImplementedError(f\"Amount of optimizer bits not supported: {config['optim_bits']}\")\n\n if p.numel() < config[\"min_8bit_size\"]:\n dtype = torch.float32\n\n state = self.state[p]\n state[\"step\"] = 0\n\n if dtype == torch.float32:\n state[\"state1\"] = self.get_state_buffer(p, dtype=torch.float32)\n elif dtype == torch.uint8:\n if state[\"step\"] == 0:\n if \"dynamic\" not in self.name2qmap:\n self.fill_qmap()\n self.name2qmap[\"dynamic\"] = self.name2qmap[\"dynamic\"].to(p.device)\n\n state[\"state1\"] = self.get_state_buffer(p, dtype=torch.uint8)\n state[\"qmap1\"] = self.name2qmap[\"dynamic\"]\n\n if config[\"block_wise\"]:\n blocksize = 256\n n = p.numel()\n blocks = (n // blocksize) + bool(n % blocksize)\n\n state[\"absmax1\"] = torch.zeros((blocks,), dtype=torch.float32, device=p.device)\n else:\n state[\"max1\"] = torch.zeros((1,), dtype=torch.float32, device=p.device)\n state[\"new_max1\"] = torch.zeros((1,), dtype=torch.float32, device=p.device)\n\n if config[\"percentile_clipping\"] < 100:\n state[\"gnorm_vec\"] = torch.zeros((100,), device=p.device)\n\n if config[\"max_unorm\"] > 0.0:\n state[\"unorm_vec\"] = torch.zeros((1,), device=p.device)\n\n @torch.no_grad()\n def update_step(self, group, p, gindex, pindex):\n # avoid update error from non-contiguous memory layout\n p.data = p.data.contiguous()\n p.grad = p.grad.contiguous()\n\n state = self.state[p]\n grad = p.grad\n\n config = self.get_config(gindex, pindex, group)\n\n state[\"step\"] += 1\n step = state[\"step\"]\n\n if config[\"percentile_clipping\"] < 100:\n current_gnorm, clip_value, gnorm_scale = F.percentile_clipping(\n grad,\n state[\"gnorm_vec\"],\n step,\n config[\"percentile_clipping\"],\n )\n else:\n gnorm_scale = 1.0\n\n if state[\"state1\"].dtype == torch.float:\n F.optimizer_update_32bit(\n self.optimizer_name,\n grad,\n p,\n state[\"state1\"],\n config[\"betas\"][0],\n config[\"eps\"],\n step,\n config[\"lr\"],\n None,\n config[\"betas\"][1],\n 0.0,\n 0.0,\n config[\"weight_decay\"],\n gnorm_scale,\n state[\"unorm_vec\"] if config[\"max_unorm\"] > 0.0 else None,\n max_unorm=config[\"max_unorm\"],\n skip_zeros=config[\"skip_zeros\"],\n )\n\n elif state[\"state1\"].dtype == torch.uint8 and not config[\"block_wise\"]:\n F.optimizer_update_8bit(\n self.optimizer_name,\n grad,\n p,\n state[\"state1\"],\n None,\n config[\"betas\"][0],\n config[\"betas\"][1],\n config[\"eps\"],\n step,\n config[\"lr\"],\n state[\"qmap1\"],\n None,\n state[\"max1\"],\n None,\n state[\"new_max1\"],\n None,\n config[\"weight_decay\"],\n gnorm_scale,\n state[\"unorm_vec\"] if config[\"max_unorm\"] > 0.0 else None,\n max_unorm=config[\"max_unorm\"],\n )\n\n state[\"max1\"], state[\"new_max1\"] = state[\"new_max1\"], state[\"max1\"]\n elif state[\"state1\"].dtype == torch.uint8 and config[\"block_wise\"]:\n F.optimizer_update_8bit_blockwise(\n self.optimizer_name,\n grad,\n p,\n state[\"state1\"],\n None,\n config[\"betas\"][0],\n config[\"betas\"][1],\n 0.0,\n 0.0,\n config[\"eps\"],\n step,\n config[\"lr\"],\n state[\"qmap1\"],\n None,\n state[\"absmax1\"],\n None,\n config[\"weight_decay\"],\n gnorm_scale=gnorm_scale,\n skip_zeros=config[\"skip_zeros\"],\n )","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} 
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.__init__","uri":"program://bitsandbytes/function/bitsandbytes.optim.optimizer.__init__#L592-L667","kind":"function","name":"__init__","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":592,"end_line":667,"context_start_line":572,"context_end_line":687,"code":" state[\"state1\"],\n state[\"state2\"],\n config[\"betas\"][0],\n config[\"betas\"][1],\n config[\"betas\"][2] if len(config[\"betas\"]) >= 3 else 0.0,\n config.get(\"alpha\", 0.0),\n config[\"eps\"],\n step,\n config[\"lr\"],\n state[\"qmap1\"],\n state[\"qmap2\"],\n state[\"absmax1\"],\n state[\"absmax2\"],\n config[\"weight_decay\"],\n gnorm_scale=gnorm_scale,\n skip_zeros=config[\"skip_zeros\"],\n )\n\n\nclass Optimizer1State(Optimizer8bit):\n def __init__(\n self,\n optimizer_name,\n params,\n lr=1e-3,\n betas=(0.9, 0.0),\n eps=1e-8,\n weight_decay=0.0,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n max_unorm=0.0,\n skip_zeros=False,\n is_paged=False,\n ):\n \"\"\"\n Base 1-state update optimizer class.\n\n Arguments:\n optimizer_name (`str`):\n The name of the optimizer.\n params (`torch.Tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-3):\n The learning rate.\n betas (`tuple`, defaults to (0.9, 0.0)):\n The beta values for the optimizer.\n eps (`float`, defaults to 1e-8):\n The epsilon value for the optimizer.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n max_unorm (`float`, defaults to 0.0):\n The maximum value to normalize each block with.\n skip_zeros (`bool`, defaults to `False`):\n Whether to skip zero values for sparse gradients and models to ensure correct updates.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n if not 0.0 <= lr:\n raise ValueError(f\"Invalid learning rate: {lr}\")\n if not 0.0 <= eps:\n raise ValueError(f\"Invalid epsilon value: {eps}\")\n for i in range(len(betas)):\n if not 0.0 <= betas[i] < 1.0:\n raise ValueError(f\"Invalid beta parameter at index {i}: {betas[i]}\")\n if not 0.0 <= weight_decay:\n raise ValueError(f\"Invalid weight_decay value: {weight_decay}\")\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n super().__init__(params, defaults, optim_bits, is_paged)\n\n if args is None:\n args = {}\n args[\"optim_bits\"] = optim_bits\n args[\"min_8bit_size\"] = min_8bit_size\n args[\"percentile_clipping\"] = percentile_clipping\n args[\"block_wise\"] = block_wise\n args[\"max_unorm\"] = max_unorm\n args[\"skip_zeros\"] = skip_zeros\n\n self.args = MockArgs(args)\n else:\n self.args = args\n\n self.optimizer_name = optimizer_name\n\n @torch.no_grad()\n def init_state(self, group, p, gindex, pindex):\n config = self.get_config(gindex, pindex, group)\n\n if config[\"optim_bits\"] 
== 32:\n dtype = torch.float32\n elif config[\"optim_bits\"] == 8:\n dtype = torch.uint8\n else:\n raise NotImplementedError(f\"Amount of optimizer bits not supported: {config['optim_bits']}\")\n\n if p.numel() < config[\"min_8bit_size\"]:\n dtype = torch.float32\n\n state = self.state[p]\n state[\"step\"] = 0\n\n if dtype == torch.float32:\n state[\"state1\"] = self.get_state_buffer(p, dtype=torch.float32)","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.initialize","uri":"program://bitsandbytes/function/bitsandbytes.optim.optimizer.initialize#L32-L37","kind":"function","name":"initialize","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":32,"end_line":37,"context_start_line":12,"context_end_line":57,"code":"import bitsandbytes.functional as F\nfrom bitsandbytes.utils import sync_gpu\n\n\nclass MockArgs:\n def __init__(self, initial_data):\n for key in initial_data:\n setattr(self, key, initial_data[key])\n\n\nclass GlobalOptimManager:\n \"\"\"\n A global optimizer manager for enabling custom optimizer configs.\n \"\"\"\n\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.pid2config = {}\n self.index2config = {}\n self.optimizer = None\n self.uses_config_override = False\n self.module_weight_config_triple = []\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def register_parameters(self, params):\n param_groups = list(params)\n if not isinstance(param_groups[0], dict):\n param_groups = [{\"params\": param_groups}]\n\n for group_index, group in enumerate(param_groups):\n for p_index, p in enumerate(group[\"params\"]):\n if id(p) in self.pid2config:\n self.index2config[(group_index, p_index)] = self.pid2config[id(p)]\n\n def override_config(self, parameters, key=None, value=None, key_value_dict=None):\n \"\"\"","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.get_instance","uri":"program://bitsandbytes/function/bitsandbytes.optim.optimizer.get_instance#L40-L44","kind":"function","name":"get_instance","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":40,"end_line":44,"context_start_line":20,"context_end_line":64,"code":"\n\nclass GlobalOptimManager:\n \"\"\"\n A global optimizer manager for enabling custom optimizer configs.\n \"\"\"\n\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.pid2config = {}\n self.index2config = {}\n self.optimizer = None\n self.uses_config_override = False\n self.module_weight_config_triple = []\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def register_parameters(self, params):\n param_groups = list(params)\n if not isinstance(param_groups[0], dict):\n param_groups = [{\"params\": param_groups}]\n\n for group_index, group in enumerate(param_groups):\n for p_index, p in enumerate(group[\"params\"]):\n if id(p) in self.pid2config:\n self.index2config[(group_index, p_index)] = self.pid2config[id(p)]\n\n def override_config(self, parameters, key=None, value=None, key_value_dict=None):\n \"\"\"\n 
Override initial optimizer config with specific hyperparameters.\n\n The key-values of the optimizer config for the input parameters are overridden\n This can be both, optimizer parameters like `betas` or `lr`, or it can be\n 8-bit specific parameters like `optim_bits` or `percentile_clipping`.\n\n Arguments:","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.register_parameters","uri":"program://bitsandbytes/function/bitsandbytes.optim.optimizer.register_parameters#L46-L54","kind":"function","name":"register_parameters","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":46,"end_line":54,"context_start_line":26,"context_end_line":74,"code":"\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.pid2config = {}\n self.index2config = {}\n self.optimizer = None\n self.uses_config_override = False\n self.module_weight_config_triple = []\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def register_parameters(self, params):\n param_groups = list(params)\n if not isinstance(param_groups[0], dict):\n param_groups = [{\"params\": param_groups}]\n\n for group_index, group in enumerate(param_groups):\n for p_index, p in enumerate(group[\"params\"]):\n if id(p) in self.pid2config:\n self.index2config[(group_index, p_index)] = self.pid2config[id(p)]\n\n def override_config(self, parameters, key=None, value=None, key_value_dict=None):\n \"\"\"\n Override initial optimizer config with specific hyperparameters.\n\n The key-values of the optimizer config for the input parameters are overridden\n This can be both, optimizer parameters like `betas` or `lr`, or it can be\n 8-bit specific parameters like `optim_bits` or `percentile_clipping`.\n\n Arguments:\n parameters (`torch.Tensor` or `list(torch.Tensors)`):\n The input parameters.\n key (`str`):\n The hyperparameter to override.\n value:\n The hyperparameter value.\n key_value_dict (`dict`):\n A dictionary with multiple key-values to override.\n\n Example:","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.override_config","uri":"program://bitsandbytes/function/bitsandbytes.optim.optimizer.override_config#L56-L107","kind":"function","name":"override_config","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":56,"end_line":107,"context_start_line":36,"context_end_line":127,"code":" self.uses_config_override = False\n self.module_weight_config_triple = []\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def register_parameters(self, params):\n param_groups = list(params)\n if not isinstance(param_groups[0], dict):\n param_groups = [{\"params\": param_groups}]\n\n for group_index, group in enumerate(param_groups):\n for p_index, p in enumerate(group[\"params\"]):\n if id(p) in self.pid2config:\n self.index2config[(group_index, p_index)] = self.pid2config[id(p)]\n\n def override_config(self, parameters, key=None, value=None, key_value_dict=None):\n \"\"\"\n Override initial optimizer config with specific hyperparameters.\n\n The key-values of the optimizer config for the input 
parameters are overridden\n This can be both, optimizer parameters like `betas` or `lr`, or it can be\n 8-bit specific parameters like `optim_bits` or `percentile_clipping`.\n\n Arguments:\n parameters (`torch.Tensor` or `list(torch.Tensors)`):\n The input parameters.\n key (`str`):\n The hyperparameter to override.\n value:\n The hyperparameter value.\n key_value_dict (`dict`):\n A dictionary with multiple key-values to override.\n\n Example:\n\n ```py\n import torch\n import bitsandbytes as bnb\n\n mng = bnb.optim.GlobalOptimManager.get_instance()\n\n model = MyModel()\n mng.register_parameters(model.parameters()) # 1. register parameters while still on CPU\n\n model = model.cuda()\n # use 8-bit optimizer states for all parameters\n adam = bnb.optim.Adam(model.parameters(), lr=0.001, optim_bits=8)\n\n # 2. override: the parameter model.fc1.weight now uses 32-bit Adam\n mng.override_config(model.fc1.weight, 'optim_bits', 32)\n ```\n \"\"\"\n self.uses_config_override = True\n if isinstance(parameters, torch.nn.Parameter):\n parameters = [parameters]\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n if key is not None and value is not None:\n assert key_value_dict is None\n key_value_dict = {key: value}\n\n if key_value_dict is not None:\n for p in parameters:\n if id(p) in self.pid2config:\n self.pid2config[id(p)].update(key_value_dict)\n else:\n self.pid2config[id(p)] = key_value_dict\n\n def register_module_override(self, module, param_name, config):\n self.module_weight_config_triple.append((module, param_name, config))\n\n\nclass Optimizer8bit(torch.optim.Optimizer):\n def __init__(self, params, defaults, optim_bits=32, is_paged=False):\n \"\"\"\n Base 8-bit optimizer class.\n\n Arguments:\n params (`torch.Tensor`):\n The input parameters to optimize.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(params, defaults)\n self.initialized = False","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.register_module_override","uri":"program://bitsandbytes/function/bitsandbytes.optim.optimizer.register_module_override#L109-L110","kind":"function","name":"register_module_override","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":109,"end_line":110,"context_start_line":89,"context_end_line":130,"code":" # 2. 
override: the parameter model.fc1.weight now uses 32-bit Adam\n mng.override_config(model.fc1.weight, 'optim_bits', 32)\n ```\n \"\"\"\n self.uses_config_override = True\n if isinstance(parameters, torch.nn.Parameter):\n parameters = [parameters]\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n if key is not None and value is not None:\n assert key_value_dict is None\n key_value_dict = {key: value}\n\n if key_value_dict is not None:\n for p in parameters:\n if id(p) in self.pid2config:\n self.pid2config[id(p)].update(key_value_dict)\n else:\n self.pid2config[id(p)] = key_value_dict\n\n def register_module_override(self, module, param_name, config):\n self.module_weight_config_triple.append((module, param_name, config))\n\n\nclass Optimizer8bit(torch.optim.Optimizer):\n def __init__(self, params, defaults, optim_bits=32, is_paged=False):\n \"\"\"\n Base 8-bit optimizer class.\n\n Arguments:\n params (`torch.Tensor`):\n The input parameters to optimize.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n is_paged (`bool`, defaults to `False`):\n Whether the optimizer is a paged optimizer or not.\n \"\"\"\n super().__init__(params, defaults)\n self.initialized = False\n self.name2qmap = {}\n self.is_paged = is_paged\n self.page_mng = F.GlobalPageManager.get_instance()","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.fill_qmap","uri":"program://bitsandbytes/function/bitsandbytes.optim.optimizer.fill_qmap#L151-L153","kind":"function","name":"fill_qmap","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":151,"end_line":153,"context_start_line":131,"context_end_line":173,"code":"\n self.mng = GlobalOptimManager.get_instance()\n self.non_castable_tensor_keys = {\n \"qmap1\",\n \"qmap2\",\n \"max1\",\n \"max2\",\n \"new_max1\",\n \"new_max2\",\n \"state1\",\n \"state2\",\n \"gnorm_vec\",\n \"absmax1\",\n \"absmax2\",\n \"unorm_vec\",\n }\n\n if optim_bits == 8:\n self.fill_qmap()\n\n def fill_qmap(self):\n self.name2qmap[\"dynamic\"] = F.create_dynamic_map(signed=True)\n self.name2qmap[\"udynamic\"] = F.create_dynamic_map(signed=False)\n\n def __setstate__(self, state):\n super().__setstate__(state)\n\n def load_state_dict(self, state_dict, move_to_device=True):\n \"\"\"Load an optimizer state.\n\n Arguments:\n state_dict (`dict`):\n An optimizer state (should be returned from a call to `state_dict`) to load.\n move_to_device (`bool`, defaults to `True`):\n Whether to move the optimizer's state to the device.\n \"\"\"\n # deepcopy, to be consistent with module API\n state_dict = deepcopy(state_dict)\n # Validate the state_dict\n groups = self.param_groups\n saved_groups = state_dict[\"param_groups\"]\n\n if len(groups) != len(saved_groups):","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.__setstate__","uri":"program://bitsandbytes/function/bitsandbytes.optim.optimizer.__setstate__#L155-L156","kind":"function","name":"__setstate__","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":155,"end_line":156,"context_start_line":135,"context_end_line":176,"code":" \"qmap2\",\n \"max1\",\n \"max2\",\n \"new_max1\",\n \"new_max2\",\n \"state1\",\n \"state2\",\n \"gnorm_vec\",\n \"absmax1\",\n \"absmax2\",\n \"unorm_vec\",\n }\n\n if optim_bits == 8:\n 
self.fill_qmap()\n\n def fill_qmap(self):\n self.name2qmap[\"dynamic\"] = F.create_dynamic_map(signed=True)\n self.name2qmap[\"udynamic\"] = F.create_dynamic_map(signed=False)\n\n def __setstate__(self, state):\n super().__setstate__(state)\n\n def load_state_dict(self, state_dict, move_to_device=True):\n \"\"\"Load an optimizer state.\n\n Arguments:\n state_dict (`dict`):\n An optimizer state (should be returned from a call to `state_dict`) to load.\n move_to_device (`bool`, defaults to `True`):\n Whether to move the optimizer's state to the device.\n \"\"\"\n # deepcopy, to be consistent with module API\n state_dict = deepcopy(state_dict)\n # Validate the state_dict\n groups = self.param_groups\n saved_groups = state_dict[\"param_groups\"]\n\n if len(groups) != len(saved_groups):\n raise ValueError(\"loaded state dict has a different number of parameter groups\")\n param_lens = (len(g[\"params\"]) for g in groups)\n saved_lens = (len(g[\"params\"]) for g in saved_groups)","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.load_state_dict","uri":"program://bitsandbytes/function/bitsandbytes.optim.optimizer.load_state_dict#L158-L230","kind":"function","name":"load_state_dict","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":158,"end_line":230,"context_start_line":138,"context_end_line":250,"code":" \"new_max1\",\n \"new_max2\",\n \"state1\",\n \"state2\",\n \"gnorm_vec\",\n \"absmax1\",\n \"absmax2\",\n \"unorm_vec\",\n }\n\n if optim_bits == 8:\n self.fill_qmap()\n\n def fill_qmap(self):\n self.name2qmap[\"dynamic\"] = F.create_dynamic_map(signed=True)\n self.name2qmap[\"udynamic\"] = F.create_dynamic_map(signed=False)\n\n def __setstate__(self, state):\n super().__setstate__(state)\n\n def load_state_dict(self, state_dict, move_to_device=True):\n \"\"\"Load an optimizer state.\n\n Arguments:\n state_dict (`dict`):\n An optimizer state (should be returned from a call to `state_dict`) to load.\n move_to_device (`bool`, defaults to `True`):\n Whether to move the optimizer's state to the device.\n \"\"\"\n # deepcopy, to be consistent with module API\n state_dict = deepcopy(state_dict)\n # Validate the state_dict\n groups = self.param_groups\n saved_groups = state_dict[\"param_groups\"]\n\n if len(groups) != len(saved_groups):\n raise ValueError(\"loaded state dict has a different number of parameter groups\")\n param_lens = (len(g[\"params\"]) for g in groups)\n saved_lens = (len(g[\"params\"]) for g in saved_groups)\n if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):\n raise ValueError(\n \"loaded state dict contains a parameter group that doesn't match the size of optimizer's group\",\n )\n\n # Update the state\n id_map = {\n old_id: p\n for old_id, p in zip(\n chain.from_iterable(g[\"params\"] for g in saved_groups),\n chain.from_iterable(g[\"params\"] for g in groups),\n )\n }\n\n def cast(param, value):\n r\"\"\"Make a deep copy of value, casting all tensors to device of param.\"\"\"\n if isinstance(value, torch.Tensor):\n # Floating-point types are a bit special here. 
They are the only ones\n # that are assumed to always match the type of params.\n if param.is_floating_point() and value.dtype != torch.uint8:\n value = value.to(param.dtype)\n return value\n elif isinstance(value, dict):\n for k, v in value.items():\n if k in self.non_castable_tensor_keys:\n if move_to_device:\n value[k] = v.to(param.device)\n else:\n value[k] = cast(param, v)\n\n return value\n elif isinstance(value, container_abcs.Iterable):\n return type(value)(cast(param, v) for v in value)\n else:\n return value\n\n # Copy state assigned to params (and cast tensors to appropriate types).\n # State that is not assigned to params is copied as is (needed for\n # backward compatibility).\n state = defaultdict(dict)\n for k, v in state_dict[\"state\"].items():\n if k in id_map:\n param = id_map[k]\n state[param] = cast(param, v)\n else:\n state[k] = v\n\n # Update parameter groups, setting their 'params' value\n def update_group(group, new_group):\n new_group[\"params\"] = group[\"params\"]\n return new_group\n\n param_groups = [update_group(g, ng) for g, ng in zip(groups, saved_groups)]\n self.__setstate__({\"state\": state, \"param_groups\": param_groups})\n\n def to_gpu(self):\n for gindex, group in enumerate(self.param_groups):\n for pindex, p in enumerate(group[\"params\"]):\n if p in self.state:\n values = self.state[p]\n for k, v in values.items():\n if isinstance(v, torch.Tensor):\n is_paged = getattr(v, \"is_paged\", False)\n if not is_paged:\n self.state[p][k] = v.to(p.device)\n\n def check_overrides(self):\n for module, attr, config in self.mng.module_weight_config_triple:\n pmodule = getattr(module, attr)\n assert pmodule is not None\n assert isinstance(pmodule, torch.Tensor) or isinstance(pmodule, torch.Parameter)\n found = False\n for gindex, group in enumerate(self.param_groups):\n if found:","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.to_gpu","uri":"program://bitsandbytes/function/bitsandbytes.optim.optimizer.to_gpu#L232-L241","kind":"function","name":"to_gpu","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":232,"end_line":241,"context_start_line":212,"context_end_line":261,"code":"\n # Copy state assigned to params (and cast tensors to appropriate types).\n # State that is not assigned to params is copied as is (needed for\n # backward compatibility).\n state = defaultdict(dict)\n for k, v in state_dict[\"state\"].items():\n if k in id_map:\n param = id_map[k]\n state[param] = cast(param, v)\n else:\n state[k] = v\n\n # Update parameter groups, setting their 'params' value\n def update_group(group, new_group):\n new_group[\"params\"] = group[\"params\"]\n return new_group\n\n param_groups = [update_group(g, ng) for g, ng in zip(groups, saved_groups)]\n self.__setstate__({\"state\": state, \"param_groups\": param_groups})\n\n def to_gpu(self):\n for gindex, group in enumerate(self.param_groups):\n for pindex, p in enumerate(group[\"params\"]):\n if p in self.state:\n values = self.state[p]\n for k, v in values.items():\n if isinstance(v, torch.Tensor):\n is_paged = getattr(v, \"is_paged\", False)\n if not is_paged:\n self.state[p][k] = v.to(p.device)\n\n def check_overrides(self):\n for module, attr, config in self.mng.module_weight_config_triple:\n pmodule = getattr(module, attr)\n assert pmodule is not None\n assert isinstance(pmodule, torch.Tensor) or isinstance(pmodule, torch.Parameter)\n found = False\n for gindex, 
group in enumerate(self.param_groups):\n if found:\n break\n for pindex, p in enumerate(group[\"params\"]):\n if found:\n break\n if id(p) == id(pmodule):\n # found the matching parameter\n # init override\n self.mng.pid2config[id(p)] = config\n self.mng.index2config[(gindex, pindex)] = self.mng.pid2config[id(p)]\n found = True\n","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.check_overrides","uri":"program://bitsandbytes/function/bitsandbytes.optim.optimizer.check_overrides#L243-L260","kind":"function","name":"check_overrides","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":243,"end_line":260,"context_start_line":223,"context_end_line":280,"code":"\n # Update parameter groups, setting their 'params' value\n def update_group(group, new_group):\n new_group[\"params\"] = group[\"params\"]\n return new_group\n\n param_groups = [update_group(g, ng) for g, ng in zip(groups, saved_groups)]\n self.__setstate__({\"state\": state, \"param_groups\": param_groups})\n\n def to_gpu(self):\n for gindex, group in enumerate(self.param_groups):\n for pindex, p in enumerate(group[\"params\"]):\n if p in self.state:\n values = self.state[p]\n for k, v in values.items():\n if isinstance(v, torch.Tensor):\n is_paged = getattr(v, \"is_paged\", False)\n if not is_paged:\n self.state[p][k] = v.to(p.device)\n\n def check_overrides(self):\n for module, attr, config in self.mng.module_weight_config_triple:\n pmodule = getattr(module, attr)\n assert pmodule is not None\n assert isinstance(pmodule, torch.Tensor) or isinstance(pmodule, torch.Parameter)\n found = False\n for gindex, group in enumerate(self.param_groups):\n if found:\n break\n for pindex, p in enumerate(group[\"params\"]):\n if found:\n break\n if id(p) == id(pmodule):\n # found the matching parameter\n # init override\n self.mng.pid2config[id(p)] = config\n self.mng.index2config[(gindex, pindex)] = self.mng.pid2config[id(p)]\n found = True\n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"Perform a single optimization step.\n\n Arguments:\n closure (`Callable`, *optional*, defaults to `None`):\n A closure that reevaluates the model and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n if not self.initialized:\n self.check_overrides()\n self.to_gpu() # needed for fairseq pure fp16 training\n self.initialized = True\n\n # if self.is_paged: self.page_mng.prefetch_all()","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.step","uri":"program://bitsandbytes/function/bitsandbytes.optim.optimizer.step#L263-L298","kind":"function","name":"step","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":263,"end_line":298,"context_start_line":243,"context_end_line":318,"code":" def check_overrides(self):\n for module, attr, config in self.mng.module_weight_config_triple:\n pmodule = getattr(module, attr)\n assert pmodule is not None\n assert isinstance(pmodule, torch.Tensor) or isinstance(pmodule, torch.Parameter)\n found = False\n for gindex, group in enumerate(self.param_groups):\n if found:\n break\n for pindex, p in enumerate(group[\"params\"]):\n if found:\n break\n if id(p) == id(pmodule):\n # found the matching parameter\n # init override\n self.mng.pid2config[id(p)] = config\n 
self.mng.index2config[(gindex, pindex)] = self.mng.pid2config[id(p)]\n found = True\n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"Perform a single optimization step.\n\n Arguments:\n closure (`Callable`, *optional*, defaults to `None`):\n A closure that reevaluates the model and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n if not self.initialized:\n self.check_overrides()\n self.to_gpu() # needed for fairseq pure fp16 training\n self.initialized = True\n\n # if self.is_paged: self.page_mng.prefetch_all()\n p = None\n for gindex, group in enumerate(self.param_groups):\n for pindex, p in enumerate(group[\"params\"]):\n if p.grad is None:\n continue\n state = self.state[p]\n if len(state) == 0:\n self.init_state(group, p, gindex, pindex)\n\n self.prefetch_state(p)\n self.update_step(group, p, gindex, pindex)\n sync_gpu(p)\n if self.is_paged and p is not None:\n # all paged operations are asynchronous, we need\n # to sync to make sure all tensors are in the right state\n sync_gpu(p)\n\n return loss\n\n def get_config(self, gindex, pindex, group):\n config = {}\n config[\"betas\"] = group[\"betas\"]\n config[\"eps\"] = group[\"eps\"]\n config[\"weight_decay\"] = group[\"weight_decay\"]\n config[\"lr\"] = group[\"lr\"]\n config[\"alpha\"] = group.get(\"alpha\", 0.0)\n config[\"t_alpha\"] = group.get(\"t_alpha\", 0)\n config[\"t_beta3\"] = group.get(\"t_beta3\", 0)\n config[\"optim_bits\"] = self.args.optim_bits\n config[\"min_8bit_size\"] = self.args.min_8bit_size\n config[\"percentile_clipping\"] = self.args.percentile_clipping\n config[\"block_wise\"] = self.args.block_wise\n config[\"max_unorm\"] = self.args.max_unorm\n config[\"skip_zeros\"] = self.args.skip_zeros\n\n if (gindex, pindex) in self.mng.index2config:\n config.update(self.mng.index2config[(gindex, pindex)])\n return config","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.get_config","uri":"program://bitsandbytes/function/bitsandbytes.optim.optimizer.get_config#L300-L318","kind":"function","name":"get_config","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":300,"end_line":318,"context_start_line":280,"context_end_line":338,"code":" # if self.is_paged: self.page_mng.prefetch_all()\n p = None\n for gindex, group in enumerate(self.param_groups):\n for pindex, p in enumerate(group[\"params\"]):\n if p.grad is None:\n continue\n state = self.state[p]\n if len(state) == 0:\n self.init_state(group, p, gindex, pindex)\n\n self.prefetch_state(p)\n self.update_step(group, p, gindex, pindex)\n sync_gpu(p)\n if self.is_paged and p is not None:\n # all paged operations are asynchronous, we need\n # to sync to make sure all tensors are in the right state\n sync_gpu(p)\n\n return loss\n\n def get_config(self, gindex, pindex, group):\n config = {}\n config[\"betas\"] = group[\"betas\"]\n config[\"eps\"] = group[\"eps\"]\n config[\"weight_decay\"] = group[\"weight_decay\"]\n config[\"lr\"] = group[\"lr\"]\n config[\"alpha\"] = group.get(\"alpha\", 0.0)\n config[\"t_alpha\"] = group.get(\"t_alpha\", 0)\n config[\"t_beta3\"] = group.get(\"t_beta3\", 0)\n config[\"optim_bits\"] = self.args.optim_bits\n config[\"min_8bit_size\"] = self.args.min_8bit_size\n config[\"percentile_clipping\"] = self.args.percentile_clipping\n config[\"block_wise\"] = self.args.block_wise\n config[\"max_unorm\"] = self.args.max_unorm\n 
config[\"skip_zeros\"] = self.args.skip_zeros\n\n if (gindex, pindex) in self.mng.index2config:\n config.update(self.mng.index2config[(gindex, pindex)])\n return config\n\n def init_state(self, group, p, gindex, pindex):\n raise NotImplementedError(\"init_state method needs to be overridden\")\n\n def update_step(self, group, p, gindex, pindex):\n raise NotImplementedError(\"The update_step method needs to be overridden\")\n\n def get_state_buffer(self, p, dtype=torch.float32):\n if not self.is_paged or p.numel() < 1e5:\n return torch.zeros_like(p, dtype=dtype, device=p.device)\n else:\n # > 1 MB\n buff = F.get_paged(*p.shape, dtype=dtype, device=p.device)\n F.fill(buff, 0)\n self.page_mng.paged_tensors.append(buff)\n return buff\n\n def prefetch_state(self, p):\n if self.is_paged:\n state = self.state[p]","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.init_state","uri":"program://bitsandbytes/function/bitsandbytes.optim.optimizer.init_state#L670-L711","kind":"function","name":"init_state","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":670,"end_line":711,"context_start_line":650,"context_end_line":731,"code":" raise ValueError(f\"Invalid weight_decay value: {weight_decay}\")\n defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)\n super().__init__(params, defaults, optim_bits, is_paged)\n\n if args is None:\n args = {}\n args[\"optim_bits\"] = optim_bits\n args[\"min_8bit_size\"] = min_8bit_size\n args[\"percentile_clipping\"] = percentile_clipping\n args[\"block_wise\"] = block_wise\n args[\"max_unorm\"] = max_unorm\n args[\"skip_zeros\"] = skip_zeros\n\n self.args = MockArgs(args)\n else:\n self.args = args\n\n self.optimizer_name = optimizer_name\n\n @torch.no_grad()\n def init_state(self, group, p, gindex, pindex):\n config = self.get_config(gindex, pindex, group)\n\n if config[\"optim_bits\"] == 32:\n dtype = torch.float32\n elif config[\"optim_bits\"] == 8:\n dtype = torch.uint8\n else:\n raise NotImplementedError(f\"Amount of optimizer bits not supported: {config['optim_bits']}\")\n\n if p.numel() < config[\"min_8bit_size\"]:\n dtype = torch.float32\n\n state = self.state[p]\n state[\"step\"] = 0\n\n if dtype == torch.float32:\n state[\"state1\"] = self.get_state_buffer(p, dtype=torch.float32)\n elif dtype == torch.uint8:\n if state[\"step\"] == 0:\n if \"dynamic\" not in self.name2qmap:\n self.fill_qmap()\n self.name2qmap[\"dynamic\"] = self.name2qmap[\"dynamic\"].to(p.device)\n\n state[\"state1\"] = self.get_state_buffer(p, dtype=torch.uint8)\n state[\"qmap1\"] = self.name2qmap[\"dynamic\"]\n\n if config[\"block_wise\"]:\n blocksize = 256\n n = p.numel()\n blocks = (n // blocksize) + bool(n % blocksize)\n\n state[\"absmax1\"] = torch.zeros((blocks,), dtype=torch.float32, device=p.device)\n else:\n state[\"max1\"] = torch.zeros((1,), dtype=torch.float32, device=p.device)\n state[\"new_max1\"] = torch.zeros((1,), dtype=torch.float32, device=p.device)\n\n if config[\"percentile_clipping\"] < 100:\n state[\"gnorm_vec\"] = torch.zeros((100,), device=p.device)\n\n if config[\"max_unorm\"] > 0.0:\n state[\"unorm_vec\"] = torch.zeros((1,), device=p.device)\n\n @torch.no_grad()\n def update_step(self, group, p, gindex, pindex):\n # avoid update error from non-contiguous memory layout\n p.data = p.data.contiguous()\n p.grad = p.grad.contiguous()\n\n state = self.state[p]\n grad = p.grad\n\n config = self.get_config(gindex, 
pindex, group)\n\n state[\"step\"] += 1\n step = state[\"step\"]\n\n if config[\"percentile_clipping\"] < 100:\n current_gnorm, clip_value, gnorm_scale = F.percentile_clipping(\n grad,\n state[\"gnorm_vec\"],\n step,","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.update_step","uri":"program://bitsandbytes/function/bitsandbytes.optim.optimizer.update_step#L714-L804","kind":"function","name":"update_step","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":714,"end_line":804,"context_start_line":694,"context_end_line":804,"code":" state[\"state1\"] = self.get_state_buffer(p, dtype=torch.uint8)\n state[\"qmap1\"] = self.name2qmap[\"dynamic\"]\n\n if config[\"block_wise\"]:\n blocksize = 256\n n = p.numel()\n blocks = (n // blocksize) + bool(n % blocksize)\n\n state[\"absmax1\"] = torch.zeros((blocks,), dtype=torch.float32, device=p.device)\n else:\n state[\"max1\"] = torch.zeros((1,), dtype=torch.float32, device=p.device)\n state[\"new_max1\"] = torch.zeros((1,), dtype=torch.float32, device=p.device)\n\n if config[\"percentile_clipping\"] < 100:\n state[\"gnorm_vec\"] = torch.zeros((100,), device=p.device)\n\n if config[\"max_unorm\"] > 0.0:\n state[\"unorm_vec\"] = torch.zeros((1,), device=p.device)\n\n @torch.no_grad()\n def update_step(self, group, p, gindex, pindex):\n # avoid update error from non-contiguous memory layout\n p.data = p.data.contiguous()\n p.grad = p.grad.contiguous()\n\n state = self.state[p]\n grad = p.grad\n\n config = self.get_config(gindex, pindex, group)\n\n state[\"step\"] += 1\n step = state[\"step\"]\n\n if config[\"percentile_clipping\"] < 100:\n current_gnorm, clip_value, gnorm_scale = F.percentile_clipping(\n grad,\n state[\"gnorm_vec\"],\n step,\n config[\"percentile_clipping\"],\n )\n else:\n gnorm_scale = 1.0\n\n if state[\"state1\"].dtype == torch.float:\n F.optimizer_update_32bit(\n self.optimizer_name,\n grad,\n p,\n state[\"state1\"],\n config[\"betas\"][0],\n config[\"eps\"],\n step,\n config[\"lr\"],\n None,\n config[\"betas\"][1],\n 0.0,\n 0.0,\n config[\"weight_decay\"],\n gnorm_scale,\n state[\"unorm_vec\"] if config[\"max_unorm\"] > 0.0 else None,\n max_unorm=config[\"max_unorm\"],\n skip_zeros=config[\"skip_zeros\"],\n )\n\n elif state[\"state1\"].dtype == torch.uint8 and not config[\"block_wise\"]:\n F.optimizer_update_8bit(\n self.optimizer_name,\n grad,\n p,\n state[\"state1\"],\n None,\n config[\"betas\"][0],\n config[\"betas\"][1],\n config[\"eps\"],\n step,\n config[\"lr\"],\n state[\"qmap1\"],\n None,\n state[\"max1\"],\n None,\n state[\"new_max1\"],\n None,\n config[\"weight_decay\"],\n gnorm_scale,\n state[\"unorm_vec\"] if config[\"max_unorm\"] > 0.0 else None,\n max_unorm=config[\"max_unorm\"],\n )\n\n state[\"max1\"], state[\"new_max1\"] = state[\"new_max1\"], state[\"max1\"]\n elif state[\"state1\"].dtype == torch.uint8 and config[\"block_wise\"]:\n F.optimizer_update_8bit_blockwise(\n self.optimizer_name,\n grad,\n p,\n state[\"state1\"],\n None,\n config[\"betas\"][0],\n config[\"betas\"][1],\n 0.0,\n 0.0,\n config[\"eps\"],\n step,\n config[\"lr\"],\n state[\"qmap1\"],\n None,\n state[\"absmax1\"],\n None,\n config[\"weight_decay\"],\n gnorm_scale=gnorm_scale,\n skip_zeros=config[\"skip_zeros\"],\n )","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} 
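When `percentile_clipping < 100`, `update_step` calls `F.percentile_clipping`, which maintains `gnorm_vec` as a ring buffer over the last 100 gradient norms and derives a `gnorm_scale` that shrinks unusually large gradients. The following is a simplified pure-PyTorch illustration of the idea only; the actual implementation's bookkeeping (for instance, whether squared norms are stored) may differ:

```py
import torch

def percentile_clipping_sketch(grad: torch.Tensor, gnorm_vec: torch.Tensor,
                               step: int, percentile: int):
    # gnorm_vec has 100 slots, like state["gnorm_vec"] above; step starts
    # at 1 because update_step increments it before clipping.
    current_gnorm = grad.norm()
    gnorm_vec[(step - 1) % 100] = current_gnorm        # ring buffer of recent norms
    history = gnorm_vec[: min(step, 100)]
    clip_value = history.sort().values[int(len(history) * percentile / 100)]
    gnorm_scale = min(1.0, (clip_value / current_gnorm).item())
    return current_gnorm.item(), clip_value.item(), gnorm_scale

gnorm_vec = torch.zeros(100)
for step in range(1, 6):
    _, _, scale = percentile_clipping_sketch(torch.randn(10) * step, gnorm_vec, step, 5)
```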
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.get_state_buffer","uri":"program://bitsandbytes/function/bitsandbytes.optim.optimizer.get_state_buffer#L326-L334","kind":"function","name":"get_state_buffer","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":326,"end_line":334,"context_start_line":306,"context_end_line":354,"code":" config[\"alpha\"] = group.get(\"alpha\", 0.0)\n config[\"t_alpha\"] = group.get(\"t_alpha\", 0)\n config[\"t_beta3\"] = group.get(\"t_beta3\", 0)\n config[\"optim_bits\"] = self.args.optim_bits\n config[\"min_8bit_size\"] = self.args.min_8bit_size\n config[\"percentile_clipping\"] = self.args.percentile_clipping\n config[\"block_wise\"] = self.args.block_wise\n config[\"max_unorm\"] = self.args.max_unorm\n config[\"skip_zeros\"] = self.args.skip_zeros\n\n if (gindex, pindex) in self.mng.index2config:\n config.update(self.mng.index2config[(gindex, pindex)])\n return config\n\n def init_state(self, group, p, gindex, pindex):\n raise NotImplementedError(\"init_state method needs to be overridden\")\n\n def update_step(self, group, p, gindex, pindex):\n raise NotImplementedError(\"The update_step method needs to be overridden\")\n\n def get_state_buffer(self, p, dtype=torch.float32):\n if not self.is_paged or p.numel() < 1e5:\n return torch.zeros_like(p, dtype=dtype, device=p.device)\n else:\n # > 1 MB\n buff = F.get_paged(*p.shape, dtype=dtype, device=p.device)\n F.fill(buff, 0)\n self.page_mng.paged_tensors.append(buff)\n return buff\n\n def prefetch_state(self, p):\n if self.is_paged:\n state = self.state[p]\n s1 = state[\"state1\"]\n is_paged = getattr(s1, \"is_paged\", False)\n if is_paged:\n F.prefetch_tensor(state[\"state1\"])\n if \"state2\" in state:\n F.prefetch_tensor(state[\"state2\"])\n\n\nclass Optimizer2State(Optimizer8bit):\n def __init__(\n self,\n optimizer_name,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.prefetch_state","uri":"program://bitsandbytes/function/bitsandbytes.optim.optimizer.prefetch_state#L336-L344","kind":"function","name":"prefetch_state","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":336,"end_line":344,"context_start_line":316,"context_end_line":364,"code":" if (gindex, pindex) in self.mng.index2config:\n config.update(self.mng.index2config[(gindex, pindex)])\n return config\n\n def init_state(self, group, p, gindex, pindex):\n raise NotImplementedError(\"init_state method needs to be overridden\")\n\n def update_step(self, group, p, gindex, pindex):\n raise NotImplementedError(\"The update_step method needs to be overridden\")\n\n def get_state_buffer(self, p, dtype=torch.float32):\n if not self.is_paged or p.numel() < 1e5:\n return torch.zeros_like(p, dtype=dtype, device=p.device)\n else:\n # > 1 MB\n buff = F.get_paged(*p.shape, dtype=dtype, device=p.device)\n F.fill(buff, 0)\n self.page_mng.paged_tensors.append(buff)\n return buff\n\n def prefetch_state(self, p):\n if self.is_paged:\n state = self.state[p]\n s1 = state[\"state1\"]\n is_paged = getattr(s1, \"is_paged\", False)\n if is_paged:\n F.prefetch_tensor(state[\"state1\"])\n if \"state2\" in state:\n F.prefetch_tensor(state[\"state2\"])\n\n\nclass Optimizer2State(Optimizer8bit):\n def __init__(\n self,\n optimizer_name,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0.0,\n optim_bits=32,\n 
args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n max_unorm=0.0,\n skip_zeros=False,\n is_paged=False,\n alpha=0.0,","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.cast","uri":"program://bitsandbytes/function/bitsandbytes.optim.optimizer.cast#L191-L211","kind":"function","name":"cast","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":191,"end_line":211,"context_start_line":171,"context_end_line":231,"code":" saved_groups = state_dict[\"param_groups\"]\n\n if len(groups) != len(saved_groups):\n raise ValueError(\"loaded state dict has a different number of parameter groups\")\n param_lens = (len(g[\"params\"]) for g in groups)\n saved_lens = (len(g[\"params\"]) for g in saved_groups)\n if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):\n raise ValueError(\n \"loaded state dict contains a parameter group that doesn't match the size of optimizer's group\",\n )\n\n # Update the state\n id_map = {\n old_id: p\n for old_id, p in zip(\n chain.from_iterable(g[\"params\"] for g in saved_groups),\n chain.from_iterable(g[\"params\"] for g in groups),\n )\n }\n\n def cast(param, value):\n r\"\"\"Make a deep copy of value, casting all tensors to device of param.\"\"\"\n if isinstance(value, torch.Tensor):\n # Floating-point types are a bit special here. They are the only ones\n # that are assumed to always match the type of params.\n if param.is_floating_point() and value.dtype != torch.uint8:\n value = value.to(param.dtype)\n return value\n elif isinstance(value, dict):\n for k, v in value.items():\n if k in self.non_castable_tensor_keys:\n if move_to_device:\n value[k] = v.to(param.device)\n else:\n value[k] = cast(param, v)\n\n return value\n elif isinstance(value, container_abcs.Iterable):\n return type(value)(cast(param, v) for v in value)\n else:\n return value\n\n # Copy state assigned to params (and cast tensors to appropriate types).\n # State that is not assigned to params is copied as is (needed for\n # backward compatibility).\n state = defaultdict(dict)\n for k, v in state_dict[\"state\"].items():\n if k in id_map:\n param = id_map[k]\n state[param] = cast(param, v)\n else:\n state[k] = v\n\n # Update parameter groups, setting their 'params' value\n def update_group(group, new_group):\n new_group[\"params\"] = group[\"params\"]\n return new_group\n\n param_groups = [update_group(g, ng) for g, ng in zip(groups, saved_groups)]\n self.__setstate__({\"state\": state, \"param_groups\": param_groups})\n","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.optimizer.update_group","uri":"program://bitsandbytes/function/bitsandbytes.optim.optimizer.update_group#L225-L227","kind":"function","name":"update_group","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":225,"end_line":227,"context_start_line":205,"context_end_line":247,"code":" value[k] = cast(param, v)\n\n return value\n elif isinstance(value, container_abcs.Iterable):\n return type(value)(cast(param, v) for v in value)\n else:\n return value\n\n # Copy state assigned to params (and cast tensors to appropriate types).\n # State that is not assigned to params is copied as is (needed for\n # backward compatibility).\n state = defaultdict(dict)\n for k, v in state_dict[\"state\"].items():\n if k in id_map:\n param = 
id_map[k]\n state[param] = cast(param, v)\n else:\n state[k] = v\n\n # Update parameter groups, setting their 'params' value\n def update_group(group, new_group):\n new_group[\"params\"] = group[\"params\"]\n return new_group\n\n param_groups = [update_group(g, ng) for g, ng in zip(groups, saved_groups)]\n self.__setstate__({\"state\": state, \"param_groups\": param_groups})\n\n def to_gpu(self):\n for gindex, group in enumerate(self.param_groups):\n for pindex, p in enumerate(group[\"params\"]):\n if p in self.state:\n values = self.state[p]\n for k, v in values.items():\n if isinstance(v, torch.Tensor):\n is_paged = getattr(v, \"is_paged\", False)\n if not is_paged:\n self.state[p][k] = v.to(p.device)\n\n def check_overrides(self):\n for module, attr, config in self.mng.module_weight_config_triple:\n pmodule = getattr(module, attr)\n assert pmodule is not None\n assert isinstance(pmodule, torch.Tensor) or isinstance(pmodule, torch.Parameter)","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adagrad","uri":"program://bitsandbytes/module/bitsandbytes.optim.adagrad#L1-L207","kind":"module","name":"bitsandbytes.optim.adagrad","path":"bitsandbytes/optim/adagrad.py","language":"python","start_line":1,"end_line":207,"context_start_line":1,"context_end_line":207,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom bitsandbytes.optim.optimizer import Optimizer1State\n\n\nclass Adagrad(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-2,\n lr_decay=0,\n weight_decay=0,\n initial_accumulator_value=0,\n eps=1e-10,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Base Adagrad optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-2):\n The learning rate.\n lr_decay (`int`, defaults to 0):\n The learning rate decay.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n initial_accumulator_value (`int`, defaults to 0):\n The initial momemtum values.\n eps (`float`, defaults to 1e-10):\n The epsilon value prevents division by zero in the optimizer.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if not 0.0 <= lr:\n raise ValueError(f\"Invalid learning rate: {lr}\")\n if not 0.0 <= weight_decay:\n raise ValueError(f\"Invalid weight_decay value: {weight_decay}\")\n if not 0.0 <= eps:\n raise ValueError(f\"Invalid epsilon value: {eps}\")\n if initial_accumulator_value != 0.0:\n raise ValueError(\"Initial accumulator value != 0.0 not supported!\")\n if lr_decay != 0.0:\n raise ValueError(\"Lr Decay != 0.0 not supported!\")\n super().__init__(\n \"adagrad\",\n params,\n lr,\n (0.0, 0.0),\n eps,\n 
weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass Adagrad8bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-2,\n lr_decay=0,\n weight_decay=0,\n initial_accumulator_value=0,\n eps=1e-10,\n optim_bits=8,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n 8-bit Adagrad optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-2):\n The learning rate.\n lr_decay (`int`, defaults to 0):\n The learning rate decay.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n initial_accumulator_value (`int`, defaults to 0):\n The initial momemtum values.\n eps (`float`, defaults to 1e-10):\n The epsilon value prevents division by zero in the optimizer.\n optim_bits (`int`, defaults to 8):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if not 0.0 <= lr:\n raise ValueError(f\"Invalid learning rate: {lr}\")\n if not 0.0 <= weight_decay:\n raise ValueError(f\"Invalid weight_decay value: {weight_decay}\")\n if not 0.0 <= eps:\n raise ValueError(f\"Invalid epsilon value: {eps}\")\n if initial_accumulator_value != 0.0:\n raise ValueError(\"Initial accumulator value != 0.0 not supported!\")\n if lr_decay != 0.0:\n raise ValueError(\"Lr Decay != 0.0 not supported!\")\n assert block_wise\n super().__init__(\n \"adagrad\",\n params,\n lr,\n (0.0, 0.0),\n eps,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass Adagrad32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-2,\n lr_decay=0,\n weight_decay=0,\n initial_accumulator_value=0,\n eps=1e-10,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n 32-bit Adagrad optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-2):\n The learning rate.\n lr_decay (`int`, defaults to 0):\n The learning rate decay.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n initial_accumulator_value (`int`, defaults to 0):\n The initial momemtum values.\n eps (`float`, defaults to 1e-10):\n The epsilon value prevents division by zero in the optimizer.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if not 0.0 <= lr:\n 
raise ValueError(f\"Invalid learning rate: {lr}\")\n if not 0.0 <= weight_decay:\n raise ValueError(f\"Invalid weight_decay value: {weight_decay}\")\n if not 0.0 <= eps:\n raise ValueError(f\"Invalid epsilon value: {eps}\")\n if initial_accumulator_value != 0.0:\n raise ValueError(\"Initial accumulator value != 0.0 not supported!\")\n if lr_decay != 0.0:\n raise ValueError(\"Lr Decay != 0.0 not supported!\")\n super().__init__(\n \"adagrad\",\n params,\n lr,\n (0.0, 0.0),\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )","source_hash":"11bd8832311487e78a5bc7dba1aa9a166fd9b9f722f3b05315c271317baa287a","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adagrad.Adagrad","uri":"program://bitsandbytes/class/bitsandbytes.optim.adagrad.Adagrad#L8-L72","kind":"class","name":"Adagrad","path":"bitsandbytes/optim/adagrad.py","language":"python","start_line":8,"end_line":72,"context_start_line":1,"context_end_line":92,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom bitsandbytes.optim.optimizer import Optimizer1State\n\n\nclass Adagrad(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-2,\n lr_decay=0,\n weight_decay=0,\n initial_accumulator_value=0,\n eps=1e-10,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n Base Adagrad optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-2):\n The learning rate.\n lr_decay (`int`, defaults to 0):\n The learning rate decay.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n initial_accumulator_value (`int`, defaults to 0):\n The initial momemtum values.\n eps (`float`, defaults to 1e-10):\n The epsilon value prevents division by zero in the optimizer.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if not 0.0 <= lr:\n raise ValueError(f\"Invalid learning rate: {lr}\")\n if not 0.0 <= weight_decay:\n raise ValueError(f\"Invalid weight_decay value: {weight_decay}\")\n if not 0.0 <= eps:\n raise ValueError(f\"Invalid epsilon value: {eps}\")\n if initial_accumulator_value != 0.0:\n raise ValueError(\"Initial accumulator value != 0.0 not supported!\")\n if lr_decay != 0.0:\n raise ValueError(\"Lr Decay != 0.0 not supported!\")\n super().__init__(\n \"adagrad\",\n params,\n lr,\n (0.0, 0.0),\n eps,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass Adagrad8bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-2,\n lr_decay=0,\n weight_decay=0,\n initial_accumulator_value=0,\n eps=1e-10,\n optim_bits=8,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n 8-bit Adagrad 
optimizer.\n","source_hash":"11bd8832311487e78a5bc7dba1aa9a166fd9b9f722f3b05315c271317baa287a","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adagrad.Adagrad8bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.adagrad.Adagrad8bit#L75-L140","kind":"class","name":"Adagrad8bit","path":"bitsandbytes/optim/adagrad.py","language":"python","start_line":75,"end_line":140,"context_start_line":55,"context_end_line":160,"code":" raise ValueError(f\"Invalid epsilon value: {eps}\")\n if initial_accumulator_value != 0.0:\n raise ValueError(\"Initial accumulator value != 0.0 not supported!\")\n if lr_decay != 0.0:\n raise ValueError(\"Lr Decay != 0.0 not supported!\")\n super().__init__(\n \"adagrad\",\n params,\n lr,\n (0.0, 0.0),\n eps,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass Adagrad8bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-2,\n lr_decay=0,\n weight_decay=0,\n initial_accumulator_value=0,\n eps=1e-10,\n optim_bits=8,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n 8-bit Adagrad optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-2):\n The learning rate.\n lr_decay (`int`, defaults to 0):\n The learning rate decay.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n initial_accumulator_value (`int`, defaults to 0):\n The initial momemtum values.\n eps (`float`, defaults to 1e-10):\n The epsilon value prevents division by zero in the optimizer.\n optim_bits (`int`, defaults to 8):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if not 0.0 <= lr:\n raise ValueError(f\"Invalid learning rate: {lr}\")\n if not 0.0 <= weight_decay:\n raise ValueError(f\"Invalid weight_decay value: {weight_decay}\")\n if not 0.0 <= eps:\n raise ValueError(f\"Invalid epsilon value: {eps}\")\n if initial_accumulator_value != 0.0:\n raise ValueError(\"Initial accumulator value != 0.0 not supported!\")\n if lr_decay != 0.0:\n raise ValueError(\"Lr Decay != 0.0 not supported!\")\n assert block_wise\n super().__init__(\n \"adagrad\",\n params,\n lr,\n (0.0, 0.0),\n eps,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass Adagrad32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-2,\n lr_decay=0,\n weight_decay=0,\n initial_accumulator_value=0,\n eps=1e-10,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n 32-bit Adagrad optimizer.\n","source_hash":"11bd8832311487e78a5bc7dba1aa9a166fd9b9f722f3b05315c271317baa287a","truncated":false} 
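A short usage sketch may help between the 8-bit and 32-bit variants. This is illustrative only: it assumes a CUDA-enabled bitsandbytes install, and the layer size, batch shape, and learning rate are placeholder values, not anything prescribed by this module.

import torch
import bitsandbytes as bnb

# The Linear weight has 4096*4096 elements, well above min_8bit_size, so its
# Adagrad accumulator is stored in 8 bits; smaller tensors keep 32-bit state.
model = torch.nn.Linear(4096, 4096).cuda()
optimizer = bnb.optim.Adagrad8bit(model.parameters(), lr=1e-2)

x = torch.randn(16, 4096, device="cuda")
loss = model(x).sum()
loss.backward()
optimizer.step()
optimizer.zero_grad()

The swap is drop-in: the constructor mirrors torch.optim.Adagrad's core signature, with the quantized-state behavior controlled by min_8bit_size, percentile_clipping, and block_wise.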
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adagrad.Adagrad32bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.adagrad.Adagrad32bit#L143-L207","kind":"class","name":"Adagrad32bit","path":"bitsandbytes/optim/adagrad.py","language":"python","start_line":143,"end_line":207,"context_start_line":123,"context_end_line":207,"code":" if initial_accumulator_value != 0.0:\n raise ValueError(\"Initial accumulator value != 0.0 not supported!\")\n if lr_decay != 0.0:\n raise ValueError(\"Lr Decay != 0.0 not supported!\")\n assert block_wise\n super().__init__(\n \"adagrad\",\n params,\n lr,\n (0.0, 0.0),\n eps,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass Adagrad32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-2,\n lr_decay=0,\n weight_decay=0,\n initial_accumulator_value=0,\n eps=1e-10,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n 32-bit Adagrad optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-2):\n The learning rate.\n lr_decay (`int`, defaults to 0):\n The learning rate decay.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n initial_accumulator_value (`int`, defaults to 0):\n The initial momemtum values.\n eps (`float`, defaults to 1e-10):\n The epsilon value prevents division by zero in the optimizer.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if not 0.0 <= lr:\n raise ValueError(f\"Invalid learning rate: {lr}\")\n if not 0.0 <= weight_decay:\n raise ValueError(f\"Invalid weight_decay value: {weight_decay}\")\n if not 0.0 <= eps:\n raise ValueError(f\"Invalid epsilon value: {eps}\")\n if initial_accumulator_value != 0.0:\n raise ValueError(\"Initial accumulator value != 0.0 not supported!\")\n if lr_decay != 0.0:\n raise ValueError(\"Lr Decay != 0.0 not supported!\")\n super().__init__(\n \"adagrad\",\n params,\n lr,\n (0.0, 0.0),\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )","source_hash":"11bd8832311487e78a5bc7dba1aa9a166fd9b9f722f3b05315c271317baa287a","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.adagrad.__init__","uri":"program://bitsandbytes/function/bitsandbytes.optim.adagrad.__init__#L144-L207","kind":"function","name":"__init__","path":"bitsandbytes/optim/adagrad.py","language":"python","start_line":144,"end_line":207,"context_start_line":124,"context_end_line":207,"code":" raise ValueError(\"Initial accumulator value != 0.0 not supported!\")\n if lr_decay != 0.0:\n raise ValueError(\"Lr Decay != 0.0 not supported!\")\n assert block_wise\n super().__init__(\n \"adagrad\",\n params,\n lr,\n (0.0, 0.0),\n eps,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )\n\n\nclass Adagrad32bit(Optimizer1State):\n def 
__init__(\n self,\n params,\n lr=1e-2,\n lr_decay=0,\n weight_decay=0,\n initial_accumulator_value=0,\n eps=1e-10,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n ):\n \"\"\"\n 32-bit Adagrad optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`, defaults to 1e-2):\n The learning rate.\n lr_decay (`int`, defaults to 0):\n The learning rate decay.\n weight_decay (`float`, defaults to 0.0):\n The weight decay value for the optimizer.\n initial_accumulator_value (`int`, defaults to 0):\n The initial momemtum values.\n eps (`float`, defaults to 1e-10):\n The epsilon value prevents division by zero in the optimizer.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n block_wise (`bool`, defaults to `True`):\n Whether to independently quantize each block of tensors to reduce outlier effects and improve stability.\n \"\"\"\n if not 0.0 <= lr:\n raise ValueError(f\"Invalid learning rate: {lr}\")\n if not 0.0 <= weight_decay:\n raise ValueError(f\"Invalid weight_decay value: {weight_decay}\")\n if not 0.0 <= eps:\n raise ValueError(f\"Invalid epsilon value: {eps}\")\n if initial_accumulator_value != 0.0:\n raise ValueError(\"Initial accumulator value != 0.0 not supported!\")\n if lr_decay != 0.0:\n raise ValueError(\"Lr Decay != 0.0 not supported!\")\n super().__init__(\n \"adagrad\",\n params,\n lr,\n (0.0, 0.0),\n eps,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n block_wise,\n )","source_hash":"11bd8832311487e78a5bc7dba1aa9a166fd9b9f722f3b05315c271317baa287a","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lars","uri":"program://bitsandbytes/module/bitsandbytes.optim.lars#L1-L274","kind":"module","name":"bitsandbytes.optim.lars","path":"bitsandbytes/optim/lars.py","language":"python","start_line":1,"end_line":274,"context_start_line":1,"context_end_line":274,"code":"# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport torch\nfrom torch.optim import Optimizer\n\nfrom bitsandbytes.optim.optimizer import Optimizer1State\n\n\nclass LARS(Optimizer1State):\n def __init__(\n self,\n params,\n lr,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n max_unorm=0.02,\n ):\n \"\"\"\n Base LARS optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`):\n The learning rate.\n momentum (`float`, defaults to 0):\n The momentum value speeds up the optimizer by taking bigger steps.\n dampening (`float`, defaults to 0):\n The dampening value reduces the momentum of the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n nesterov (`bool`, defaults to `False`):\n Whether to use Nesterov momentum.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n max_unorm (`float`, defaults to 0.02):\n The maximum gradient norm.\n \"\"\"\n if momentum == 0:\n raise NotImplementedError(\"LARS without momentum is not supported!\")\n super().__init__(\n \"lars\",\n params,\n lr,\n (momentum, dampening),\n 0.0,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n max_unorm=max_unorm,\n block_wise=False,\n )\n\n\nclass LARS8bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n max_unorm=0.02,\n ):\n \"\"\"\n 8-bit LARS optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`):\n The learning rate.\n momentum (`float`, defaults to 0):\n The momentum value speeds up the optimizer by taking bigger steps.\n dampening (`float`, defaults to 0):\n The dampening value reduces the momentum of the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n nesterov (`bool`, defaults to `False`):\n Whether to use Nesterov momentum.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n max_unorm (`float`, defaults to 0.02):\n The maximum gradient norm.\n \"\"\"\n if momentum == 0:\n raise NotImplementedError(\"LARS without momentum is not supported!\")\n super().__init__(\n \"lars\",\n params,\n lr,\n (momentum, dampening),\n 0.0,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n max_unorm=max_unorm,\n block_wise=False,\n )\n\n\nclass LARS32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n 
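        # NB: `nesterov` is accepted to mirror the torch.optim.SGD signature,
        # but the super().__init__ call below does not forward it, so it has
        # no effect in this wrapper.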
args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n max_unorm=0.02,\n ):\n \"\"\"\n 32-bit LARS optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`):\n The learning rate.\n momentum (`float`, defaults to 0):\n The momentum value speeds up the optimizer by taking bigger steps.\n dampening (`float`, defaults to 0):\n The dampening value reduces the momentum of the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n nesterov (`bool`, defaults to `False`):\n Whether to use Nesterov momentum.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n max_unorm (`float`, defaults to 0.02):\n The maximum gradient norm.\n \"\"\"\n if momentum == 0:\n raise NotImplementedError(\"LARS without momentum is not supported!\")\n super().__init__(\n \"lars\",\n params,\n lr,\n (momentum, dampening),\n 0.0,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n max_unorm=max_unorm,\n block_wise=False,\n )\n\n\nclass PytorchLARS(Optimizer):\n def __init__(\n self,\n params,\n lr=0.01,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n max_unorm=0.02,\n ):\n if lr < 0.0:\n raise ValueError(f\"Invalid learning rate: {lr}\")\n if momentum < 0.0:\n raise ValueError(f\"Invalid momentum value: {momentum}\")\n if weight_decay < 0.0:\n raise ValueError(f\"Invalid weight_decay value: {weight_decay}\")\n\n defaults = dict(\n lr=lr,\n momentum=momentum,\n dampening=dampening,\n weight_decay=weight_decay,\n nesterov=nesterov,\n max_unorm=max_unorm,\n )\n if nesterov and (momentum <= 0 or dampening != 0):\n raise ValueError(\"Nesterov momentum requires a momentum and zero dampening\")\n super().__init__(params, defaults)\n\n def __setstate__(self, state):\n super().__setstate__(state)\n for group in self.param_groups:\n group.setdefault(\"nesterov\", False)\n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Args:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group[\"weight_decay\"]\n momentum = group[\"momentum\"]\n dampening = group[\"dampening\"]\n nesterov = group[\"nesterov\"]\n max_unorm = group[\"max_unorm\"]\n lr = group[\"lr\"]\n\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n\n state = self.state[p]\n d_p = p.grad\n if weight_decay != 0:\n d_p = d_p.add(p, alpha=weight_decay)\n\n if momentum != 0:\n buf = state.get(\"momentum_buffer\", None)\n\n if buf is None:\n buf = torch.clone(d_p).detach()\n state[\"momentum_buffer\"] = buf\n else:\n buf.mul_(momentum).add_(d_p, alpha=1 - dampening)\n\n if nesterov:\n update = d_p + buf * momentum\n else:\n update = buf\n\n update_scale = 1.0\n if max_unorm > 0.0:\n assert p.dtype == torch.float32\n pnorm = torch.norm(p.detach())\n unorm = torch.norm(update)\n if unorm > max_unorm * pnorm:\n update_scale = max_unorm * pnorm / unorm\n\n p.add_(update, alpha=-lr * update_scale)\n\n return 
loss","source_hash":"782114950adcf5c11033555c779362917f4485c6b603f3cd1b202669899cadef","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lars.LARS","uri":"program://bitsandbytes/class/bitsandbytes.optim.lars.LARS#L11-L68","kind":"class","name":"LARS","path":"bitsandbytes/optim/lars.py","language":"python","start_line":11,"end_line":68,"context_start_line":1,"context_end_line":88,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport torch\nfrom torch.optim import Optimizer\n\nfrom bitsandbytes.optim.optimizer import Optimizer1State\n\n\nclass LARS(Optimizer1State):\n def __init__(\n self,\n params,\n lr,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n max_unorm=0.02,\n ):\n \"\"\"\n Base LARS optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`):\n The learning rate.\n momentum (`float`, defaults to 0):\n The momentum value speeds up the optimizer by taking bigger steps.\n dampening (`float`, defaults to 0):\n The dampening value reduces the momentum of the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n nesterov (`bool`, defaults to `False`):\n Whether to use Nesterov momentum.\n optim_bits (`int`, defaults to 32):\n The number of bits of the optimizer state.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n max_unorm (`float`, defaults to 0.02):\n The maximum gradient norm.\n \"\"\"\n if momentum == 0:\n raise NotImplementedError(\"LARS without momentum is not supported!\")\n super().__init__(\n \"lars\",\n params,\n lr,\n (momentum, dampening),\n 0.0,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n max_unorm=max_unorm,\n block_wise=False,\n )\n\n\nclass LARS8bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n max_unorm=0.02,\n ):\n \"\"\"\n 8-bit LARS optimizer.\n\n Arguments:","source_hash":"782114950adcf5c11033555c779362917f4485c6b603f3cd1b202669899cadef","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lars.LARS8bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.lars.LARS8bit#L71-L125","kind":"class","name":"LARS8bit","path":"bitsandbytes/optim/lars.py","language":"python","start_line":71,"end_line":125,"context_start_line":51,"context_end_line":145,"code":" The maximum gradient norm.\n \"\"\"\n if momentum == 0:\n raise NotImplementedError(\"LARS without momentum is not supported!\")\n super().__init__(\n \"lars\",\n params,\n lr,\n (momentum, dampening),\n 0.0,\n weight_decay,\n optim_bits,\n args,\n min_8bit_size,\n percentile_clipping,\n max_unorm=max_unorm,\n block_wise=False,\n )\n\n\nclass LARS8bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n args=None,\n min_8bit_size=4096,\n 
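        # (tensors with fewer than `min_8bit_size` elements are not quantized;
        #  their optimizer state stays in 32-bit, per the docstring below)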
percentile_clipping=100,\n max_unorm=0.02,\n ):\n \"\"\"\n 8-bit LARS optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`):\n The learning rate.\n momentum (`float`, defaults to 0):\n The momentum value speeds up the optimizer by taking bigger steps.\n dampening (`float`, defaults to 0):\n The dampening value reduces the momentum of the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n nesterov (`bool`, defaults to `False`):\n Whether to use Nesterov momentum.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n max_unorm (`float`, defaults to 0.02):\n The maximum gradient norm.\n \"\"\"\n if momentum == 0:\n raise NotImplementedError(\"LARS without momentum is not supported!\")\n super().__init__(\n \"lars\",\n params,\n lr,\n (momentum, dampening),\n 0.0,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n max_unorm=max_unorm,\n block_wise=False,\n )\n\n\nclass LARS32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n max_unorm=0.02,\n ):\n \"\"\"\n 32-bit LARS optimizer.\n\n Arguments:","source_hash":"782114950adcf5c11033555c779362917f4485c6b603f3cd1b202669899cadef","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lars.LARS32bit","uri":"program://bitsandbytes/class/bitsandbytes.optim.lars.LARS32bit#L128-L182","kind":"class","name":"LARS32bit","path":"bitsandbytes/optim/lars.py","language":"python","start_line":128,"end_line":182,"context_start_line":108,"context_end_line":202,"code":" The maximum gradient norm.\n \"\"\"\n if momentum == 0:\n raise NotImplementedError(\"LARS without momentum is not supported!\")\n super().__init__(\n \"lars\",\n params,\n lr,\n (momentum, dampening),\n 0.0,\n weight_decay,\n 8,\n args,\n min_8bit_size,\n percentile_clipping,\n max_unorm=max_unorm,\n block_wise=False,\n )\n\n\nclass LARS32bit(Optimizer1State):\n def __init__(\n self,\n params,\n lr,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n max_unorm=0.02,\n ):\n \"\"\"\n 32-bit LARS optimizer.\n\n Arguments:\n params (`torch.tensor`):\n The input parameters to optimize.\n lr (`float`):\n The learning rate.\n momentum (`float`, defaults to 0):\n The momentum value speeds up the optimizer by taking bigger steps.\n dampening (`float`, defaults to 0):\n The dampening value reduces the momentum of the optimizer.\n weight_decay (`float`, defaults to 1e-2):\n The weight decay value for the optimizer.\n nesterov (`bool`, defaults to `False`):\n Whether to use Nesterov momentum.\n args (`object`, defaults to `None`):\n An object with additional arguments.\n min_8bit_size (`int`, defaults to 4096):\n The minimum number of elements of the parameter tensors for 8-bit optimization.\n percentile_clipping (`int`, defaults to 100):\n Adapts clipping threshold automatically by tracking the last 100 gradient norms and clipping the gradient at a certain percentile to improve stability.\n max_unorm 
(`float`, defaults to 0.02):\n The maximum gradient norm.\n \"\"\"\n if momentum == 0:\n raise NotImplementedError(\"LARS without momentum is not supported!\")\n super().__init__(\n \"lars\",\n params,\n lr,\n (momentum, dampening),\n 0.0,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n max_unorm=max_unorm,\n block_wise=False,\n )\n\n\nclass PytorchLARS(Optimizer):\n def __init__(\n self,\n params,\n lr=0.01,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n max_unorm=0.02,\n ):\n if lr < 0.0:\n raise ValueError(f\"Invalid learning rate: {lr}\")\n if momentum < 0.0:\n raise ValueError(f\"Invalid momentum value: {momentum}\")\n if weight_decay < 0.0:\n raise ValueError(f\"Invalid weight_decay value: {weight_decay}\")\n","source_hash":"782114950adcf5c11033555c779362917f4485c6b603f3cd1b202669899cadef","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lars.PytorchLARS","uri":"program://bitsandbytes/class/bitsandbytes.optim.lars.PytorchLARS#L185-L274","kind":"class","name":"PytorchLARS","path":"bitsandbytes/optim/lars.py","language":"python","start_line":185,"end_line":274,"context_start_line":165,"context_end_line":274,"code":" The maximum gradient norm.\n \"\"\"\n if momentum == 0:\n raise NotImplementedError(\"LARS without momentum is not supported!\")\n super().__init__(\n \"lars\",\n params,\n lr,\n (momentum, dampening),\n 0.0,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n max_unorm=max_unorm,\n block_wise=False,\n )\n\n\nclass PytorchLARS(Optimizer):\n def __init__(\n self,\n params,\n lr=0.01,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n max_unorm=0.02,\n ):\n if lr < 0.0:\n raise ValueError(f\"Invalid learning rate: {lr}\")\n if momentum < 0.0:\n raise ValueError(f\"Invalid momentum value: {momentum}\")\n if weight_decay < 0.0:\n raise ValueError(f\"Invalid weight_decay value: {weight_decay}\")\n\n defaults = dict(\n lr=lr,\n momentum=momentum,\n dampening=dampening,\n weight_decay=weight_decay,\n nesterov=nesterov,\n max_unorm=max_unorm,\n )\n if nesterov and (momentum <= 0 or dampening != 0):\n raise ValueError(\"Nesterov momentum requires a momentum and zero dampening\")\n super().__init__(params, defaults)\n\n def __setstate__(self, state):\n super().__setstate__(state)\n for group in self.param_groups:\n group.setdefault(\"nesterov\", False)\n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Args:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group[\"weight_decay\"]\n momentum = group[\"momentum\"]\n dampening = group[\"dampening\"]\n nesterov = group[\"nesterov\"]\n max_unorm = group[\"max_unorm\"]\n lr = group[\"lr\"]\n\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n\n state = self.state[p]\n d_p = p.grad\n if weight_decay != 0:\n d_p = d_p.add(p, alpha=weight_decay)\n\n if momentum != 0:\n buf = state.get(\"momentum_buffer\", None)\n\n if buf is None:\n buf = torch.clone(d_p).detach()\n state[\"momentum_buffer\"] = buf\n else:\n buf.mul_(momentum).add_(d_p, alpha=1 - dampening)\n\n if nesterov:\n update = d_p + buf * momentum\n else:\n update = buf\n\n update_scale = 1.0\n if max_unorm > 0.0:\n assert p.dtype == torch.float32\n pnorm = torch.norm(p.detach())\n unorm = torch.norm(update)\n if unorm > 
max_unorm * pnorm:\n update_scale = max_unorm * pnorm / unorm\n\n p.add_(update, alpha=-lr * update_scale)\n\n return loss","source_hash":"782114950adcf5c11033555c779362917f4485c6b603f3cd1b202669899cadef","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lars.__init__","uri":"program://bitsandbytes/function/bitsandbytes.optim.lars.__init__#L186-L213","kind":"function","name":"__init__","path":"bitsandbytes/optim/lars.py","language":"python","start_line":186,"end_line":213,"context_start_line":166,"context_end_line":233,"code":" \"\"\"\n if momentum == 0:\n raise NotImplementedError(\"LARS without momentum is not supported!\")\n super().__init__(\n \"lars\",\n params,\n lr,\n (momentum, dampening),\n 0.0,\n weight_decay,\n 32,\n args,\n min_8bit_size,\n percentile_clipping,\n max_unorm=max_unorm,\n block_wise=False,\n )\n\n\nclass PytorchLARS(Optimizer):\n def __init__(\n self,\n params,\n lr=0.01,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n max_unorm=0.02,\n ):\n if lr < 0.0:\n raise ValueError(f\"Invalid learning rate: {lr}\")\n if momentum < 0.0:\n raise ValueError(f\"Invalid momentum value: {momentum}\")\n if weight_decay < 0.0:\n raise ValueError(f\"Invalid weight_decay value: {weight_decay}\")\n\n defaults = dict(\n lr=lr,\n momentum=momentum,\n dampening=dampening,\n weight_decay=weight_decay,\n nesterov=nesterov,\n max_unorm=max_unorm,\n )\n if nesterov and (momentum <= 0 or dampening != 0):\n raise ValueError(\"Nesterov momentum requires a momentum and zero dampening\")\n super().__init__(params, defaults)\n\n def __setstate__(self, state):\n super().__setstate__(state)\n for group in self.param_groups:\n group.setdefault(\"nesterov\", False)\n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Args:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:","source_hash":"782114950adcf5c11033555c779362917f4485c6b603f3cd1b202669899cadef","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lars.__setstate__","uri":"program://bitsandbytes/function/bitsandbytes.optim.lars.__setstate__#L215-L218","kind":"function","name":"__setstate__","path":"bitsandbytes/optim/lars.py","language":"python","start_line":215,"end_line":218,"context_start_line":195,"context_end_line":238,"code":" ):\n if lr < 0.0:\n raise ValueError(f\"Invalid learning rate: {lr}\")\n if momentum < 0.0:\n raise ValueError(f\"Invalid momentum value: {momentum}\")\n if weight_decay < 0.0:\n raise ValueError(f\"Invalid weight_decay value: {weight_decay}\")\n\n defaults = dict(\n lr=lr,\n momentum=momentum,\n dampening=dampening,\n weight_decay=weight_decay,\n nesterov=nesterov,\n max_unorm=max_unorm,\n )\n if nesterov and (momentum <= 0 or dampening != 0):\n raise ValueError(\"Nesterov momentum requires a momentum and zero dampening\")\n super().__init__(params, defaults)\n\n def __setstate__(self, state):\n super().__setstate__(state)\n for group in self.param_groups:\n group.setdefault(\"nesterov\", False)\n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Args:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in 
self.param_groups:\n weight_decay = group[\"weight_decay\"]\n momentum = group[\"momentum\"]\n dampening = group[\"dampening\"]\n nesterov = group[\"nesterov\"]\n max_unorm = group[\"max_unorm\"]","source_hash":"782114950adcf5c11033555c779362917f4485c6b603f3cd1b202669899cadef","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.optim.lars.step","uri":"program://bitsandbytes/function/bitsandbytes.optim.lars.step#L221-L274","kind":"function","name":"step","path":"bitsandbytes/optim/lars.py","language":"python","start_line":221,"end_line":274,"context_start_line":201,"context_end_line":274,"code":" raise ValueError(f\"Invalid weight_decay value: {weight_decay}\")\n\n defaults = dict(\n lr=lr,\n momentum=momentum,\n dampening=dampening,\n weight_decay=weight_decay,\n nesterov=nesterov,\n max_unorm=max_unorm,\n )\n if nesterov and (momentum <= 0 or dampening != 0):\n raise ValueError(\"Nesterov momentum requires a momentum and zero dampening\")\n super().__init__(params, defaults)\n\n def __setstate__(self, state):\n super().__setstate__(state)\n for group in self.param_groups:\n group.setdefault(\"nesterov\", False)\n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Args:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group[\"weight_decay\"]\n momentum = group[\"momentum\"]\n dampening = group[\"dampening\"]\n nesterov = group[\"nesterov\"]\n max_unorm = group[\"max_unorm\"]\n lr = group[\"lr\"]\n\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n\n state = self.state[p]\n d_p = p.grad\n if weight_decay != 0:\n d_p = d_p.add(p, alpha=weight_decay)\n\n if momentum != 0:\n buf = state.get(\"momentum_buffer\", None)\n\n if buf is None:\n buf = torch.clone(d_p).detach()\n state[\"momentum_buffer\"] = buf\n else:\n buf.mul_(momentum).add_(d_p, alpha=1 - dampening)\n\n if nesterov:\n update = d_p + buf * momentum\n else:\n update = buf\n\n update_scale = 1.0\n if max_unorm > 0.0:\n assert p.dtype == torch.float32\n pnorm = torch.norm(p.detach())\n unorm = torch.norm(update)\n if unorm > max_unorm * pnorm:\n update_scale = max_unorm * pnorm / unorm\n\n p.add_(update, alpha=-lr * update_scale)\n\n return loss","source_hash":"782114950adcf5c11033555c779362917f4485c6b603f3cd1b202669899cadef","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.diagnostics.cuda","uri":"program://bitsandbytes/module/bitsandbytes.diagnostics.cuda#L1-L219","kind":"module","name":"bitsandbytes.diagnostics.cuda","path":"bitsandbytes/diagnostics/cuda.py","language":"python","start_line":1,"end_line":219,"context_start_line":1,"context_end_line":219,"code":"from collections.abc import Iterable, Iterator\nimport logging\nimport os\nfrom pathlib import Path\n\nimport torch\n\nfrom bitsandbytes.cextension import HIP_ENVIRONMENT, get_cuda_bnb_library_path\nfrom bitsandbytes.cuda_specs import CUDASpecs\nfrom bitsandbytes.diagnostics.utils import print_dedented\n\nCUDART_PATH_PREFERRED_ENVVARS = (\"CONDA_PREFIX\", \"LD_LIBRARY_PATH\")\n\nCUDART_PATH_IGNORED_ENVVARS = {\n \"DBUS_SESSION_BUS_ADDRESS\", # hardware related\n \"GOOGLE_VM_CONFIG_LOCK_FILE\", # GCP: requires elevated permissions, causing problems in VMs and Jupyter notebooks\n \"HOME\", # Linux shell default\n \"LESSCLOSE\",\n \"LESSOPEN\", # related to the `less` command\n 
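    # (entries in this ignore set tend to hold path-like values that would
    #  otherwise be scanned as candidate CUDA/ROCm library locations)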
\"MAIL\", # something related to emails\n \"OLDPWD\",\n \"PATH\", # this is for finding binaries, not libraries\n \"PWD\", # PWD: this is how the shell keeps track of the current working dir\n \"SHELL\", # binary for currently invoked shell\n \"SSH_AUTH_SOCK\", # SSH stuff, therefore unrelated\n \"SSH_TTY\",\n \"TMUX\", # Terminal Multiplexer\n \"XDG_DATA_DIRS\", # XDG: Desktop environment stuff\n \"XDG_GREETER_DATA_DIR\", # XDG: Desktop environment stuff\n \"XDG_RUNTIME_DIR\",\n \"_\", # current Python interpreter\n}\n\nCUDA_RUNTIME_LIB_PATTERNS = (\n (\"libamdhip64.so*\",)\n if HIP_ENVIRONMENT\n else (\n \"cudart64*.dll\", # Windows\n \"libcudart*.so*\", # libcudart.so, libcudart.so.11.0, libcudart.so.12.0, libcudart.so.12.1, libcudart.so.12.2 etc.\n \"nvcuda*.dll\", # Windows\n )\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef find_cuda_libraries_in_path_list(paths_list_candidate: str) -> Iterable[Path]:\n for dir_string in paths_list_candidate.split(os.pathsep):\n if not dir_string:\n continue\n if os.sep not in dir_string:\n continue\n try:\n dir = Path(dir_string)\n try:\n if not dir.exists():\n logger.warning(f\"The directory listed in your path is found to be non-existent: {dir}\")\n continue\n except OSError: # Assume an esoteric error trying to poke at the directory\n pass\n for lib_pattern in CUDA_RUNTIME_LIB_PATTERNS:\n for pth in dir.glob(lib_pattern):\n if pth.is_file() and not pth.is_symlink():\n yield pth\n except (OSError, PermissionError):\n pass\n\n\ndef is_relevant_candidate_env_var(env_var: str, value: str) -> bool:\n return (\n env_var in CUDART_PATH_PREFERRED_ENVVARS # is a preferred location\n or (\n os.sep in value # might contain a path\n and env_var not in CUDART_PATH_IGNORED_ENVVARS # not ignored\n and \"CONDA\" not in env_var # not another conda envvar\n and \"BASH_FUNC\" not in env_var # not a bash function defined via envvar\n and \"\\n\" not in value # likely e.g. a script or something?\n )\n )\n\n\ndef get_potentially_lib_path_containing_env_vars() -> dict[str, str]:\n return {env_var: value for env_var, value in os.environ.items() if is_relevant_candidate_env_var(env_var, value)}\n\n\ndef find_cudart_libraries() -> Iterator[Path]:\n \"\"\"\n Searches for a cuda installations, in the following order of priority:\n 1. active conda env\n 2. LD_LIBRARY_PATH\n 3. any other env vars, while ignoring those that\n - are known to be unrelated\n - don't contain the path separator `/`\n\n If multiple libraries are found in part 3, we optimistically try one,\n while giving a warning message.\n \"\"\"\n candidate_env_vars = get_potentially_lib_path_containing_env_vars()\n\n for envvar in CUDART_PATH_PREFERRED_ENVVARS:\n if envvar in candidate_env_vars:\n directory = candidate_env_vars[envvar]\n yield from find_cuda_libraries_in_path_list(directory)\n candidate_env_vars.pop(envvar)\n\n for env_var, value in candidate_env_vars.items():\n yield from find_cuda_libraries_in_path_list(value)\n\n\ndef _print_cuda_diagnostics(cuda_specs: CUDASpecs) -> None:\n print(\n f\"PyTorch settings found: CUDA_VERSION={cuda_specs.cuda_version_string}, \"\n f\"Highest Compute Capability: {cuda_specs.highest_compute_capability}.\",\n )\n\n binary_path = get_cuda_bnb_library_path(cuda_specs)\n if not binary_path.exists():\n print_dedented(\n f\"\"\"\n Library not found: {binary_path}. 
Maybe you need to compile it from source?\n \"\"\",\n )\n\n # 7.5 is the minimum CC for int8 tensor cores\n if not cuda_specs.has_imma:\n print_dedented(\n \"\"\"\n WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!\n If you run into issues with 8-bit matmul, you can try 4-bit quantization:\n https://huggingface.co/blog/4bit-transformers-bitsandbytes\n \"\"\",\n )\n\n\ndef _print_hip_diagnostics(cuda_specs: CUDASpecs) -> None:\n print(f\"PyTorch settings found: ROCM_VERSION={cuda_specs.cuda_version_string}\")\n\n binary_path = get_cuda_bnb_library_path(cuda_specs)\n if not binary_path.exists():\n print_dedented(\n f\"\"\"\n Library not found: {binary_path}.\n Maybe you need to compile it from source? If you compiled from source, check that ROCm version\n in PyTorch Settings matches your ROCm install. If not, reinstall PyTorch for your ROCm version\n and rebuild bitsandbytes.\n \"\"\",\n )\n\n hip_major, hip_minor = cuda_specs.cuda_version_tuple\n if (hip_major, hip_minor) < (6, 1):\n print_dedented(\n \"\"\"\n WARNING: bitsandbytes is fully supported only from ROCm 6.1.\n \"\"\",\n )\n\n\ndef print_diagnostics(cuda_specs: CUDASpecs) -> None:\n if HIP_ENVIRONMENT:\n _print_hip_diagnostics(cuda_specs)\n else:\n _print_cuda_diagnostics(cuda_specs)\n\n\ndef _print_cuda_runtime_diagnostics() -> None:\n cudart_paths = list(find_cudart_libraries())\n if not cudart_paths:\n print(\"CUDA SETUP: WARNING! CUDA runtime files not found in any environmental path.\")\n elif len(cudart_paths) > 1:\n print_dedented(\n f\"\"\"\n Found duplicate CUDA runtime files (see below).\n\n We select the PyTorch default CUDA runtime, which is {torch.version.cuda},\n but this might mismatch with the CUDA version that is needed for bitsandbytes.\n To override this behavior set the `BNB_CUDA_VERSION=` environmental variable.\n\n For example, if you want to use the CUDA version 122,\n BNB_CUDA_VERSION=122 python ...\n\n OR set the environmental variable in your .bashrc:\n export BNB_CUDA_VERSION=122\n\n In the case of a manual override, make sure you set LD_LIBRARY_PATH, e.g.\n export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.2,\n \"\"\",\n )\n for pth in cudart_paths:\n print(f\"* Found CUDA runtime at: {pth}\")\n\n\ndef _print_hip_runtime_diagnostics() -> None:\n cudart_paths = list(find_cudart_libraries())\n if not cudart_paths:\n print(\"WARNING! 
ROCm runtime files not found in any environmental path.\")\n elif len(cudart_paths) > 1:\n print_dedented(\n f\"\"\"\n Found duplicate ROCm runtime files (see below).\n\n We select the PyTorch default ROCm runtime, which is {torch.version.hip},\n but this might mismatch with the ROCm version that is needed for bitsandbytes.\n\n To resolve it, install PyTorch built for the ROCm version you want to use\n\n and set LD_LIBRARY_PATH to your ROCm install path, e.g.\n export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/rocm-6.1.2/lib,\n \"\"\",\n )\n\n for pth in cudart_paths:\n print(f\"* Found ROCm runtime at: {pth}\")\n\n\ndef print_runtime_diagnostics() -> None:\n if HIP_ENVIRONMENT:\n _print_hip_runtime_diagnostics()\n else:\n _print_cuda_runtime_diagnostics()","source_hash":"c21668d64d87411c67e9ab3941007f95b4cede7758f55a9d2690013509d0d144","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.diagnostics.cuda.find_cuda_libraries_in_path_list","uri":"program://bitsandbytes/function/bitsandbytes.diagnostics.cuda.find_cuda_libraries_in_path_list#L47-L66","kind":"function","name":"find_cuda_libraries_in_path_list","path":"bitsandbytes/diagnostics/cuda.py","language":"python","start_line":47,"end_line":66,"context_start_line":27,"context_end_line":86,"code":" \"TMUX\", # Terminal Multiplexer\n \"XDG_DATA_DIRS\", # XDG: Desktop environment stuff\n \"XDG_GREETER_DATA_DIR\", # XDG: Desktop environment stuff\n \"XDG_RUNTIME_DIR\",\n \"_\", # current Python interpreter\n}\n\nCUDA_RUNTIME_LIB_PATTERNS = (\n (\"libamdhip64.so*\",)\n if HIP_ENVIRONMENT\n else (\n \"cudart64*.dll\", # Windows\n \"libcudart*.so*\", # libcudart.so, libcudart.so.11.0, libcudart.so.12.0, libcudart.so.12.1, libcudart.so.12.2 etc.\n \"nvcuda*.dll\", # Windows\n )\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef find_cuda_libraries_in_path_list(paths_list_candidate: str) -> Iterable[Path]:\n for dir_string in paths_list_candidate.split(os.pathsep):\n if not dir_string:\n continue\n if os.sep not in dir_string:\n continue\n try:\n dir = Path(dir_string)\n try:\n if not dir.exists():\n logger.warning(f\"The directory listed in your path is found to be non-existent: {dir}\")\n continue\n except OSError: # Assume an esoteric error trying to poke at the directory\n pass\n for lib_pattern in CUDA_RUNTIME_LIB_PATTERNS:\n for pth in dir.glob(lib_pattern):\n if pth.is_file() and not pth.is_symlink():\n yield pth\n except (OSError, PermissionError):\n pass\n\n\ndef is_relevant_candidate_env_var(env_var: str, value: str) -> bool:\n return (\n env_var in CUDART_PATH_PREFERRED_ENVVARS # is a preferred location\n or (\n os.sep in value # might contain a path\n and env_var not in CUDART_PATH_IGNORED_ENVVARS # not ignored\n and \"CONDA\" not in env_var # not another conda envvar\n and \"BASH_FUNC\" not in env_var # not a bash function defined via envvar\n and \"\\n\" not in value # likely e.g. 
a script or something?\n )\n )\n\n\ndef get_potentially_lib_path_containing_env_vars() -> dict[str, str]:\n return {env_var: value for env_var, value in os.environ.items() if is_relevant_candidate_env_var(env_var, value)}\n\n\ndef find_cudart_libraries() -> Iterator[Path]:","source_hash":"c21668d64d87411c67e9ab3941007f95b4cede7758f55a9d2690013509d0d144","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.diagnostics.cuda.is_relevant_candidate_env_var","uri":"program://bitsandbytes/function/bitsandbytes.diagnostics.cuda.is_relevant_candidate_env_var#L69-L79","kind":"function","name":"is_relevant_candidate_env_var","path":"bitsandbytes/diagnostics/cuda.py","language":"python","start_line":69,"end_line":79,"context_start_line":49,"context_end_line":99,"code":" if not dir_string:\n continue\n if os.sep not in dir_string:\n continue\n try:\n dir = Path(dir_string)\n try:\n if not dir.exists():\n logger.warning(f\"The directory listed in your path is found to be non-existent: {dir}\")\n continue\n except OSError: # Assume an esoteric error trying to poke at the directory\n pass\n for lib_pattern in CUDA_RUNTIME_LIB_PATTERNS:\n for pth in dir.glob(lib_pattern):\n if pth.is_file() and not pth.is_symlink():\n yield pth\n except (OSError, PermissionError):\n pass\n\n\ndef is_relevant_candidate_env_var(env_var: str, value: str) -> bool:\n return (\n env_var in CUDART_PATH_PREFERRED_ENVVARS # is a preferred location\n or (\n os.sep in value # might contain a path\n and env_var not in CUDART_PATH_IGNORED_ENVVARS # not ignored\n and \"CONDA\" not in env_var # not another conda envvar\n and \"BASH_FUNC\" not in env_var # not a bash function defined via envvar\n and \"\\n\" not in value # likely e.g. a script or something?\n )\n )\n\n\ndef get_potentially_lib_path_containing_env_vars() -> dict[str, str]:\n return {env_var: value for env_var, value in os.environ.items() if is_relevant_candidate_env_var(env_var, value)}\n\n\ndef find_cudart_libraries() -> Iterator[Path]:\n \"\"\"\n Searches for a cuda installations, in the following order of priority:\n 1. active conda env\n 2. LD_LIBRARY_PATH\n 3. 
any other env vars, while ignoring those that\n - are known to be unrelated\n - don't contain the path separator `/`\n\n If multiple libraries are found in part 3, we optimistically try one,\n while giving a warning message.\n \"\"\"\n candidate_env_vars = get_potentially_lib_path_containing_env_vars()\n","source_hash":"c21668d64d87411c67e9ab3941007f95b4cede7758f55a9d2690013509d0d144","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.diagnostics.cuda.get_potentially_lib_path_containing_env_vars","uri":"program://bitsandbytes/function/bitsandbytes.diagnostics.cuda.get_potentially_lib_path_containing_env_vars#L82-L83","kind":"function","name":"get_potentially_lib_path_containing_env_vars","path":"bitsandbytes/diagnostics/cuda.py","language":"python","start_line":82,"end_line":83,"context_start_line":62,"context_end_line":103,"code":" for pth in dir.glob(lib_pattern):\n if pth.is_file() and not pth.is_symlink():\n yield pth\n except (OSError, PermissionError):\n pass\n\n\ndef is_relevant_candidate_env_var(env_var: str, value: str) -> bool:\n return (\n env_var in CUDART_PATH_PREFERRED_ENVVARS # is a preferred location\n or (\n os.sep in value # might contain a path\n and env_var not in CUDART_PATH_IGNORED_ENVVARS # not ignored\n and \"CONDA\" not in env_var # not another conda envvar\n and \"BASH_FUNC\" not in env_var # not a bash function defined via envvar\n and \"\\n\" not in value # likely e.g. a script or something?\n )\n )\n\n\ndef get_potentially_lib_path_containing_env_vars() -> dict[str, str]:\n return {env_var: value for env_var, value in os.environ.items() if is_relevant_candidate_env_var(env_var, value)}\n\n\ndef find_cudart_libraries() -> Iterator[Path]:\n \"\"\"\n Searches for a cuda installations, in the following order of priority:\n 1. active conda env\n 2. LD_LIBRARY_PATH\n 3. any other env vars, while ignoring those that\n - are known to be unrelated\n - don't contain the path separator `/`\n\n If multiple libraries are found in part 3, we optimistically try one,\n while giving a warning message.\n \"\"\"\n candidate_env_vars = get_potentially_lib_path_containing_env_vars()\n\n for envvar in CUDART_PATH_PREFERRED_ENVVARS:\n if envvar in candidate_env_vars:\n directory = candidate_env_vars[envvar]\n yield from find_cuda_libraries_in_path_list(directory)","source_hash":"c21668d64d87411c67e9ab3941007f95b4cede7758f55a9d2690013509d0d144","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.diagnostics.cuda.find_cudart_libraries","uri":"program://bitsandbytes/function/bitsandbytes.diagnostics.cuda.find_cudart_libraries#L86-L107","kind":"function","name":"find_cudart_libraries","path":"bitsandbytes/diagnostics/cuda.py","language":"python","start_line":86,"end_line":107,"context_start_line":66,"context_end_line":127,"code":" pass\n\n\ndef is_relevant_candidate_env_var(env_var: str, value: str) -> bool:\n return (\n env_var in CUDART_PATH_PREFERRED_ENVVARS # is a preferred location\n or (\n os.sep in value # might contain a path\n and env_var not in CUDART_PATH_IGNORED_ENVVARS # not ignored\n and \"CONDA\" not in env_var # not another conda envvar\n and \"BASH_FUNC\" not in env_var # not a bash function defined via envvar\n and \"\\n\" not in value # likely e.g. 
a script or something?\n )\n )\n\n\ndef get_potentially_lib_path_containing_env_vars() -> dict[str, str]:\n return {env_var: value for env_var, value in os.environ.items() if is_relevant_candidate_env_var(env_var, value)}\n\n\ndef find_cudart_libraries() -> Iterator[Path]:\n \"\"\"\n Searches for a cuda installations, in the following order of priority:\n 1. active conda env\n 2. LD_LIBRARY_PATH\n 3. any other env vars, while ignoring those that\n - are known to be unrelated\n - don't contain the path separator `/`\n\n If multiple libraries are found in part 3, we optimistically try one,\n while giving a warning message.\n \"\"\"\n candidate_env_vars = get_potentially_lib_path_containing_env_vars()\n\n for envvar in CUDART_PATH_PREFERRED_ENVVARS:\n if envvar in candidate_env_vars:\n directory = candidate_env_vars[envvar]\n yield from find_cuda_libraries_in_path_list(directory)\n candidate_env_vars.pop(envvar)\n\n for env_var, value in candidate_env_vars.items():\n yield from find_cuda_libraries_in_path_list(value)\n\n\ndef _print_cuda_diagnostics(cuda_specs: CUDASpecs) -> None:\n print(\n f\"PyTorch settings found: CUDA_VERSION={cuda_specs.cuda_version_string}, \"\n f\"Highest Compute Capability: {cuda_specs.highest_compute_capability}.\",\n )\n\n binary_path = get_cuda_bnb_library_path(cuda_specs)\n if not binary_path.exists():\n print_dedented(\n f\"\"\"\n Library not found: {binary_path}. Maybe you need to compile it from source?\n \"\"\",\n )\n\n # 7.5 is the minimum CC for int8 tensor cores\n if not cuda_specs.has_imma:\n print_dedented(\n \"\"\"","source_hash":"c21668d64d87411c67e9ab3941007f95b4cede7758f55a9d2690013509d0d144","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.diagnostics.cuda._print_cuda_diagnostics","uri":"program://bitsandbytes/function/bitsandbytes.diagnostics.cuda._print_cuda_diagnostics#L110-L132","kind":"function","name":"_print_cuda_diagnostics","path":"bitsandbytes/diagnostics/cuda.py","language":"python","start_line":110,"end_line":132,"context_start_line":90,"context_end_line":152,"code":" 2. LD_LIBRARY_PATH\n 3. any other env vars, while ignoring those that\n - are known to be unrelated\n - don't contain the path separator `/`\n\n If multiple libraries are found in part 3, we optimistically try one,\n while giving a warning message.\n \"\"\"\n candidate_env_vars = get_potentially_lib_path_containing_env_vars()\n\n for envvar in CUDART_PATH_PREFERRED_ENVVARS:\n if envvar in candidate_env_vars:\n directory = candidate_env_vars[envvar]\n yield from find_cuda_libraries_in_path_list(directory)\n candidate_env_vars.pop(envvar)\n\n for env_var, value in candidate_env_vars.items():\n yield from find_cuda_libraries_in_path_list(value)\n\n\ndef _print_cuda_diagnostics(cuda_specs: CUDASpecs) -> None:\n print(\n f\"PyTorch settings found: CUDA_VERSION={cuda_specs.cuda_version_string}, \"\n f\"Highest Compute Capability: {cuda_specs.highest_compute_capability}.\",\n )\n\n binary_path = get_cuda_bnb_library_path(cuda_specs)\n if not binary_path.exists():\n print_dedented(\n f\"\"\"\n Library not found: {binary_path}. Maybe you need to compile it from source?\n \"\"\",\n )\n\n # 7.5 is the minimum CC for int8 tensor cores\n if not cuda_specs.has_imma:\n print_dedented(\n \"\"\"\n WARNING: Compute capability < 7.5 detected! 
Only slow 8-bit matmul is supported for your GPU!\n If you run into issues with 8-bit matmul, you can try 4-bit quantization:\n https://huggingface.co/blog/4bit-transformers-bitsandbytes\n \"\"\",\n )\n\n\ndef _print_hip_diagnostics(cuda_specs: CUDASpecs) -> None:\n print(f\"PyTorch settings found: ROCM_VERSION={cuda_specs.cuda_version_string}\")\n\n binary_path = get_cuda_bnb_library_path(cuda_specs)\n if not binary_path.exists():\n print_dedented(\n f\"\"\"\n Library not found: {binary_path}.\n Maybe you need to compile it from source? If you compiled from source, check that ROCm version\n in PyTorch Settings matches your ROCm install. If not, reinstall PyTorch for your ROCm version\n and rebuild bitsandbytes.\n \"\"\",\n )\n\n hip_major, hip_minor = cuda_specs.cuda_version_tuple\n if (hip_major, hip_minor) < (6, 1):\n print_dedented(\n \"\"\"","source_hash":"c21668d64d87411c67e9ab3941007f95b4cede7758f55a9d2690013509d0d144","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.diagnostics.cuda._print_hip_diagnostics","uri":"program://bitsandbytes/function/bitsandbytes.diagnostics.cuda._print_hip_diagnostics#L135-L155","kind":"function","name":"_print_hip_diagnostics","path":"bitsandbytes/diagnostics/cuda.py","language":"python","start_line":135,"end_line":155,"context_start_line":115,"context_end_line":175,"code":"\n binary_path = get_cuda_bnb_library_path(cuda_specs)\n if not binary_path.exists():\n print_dedented(\n f\"\"\"\n Library not found: {binary_path}. Maybe you need to compile it from source?\n \"\"\",\n )\n\n # 7.5 is the minimum CC for int8 tensor cores\n if not cuda_specs.has_imma:\n print_dedented(\n \"\"\"\n WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!\n If you run into issues with 8-bit matmul, you can try 4-bit quantization:\n https://huggingface.co/blog/4bit-transformers-bitsandbytes\n \"\"\",\n )\n\n\ndef _print_hip_diagnostics(cuda_specs: CUDASpecs) -> None:\n print(f\"PyTorch settings found: ROCM_VERSION={cuda_specs.cuda_version_string}\")\n\n binary_path = get_cuda_bnb_library_path(cuda_specs)\n if not binary_path.exists():\n print_dedented(\n f\"\"\"\n Library not found: {binary_path}.\n Maybe you need to compile it from source? If you compiled from source, check that ROCm version\n in PyTorch Settings matches your ROCm install. If not, reinstall PyTorch for your ROCm version\n and rebuild bitsandbytes.\n \"\"\",\n )\n\n hip_major, hip_minor = cuda_specs.cuda_version_tuple\n if (hip_major, hip_minor) < (6, 1):\n print_dedented(\n \"\"\"\n WARNING: bitsandbytes is fully supported only from ROCm 6.1.\n \"\"\",\n )\n\n\ndef print_diagnostics(cuda_specs: CUDASpecs) -> None:\n if HIP_ENVIRONMENT:\n _print_hip_diagnostics(cuda_specs)\n else:\n _print_cuda_diagnostics(cuda_specs)\n\n\ndef _print_cuda_runtime_diagnostics() -> None:\n cudart_paths = list(find_cudart_libraries())\n if not cudart_paths:\n print(\"CUDA SETUP: WARNING! 
CUDA runtime files not found in any environmental path.\")\n elif len(cudart_paths) > 1:\n print_dedented(\n f\"\"\"\n Found duplicate CUDA runtime files (see below).\n\n We select the PyTorch default CUDA runtime, which is {torch.version.cuda},\n but this might mismatch with the CUDA version that is needed for bitsandbytes.","source_hash":"c21668d64d87411c67e9ab3941007f95b4cede7758f55a9d2690013509d0d144","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.diagnostics.cuda.print_diagnostics","uri":"program://bitsandbytes/function/bitsandbytes.diagnostics.cuda.print_diagnostics#L158-L162","kind":"function","name":"print_diagnostics","path":"bitsandbytes/diagnostics/cuda.py","language":"python","start_line":158,"end_line":162,"context_start_line":138,"context_end_line":182,"code":" binary_path = get_cuda_bnb_library_path(cuda_specs)\n if not binary_path.exists():\n print_dedented(\n f\"\"\"\n Library not found: {binary_path}.\n Maybe you need to compile it from source? If you compiled from source, check that ROCm version\n in PyTorch Settings matches your ROCm install. If not, reinstall PyTorch for your ROCm version\n and rebuild bitsandbytes.\n \"\"\",\n )\n\n hip_major, hip_minor = cuda_specs.cuda_version_tuple\n if (hip_major, hip_minor) < (6, 1):\n print_dedented(\n \"\"\"\n WARNING: bitsandbytes is fully supported only from ROCm 6.1.\n \"\"\",\n )\n\n\ndef print_diagnostics(cuda_specs: CUDASpecs) -> None:\n if HIP_ENVIRONMENT:\n _print_hip_diagnostics(cuda_specs)\n else:\n _print_cuda_diagnostics(cuda_specs)\n\n\ndef _print_cuda_runtime_diagnostics() -> None:\n cudart_paths = list(find_cudart_libraries())\n if not cudart_paths:\n print(\"CUDA SETUP: WARNING! CUDA runtime files not found in any environmental path.\")\n elif len(cudart_paths) > 1:\n print_dedented(\n f\"\"\"\n Found duplicate CUDA runtime files (see below).\n\n We select the PyTorch default CUDA runtime, which is {torch.version.cuda},\n but this might mismatch with the CUDA version that is needed for bitsandbytes.\n To override this behavior set the `BNB_CUDA_VERSION=` environmental variable.\n\n For example, if you want to use the CUDA version 122,\n BNB_CUDA_VERSION=122 python ...\n\n OR set the environmental variable in your .bashrc:\n export BNB_CUDA_VERSION=122","source_hash":"c21668d64d87411c67e9ab3941007f95b4cede7758f55a9d2690013509d0d144","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.diagnostics.cuda._print_cuda_runtime_diagnostics","uri":"program://bitsandbytes/function/bitsandbytes.diagnostics.cuda._print_cuda_runtime_diagnostics#L165-L189","kind":"function","name":"_print_cuda_runtime_diagnostics","path":"bitsandbytes/diagnostics/cuda.py","language":"python","start_line":165,"end_line":189,"context_start_line":145,"context_end_line":209,"code":" and rebuild bitsandbytes.\n \"\"\",\n )\n\n hip_major, hip_minor = cuda_specs.cuda_version_tuple\n if (hip_major, hip_minor) < (6, 1):\n print_dedented(\n \"\"\"\n WARNING: bitsandbytes is fully supported only from ROCm 6.1.\n \"\"\",\n )\n\n\ndef print_diagnostics(cuda_specs: CUDASpecs) -> None:\n if HIP_ENVIRONMENT:\n _print_hip_diagnostics(cuda_specs)\n else:\n _print_cuda_diagnostics(cuda_specs)\n\n\ndef _print_cuda_runtime_diagnostics() -> None:\n cudart_paths = list(find_cudart_libraries())\n if not cudart_paths:\n print(\"CUDA SETUP: WARNING! 
CUDA runtime files not found in any environmental path.\")\n elif len(cudart_paths) > 1:\n print_dedented(\n f\"\"\"\n Found duplicate CUDA runtime files (see below).\n\n We select the PyTorch default CUDA runtime, which is {torch.version.cuda},\n but this might mismatch with the CUDA version that is needed for bitsandbytes.\n To override this behavior set the `BNB_CUDA_VERSION=` environmental variable.\n\n For example, if you want to use the CUDA version 122,\n BNB_CUDA_VERSION=122 python ...\n\n OR set the environmental variable in your .bashrc:\n export BNB_CUDA_VERSION=122\n\n In the case of a manual override, make sure you set LD_LIBRARY_PATH, e.g.\n export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.2,\n \"\"\",\n )\n for pth in cudart_paths:\n print(f\"* Found CUDA runtime at: {pth}\")\n\n\ndef _print_hip_runtime_diagnostics() -> None:\n cudart_paths = list(find_cudart_libraries())\n if not cudart_paths:\n print(\"WARNING! ROCm runtime files not found in any environmental path.\")\n elif len(cudart_paths) > 1:\n print_dedented(\n f\"\"\"\n Found duplicate ROCm runtime files (see below).\n\n We select the PyTorch default ROCm runtime, which is {torch.version.hip},\n but this might mismatch with the ROCm version that is needed for bitsandbytes.\n\n To resolve it, install PyTorch built for the ROCm version you want to use\n\n and set LD_LIBRARY_PATH to your ROCm install path, e.g.\n export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/rocm-6.1.2/lib,\n \"\"\",\n )","source_hash":"c21668d64d87411c67e9ab3941007f95b4cede7758f55a9d2690013509d0d144","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.diagnostics.cuda._print_hip_runtime_diagnostics","uri":"program://bitsandbytes/function/bitsandbytes.diagnostics.cuda._print_hip_runtime_diagnostics#L192-L212","kind":"function","name":"_print_hip_runtime_diagnostics","path":"bitsandbytes/diagnostics/cuda.py","language":"python","start_line":192,"end_line":212,"context_start_line":172,"context_end_line":219,"code":" Found duplicate CUDA runtime files (see below).\n\n We select the PyTorch default CUDA runtime, which is {torch.version.cuda},\n but this might mismatch with the CUDA version that is needed for bitsandbytes.\n To override this behavior set the `BNB_CUDA_VERSION=` environmental variable.\n\n For example, if you want to use the CUDA version 122,\n BNB_CUDA_VERSION=122 python ...\n\n OR set the environmental variable in your .bashrc:\n export BNB_CUDA_VERSION=122\n\n In the case of a manual override, make sure you set LD_LIBRARY_PATH, e.g.\n export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-11.2,\n \"\"\",\n )\n for pth in cudart_paths:\n print(f\"* Found CUDA runtime at: {pth}\")\n\n\ndef _print_hip_runtime_diagnostics() -> None:\n cudart_paths = list(find_cudart_libraries())\n if not cudart_paths:\n print(\"WARNING! 
ROCm runtime files not found in any environmental path.\")\n elif len(cudart_paths) > 1:\n print_dedented(\n f\"\"\"\n Found duplicate ROCm runtime files (see below).\n\n We select the PyTorch default ROCm runtime, which is {torch.version.hip},\n but this might mismatch with the ROCm version that is needed for bitsandbytes.\n\n To resolve it, install PyTorch built for the ROCm version you want to use\n\n and set LD_LIBRARY_PATH to your ROCm install path, e.g.\n export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/rocm-6.1.2/lib,\n \"\"\",\n )\n\n for pth in cudart_paths:\n print(f\"* Found ROCm runtime at: {pth}\")\n\n\ndef print_runtime_diagnostics() -> None:\n if HIP_ENVIRONMENT:\n _print_hip_runtime_diagnostics()\n else:\n _print_cuda_runtime_diagnostics()","source_hash":"c21668d64d87411c67e9ab3941007f95b4cede7758f55a9d2690013509d0d144","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.diagnostics.cuda.print_runtime_diagnostics","uri":"program://bitsandbytes/function/bitsandbytes.diagnostics.cuda.print_runtime_diagnostics#L215-L219","kind":"function","name":"print_runtime_diagnostics","path":"bitsandbytes/diagnostics/cuda.py","language":"python","start_line":215,"end_line":219,"context_start_line":195,"context_end_line":219,"code":" print(\"WARNING! ROCm runtime files not found in any environmental path.\")\n elif len(cudart_paths) > 1:\n print_dedented(\n f\"\"\"\n Found duplicate ROCm runtime files (see below).\n\n We select the PyTorch default ROCm runtime, which is {torch.version.hip},\n but this might mismatch with the ROCm version that is needed for bitsandbytes.\n\n To resolve it, install PyTorch built for the ROCm version you want to use\n\n and set LD_LIBRARY_PATH to your ROCm install path, e.g.\n export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/rocm-6.1.2/lib,\n \"\"\",\n )\n\n for pth in cudart_paths:\n print(f\"* Found ROCm runtime at: {pth}\")\n\n\ndef print_runtime_diagnostics() -> None:\n if HIP_ENVIRONMENT:\n _print_hip_runtime_diagnostics()\n else:\n _print_cuda_runtime_diagnostics()","source_hash":"c21668d64d87411c67e9ab3941007f95b4cede7758f55a9d2690013509d0d144","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.diagnostics.main","uri":"program://bitsandbytes/module/bitsandbytes.diagnostics.main#L1-L118","kind":"module","name":"bitsandbytes.diagnostics.main","path":"bitsandbytes/diagnostics/main.py","language":"python","start_line":1,"end_line":118,"context_start_line":1,"context_end_line":118,"code":"import importlib\nimport platform\nimport sys\nimport traceback\n\nimport torch\n\nfrom bitsandbytes import __version__ as bnb_version\nfrom bitsandbytes.cextension import BNB_BACKEND\nfrom bitsandbytes.consts import PACKAGE_GITHUB_URL\nfrom bitsandbytes.cuda_specs import get_cuda_specs\nfrom bitsandbytes.diagnostics.cuda import (\n print_diagnostics,\n)\nfrom bitsandbytes.diagnostics.utils import print_dedented, print_header\n\n_RELATED_PACKAGES = [\n \"accelerate\",\n \"diffusers\",\n \"numpy\",\n \"pip\",\n \"peft\",\n \"safetensors\",\n \"transformers\",\n \"triton\",\n \"trl\",\n]\n\n\ndef sanity_check():\n from bitsandbytes.optim import Adam\n\n p = torch.nn.Parameter(torch.rand(10, 10).cuda())\n a = torch.rand(10, 10).cuda()\n p1 = p.data.sum().item()\n adam = Adam([p])\n out = a * p\n loss = out.sum()\n loss.backward()\n adam.step()\n p2 = p.data.sum().item()\n assert p1 != p2\n\n\ndef get_package_version(name: str) -> str:\n try:\n version = importlib.metadata.version(name)\n except importlib.metadata.PackageNotFoundError:\n 
version = \"not found\"\n return version\n\n\ndef show_environment():\n \"\"\"Simple utility to print out environment information.\"\"\"\n\n print(f\"Platform: {platform.platform()}\")\n if platform.system() == \"Linux\":\n print(f\" libc: {'-'.join(platform.libc_ver())}\")\n\n print(f\"Python: {platform.python_version()}\")\n\n print(f\"PyTorch: {torch.__version__}\")\n print(f\" CUDA: {torch.version.cuda or 'N/A'}\")\n print(f\" HIP: {torch.version.hip or 'N/A'}\")\n print(f\" XPU: {getattr(torch.version, 'xpu', 'N/A') or 'N/A'}\")\n\n print(\"Related packages:\")\n for pkg in _RELATED_PACKAGES:\n version = get_package_version(pkg)\n print(f\" {pkg}: {version}\")\n\n\ndef main():\n print_header(f\"bitsandbytes v{bnb_version}\")\n show_environment()\n print_header(\"\")\n\n cuda_specs = get_cuda_specs()\n\n if cuda_specs:\n print_diagnostics(cuda_specs)\n\n # TODO: There's a lot of noise in this; needs improvement.\n # print_cuda_runtime_diagnostics()\n\n if not torch.cuda.is_available():\n print(f\"PyTorch says {BNB_BACKEND} is not available. Possible reasons:\")\n print(f\"1. {BNB_BACKEND} driver not installed\")\n print(\"2. Using a CPU-only PyTorch build\")\n print(\"3. No GPU detected\")\n\n else:\n print(f\"Checking that the library is importable and {BNB_BACKEND} is callable...\")\n\n try:\n sanity_check()\n print(\"SUCCESS!\")\n return\n except RuntimeError as e:\n if \"not available in CPU-only\" in str(e):\n print(\n f\"WARNING: {__package__} is currently running as CPU-only!\\n\"\n \"Therefore, 8-bit optimizers and GPU quantization are unavailable.\\n\\n\"\n f\"If you think that this is so erroneously,\\nplease report an issue!\",\n )\n else:\n raise e\n except Exception:\n traceback.print_exc()\n\n print_dedented(\n f\"\"\"\n Above we output some debug information.\n Please provide this info when creating an issue via {PACKAGE_GITHUB_URL}/issues/new/choose\n WARNING: Please be sure to sanitize sensitive info from the output before posting it.\n \"\"\",\n )\n sys.exit(1)","source_hash":"bb4642b3732c3fae5df6541390297413861ec536dc79db51dba3ba147d69e650","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.diagnostics.main.sanity_check","uri":"program://bitsandbytes/function/bitsandbytes.diagnostics.main.sanity_check#L30-L42","kind":"function","name":"sanity_check","path":"bitsandbytes/diagnostics/main.py","language":"python","start_line":30,"end_line":42,"context_start_line":10,"context_end_line":62,"code":"from bitsandbytes.consts import PACKAGE_GITHUB_URL\nfrom bitsandbytes.cuda_specs import get_cuda_specs\nfrom bitsandbytes.diagnostics.cuda import (\n print_diagnostics,\n)\nfrom bitsandbytes.diagnostics.utils import print_dedented, print_header\n\n_RELATED_PACKAGES = [\n \"accelerate\",\n \"diffusers\",\n \"numpy\",\n \"pip\",\n \"peft\",\n \"safetensors\",\n \"transformers\",\n \"triton\",\n \"trl\",\n]\n\n\ndef sanity_check():\n from bitsandbytes.optim import Adam\n\n p = torch.nn.Parameter(torch.rand(10, 10).cuda())\n a = torch.rand(10, 10).cuda()\n p1 = p.data.sum().item()\n adam = Adam([p])\n out = a * p\n loss = out.sum()\n loss.backward()\n adam.step()\n p2 = p.data.sum().item()\n assert p1 != p2\n\n\ndef get_package_version(name: str) -> str:\n try:\n version = importlib.metadata.version(name)\n except importlib.metadata.PackageNotFoundError:\n version = \"not found\"\n return version\n\n\ndef show_environment():\n \"\"\"Simple utility to print out environment information.\"\"\"\n\n print(f\"Platform: {platform.platform()}\")\n if platform.system() 
== \"Linux\":\n print(f\" libc: {'-'.join(platform.libc_ver())}\")\n\n print(f\"Python: {platform.python_version()}\")\n\n print(f\"PyTorch: {torch.__version__}\")","source_hash":"bb4642b3732c3fae5df6541390297413861ec536dc79db51dba3ba147d69e650","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.diagnostics.main.get_package_version","uri":"program://bitsandbytes/function/bitsandbytes.diagnostics.main.get_package_version#L45-L50","kind":"function","name":"get_package_version","path":"bitsandbytes/diagnostics/main.py","language":"python","start_line":45,"end_line":50,"context_start_line":25,"context_end_line":70,"code":" \"triton\",\n \"trl\",\n]\n\n\ndef sanity_check():\n from bitsandbytes.optim import Adam\n\n p = torch.nn.Parameter(torch.rand(10, 10).cuda())\n a = torch.rand(10, 10).cuda()\n p1 = p.data.sum().item()\n adam = Adam([p])\n out = a * p\n loss = out.sum()\n loss.backward()\n adam.step()\n p2 = p.data.sum().item()\n assert p1 != p2\n\n\ndef get_package_version(name: str) -> str:\n try:\n version = importlib.metadata.version(name)\n except importlib.metadata.PackageNotFoundError:\n version = \"not found\"\n return version\n\n\ndef show_environment():\n \"\"\"Simple utility to print out environment information.\"\"\"\n\n print(f\"Platform: {platform.platform()}\")\n if platform.system() == \"Linux\":\n print(f\" libc: {'-'.join(platform.libc_ver())}\")\n\n print(f\"Python: {platform.python_version()}\")\n\n print(f\"PyTorch: {torch.__version__}\")\n print(f\" CUDA: {torch.version.cuda or 'N/A'}\")\n print(f\" HIP: {torch.version.hip or 'N/A'}\")\n print(f\" XPU: {getattr(torch.version, 'xpu', 'N/A') or 'N/A'}\")\n\n print(\"Related packages:\")\n for pkg in _RELATED_PACKAGES:\n version = get_package_version(pkg)\n print(f\" {pkg}: {version}\")","source_hash":"bb4642b3732c3fae5df6541390297413861ec536dc79db51dba3ba147d69e650","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.diagnostics.main.show_environment","uri":"program://bitsandbytes/function/bitsandbytes.diagnostics.main.show_environment#L53-L70","kind":"function","name":"show_environment","path":"bitsandbytes/diagnostics/main.py","language":"python","start_line":53,"end_line":70,"context_start_line":33,"context_end_line":90,"code":" p = torch.nn.Parameter(torch.rand(10, 10).cuda())\n a = torch.rand(10, 10).cuda()\n p1 = p.data.sum().item()\n adam = Adam([p])\n out = a * p\n loss = out.sum()\n loss.backward()\n adam.step()\n p2 = p.data.sum().item()\n assert p1 != p2\n\n\ndef get_package_version(name: str) -> str:\n try:\n version = importlib.metadata.version(name)\n except importlib.metadata.PackageNotFoundError:\n version = \"not found\"\n return version\n\n\ndef show_environment():\n \"\"\"Simple utility to print out environment information.\"\"\"\n\n print(f\"Platform: {platform.platform()}\")\n if platform.system() == \"Linux\":\n print(f\" libc: {'-'.join(platform.libc_ver())}\")\n\n print(f\"Python: {platform.python_version()}\")\n\n print(f\"PyTorch: {torch.__version__}\")\n print(f\" CUDA: {torch.version.cuda or 'N/A'}\")\n print(f\" HIP: {torch.version.hip or 'N/A'}\")\n print(f\" XPU: {getattr(torch.version, 'xpu', 'N/A') or 'N/A'}\")\n\n print(\"Related packages:\")\n for pkg in _RELATED_PACKAGES:\n version = get_package_version(pkg)\n print(f\" {pkg}: {version}\")\n\n\ndef main():\n print_header(f\"bitsandbytes v{bnb_version}\")\n show_environment()\n print_header(\"\")\n\n cuda_specs = get_cuda_specs()\n\n if cuda_specs:\n print_diagnostics(cuda_specs)\n\n # TODO: 
There's a lot of noise in this; needs improvement.\n # print_cuda_runtime_diagnostics()\n\n if not torch.cuda.is_available():\n print(f\"PyTorch says {BNB_BACKEND} is not available. Possible reasons:\")\n print(f\"1. {BNB_BACKEND} driver not installed\")\n print(\"2. Using a CPU-only PyTorch build\")\n print(\"3. No GPU detected\")","source_hash":"bb4642b3732c3fae5df6541390297413861ec536dc79db51dba3ba147d69e650","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.diagnostics.main.main","uri":"program://bitsandbytes/function/bitsandbytes.diagnostics.main.main#L73-L118","kind":"function","name":"main","path":"bitsandbytes/diagnostics/main.py","language":"python","start_line":73,"end_line":118,"context_start_line":53,"context_end_line":118,"code":"def show_environment():\n \"\"\"Simple utility to print out environment information.\"\"\"\n\n print(f\"Platform: {platform.platform()}\")\n if platform.system() == \"Linux\":\n print(f\" libc: {'-'.join(platform.libc_ver())}\")\n\n print(f\"Python: {platform.python_version()}\")\n\n print(f\"PyTorch: {torch.__version__}\")\n print(f\" CUDA: {torch.version.cuda or 'N/A'}\")\n print(f\" HIP: {torch.version.hip or 'N/A'}\")\n print(f\" XPU: {getattr(torch.version, 'xpu', 'N/A') or 'N/A'}\")\n\n print(\"Related packages:\")\n for pkg in _RELATED_PACKAGES:\n version = get_package_version(pkg)\n print(f\" {pkg}: {version}\")\n\n\ndef main():\n print_header(f\"bitsandbytes v{bnb_version}\")\n show_environment()\n print_header(\"\")\n\n cuda_specs = get_cuda_specs()\n\n if cuda_specs:\n print_diagnostics(cuda_specs)\n\n # TODO: There's a lot of noise in this; needs improvement.\n # print_cuda_runtime_diagnostics()\n\n if not torch.cuda.is_available():\n print(f\"PyTorch says {BNB_BACKEND} is not available. Possible reasons:\")\n print(f\"1. {BNB_BACKEND} driver not installed\")\n print(\"2. Using a CPU-only PyTorch build\")\n print(\"3. 
No GPU detected\")\n\n else:\n print(f\"Checking that the library is importable and {BNB_BACKEND} is callable...\")\n\n try:\n sanity_check()\n print(\"SUCCESS!\")\n return\n except RuntimeError as e:\n if \"not available in CPU-only\" in str(e):\n print(\n f\"WARNING: {__package__} is currently running as CPU-only!\\n\"\n \"Therefore, 8-bit optimizers and GPU quantization are unavailable.\\n\\n\"\n f\"If you think that this is so erroneously,\\nplease report an issue!\",\n )\n else:\n raise e\n except Exception:\n traceback.print_exc()\n\n print_dedented(\n f\"\"\"\n Above we output some debug information.\n Please provide this info when creating an issue via {PACKAGE_GITHUB_URL}/issues/new/choose\n WARNING: Please be sure to sanitize sensitive info from the output before posting it.\n \"\"\",\n )\n sys.exit(1)","source_hash":"bb4642b3732c3fae5df6541390297413861ec536dc79db51dba3ba147d69e650","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.diagnostics.utils","uri":"program://bitsandbytes/module/bitsandbytes.diagnostics.utils#L1-L12","kind":"module","name":"bitsandbytes.diagnostics.utils","path":"bitsandbytes/diagnostics/utils.py","language":"python","start_line":1,"end_line":12,"context_start_line":1,"context_end_line":12,"code":"import textwrap\n\nHEADER_WIDTH = 60\n\n\ndef print_header(txt: str, width: int = HEADER_WIDTH, filler: str = \"=\") -> None:\n txt = f\" {txt} \" if txt else \"\"\n print(txt.center(width, filler))\n\n\ndef print_dedented(text):\n print(\"\\n\".join(textwrap.dedent(text).strip().split(\"\\n\")))","source_hash":"102888dbbf6387bf3defb45664520e9264c91b88353bf62240bc0fe2249aed3c","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.diagnostics.utils.print_header","uri":"program://bitsandbytes/function/bitsandbytes.diagnostics.utils.print_header#L6-L8","kind":"function","name":"print_header","path":"bitsandbytes/diagnostics/utils.py","language":"python","start_line":6,"end_line":8,"context_start_line":1,"context_end_line":12,"code":"import textwrap\n\nHEADER_WIDTH = 60\n\n\ndef print_header(txt: str, width: int = HEADER_WIDTH, filler: str = \"=\") -> None:\n txt = f\" {txt} \" if txt else \"\"\n print(txt.center(width, filler))\n\n\ndef print_dedented(text):\n print(\"\\n\".join(textwrap.dedent(text).strip().split(\"\\n\")))","source_hash":"102888dbbf6387bf3defb45664520e9264c91b88353bf62240bc0fe2249aed3c","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.diagnostics.utils.print_dedented","uri":"program://bitsandbytes/function/bitsandbytes.diagnostics.utils.print_dedented#L11-L12","kind":"function","name":"print_dedented","path":"bitsandbytes/diagnostics/utils.py","language":"python","start_line":11,"end_line":12,"context_start_line":1,"context_end_line":12,"code":"import textwrap\n\nHEADER_WIDTH = 60\n\n\ndef print_header(txt: str, width: int = HEADER_WIDTH, filler: str = \"=\") -> None:\n txt = f\" {txt} \" if txt else \"\"\n print(txt.center(width, filler))\n\n\ndef print_dedented(text):\n print(\"\\n\".join(textwrap.dedent(text).strip().split(\"\\n\")))","source_hash":"102888dbbf6387bf3defb45664520e9264c91b88353bf62240bc0fe2249aed3c","truncated":false} 
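The diagnostics helpers in bitsandbytes/diagnostics/utils.py are small and dependency-free, so their behavior is easy to demonstrate. The following is a minimal standalone sketch: it re-declares the two functions exactly as quoted in the records above, and the sample strings are illustrative only.

import textwrap

HEADER_WIDTH = 60

def print_header(txt: str, width: int = HEADER_WIDTH, filler: str = "=") -> None:
    # Non-empty titles get one space of padding on each side, then are
    # centered inside a rule of `filler` characters; an empty title
    # prints a full-width rule.
    txt = f" {txt} " if txt else ""
    print(txt.center(width, filler))

def print_dedented(text):
    # Strips the common leading indentation that the triple-quoted
    # diagnostic messages carry, so the printed output is flush-left.
    print("\n".join(textwrap.dedent(text).strip().split("\n")))

print_header("bitsandbytes v0.48.0.dev0")  # e.g. '=== bitsandbytes v0.48.0.dev0 ==='
print_header("")                           # a full 60-character '=' rule
print_dedented(
    """
    Found duplicate CUDA runtime files (see below).
    To override this behavior set the BNB_CUDA_VERSION environment variable.
    """
)

This pairing explains the layout of the diagnostic output seen throughout bitsandbytes/diagnostics: headers come from print_header, and every multi-line warning is written as an indented triple-quoted block and routed through print_dedented.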
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.triton_based_modules","uri":"program://bitsandbytes/module/bitsandbytes.nn.triton_based_modules#L1-L264","kind":"module","name":"bitsandbytes.nn.triton_based_modules","path":"bitsandbytes/nn/triton_based_modules.py","language":"python","start_line":1,"end_line":264,"context_start_line":1,"context_end_line":264,"code":"from functools import partial\n\nimport torch\nimport torch.nn as nn\n\nfrom bitsandbytes.triton.dequantize_rowwise import dequantize_rowwise\nfrom bitsandbytes.triton.int8_matmul_mixed_dequantize import (\n int8_matmul_mixed_dequantize,\n)\nfrom bitsandbytes.triton.int8_matmul_rowwise_dequantize import (\n int8_matmul_rowwise_dequantize,\n)\nfrom bitsandbytes.triton.quantize_columnwise_and_transpose import (\n quantize_columnwise_and_transpose,\n)\nfrom bitsandbytes.triton.quantize_global import (\n quantize_global,\n quantize_global_transpose,\n)\nfrom bitsandbytes.triton.quantize_rowwise import quantize_rowwise\nfrom bitsandbytes.triton.triton_utils import is_triton_available\n\n\nclass _switchback_global(torch.autograd.Function):\n @staticmethod\n def forward(ctx, X_3D, W, bias):\n # reshape input to [N * L, D]\n X = X_3D.view(-1, X_3D.size(-1))\n\n # rowwise quantize for X, global quantize for W\n X_int8, state_X = quantize_rowwise(X)\n W_int8, state_W = quantize_global(W)\n\n # save for backward.\n ctx.save_for_backward = X, W\n\n # matmult, fused dequant and add bias\n # call \"mixed\" because we are mixing rowwise quantized and global quantized\n return int8_matmul_mixed_dequantize(X_int8, W_int8.t(), state_X, state_W, bias).view(*X_3D.size()[:-1], -1)\n\n @staticmethod\n def backward(ctx, G_3D):\n # reshape input to [N_out * L, D]\n G = G_3D.reshape(-1, G_3D.size(-1))\n\n grad_X = grad_W = grad_bias = None\n\n X, W = ctx.save_for_backward\n if ctx.needs_input_grad[0]:\n # rowwise quantize for G, global quantize for W\n # for W, we also fuse the transpose operation because only A @ B^T is supported\n # so we transpose once then call .t() in the matmul\n G_int8, state_G = quantize_rowwise(G)\n W_int8, state_W = quantize_global_transpose(W)\n grad_X = int8_matmul_mixed_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(\n *G_3D.size()[:-1],\n -1,\n )\n if ctx.needs_input_grad[1]:\n # backward pass uses standard weight grad\n grad_W = torch.matmul(G.t(), X.to(G.dtype))\n if ctx.needs_input_grad[2]:\n grad_bias = G.sum(dim=0)\n\n return grad_X, grad_W, grad_bias\n\n\nclass _switchback_vectorrize(torch.autograd.Function):\n @staticmethod\n def forward(ctx, X_3D, W, bias):\n # reshape input to [N * L, D]\n X = X_3D.view(-1, X_3D.size(-1))\n\n ctx.save_for_backward = X, W\n # rowwise quantize for X\n # columnwise quantize for W (first rowwise, transpose later)\n X_int8, state_X = quantize_rowwise(X)\n W_int8, state_W = quantize_rowwise(W)\n\n # matmult, fused dequant and add bias\n # call kernel which expects rowwise quantized X and W\n return int8_matmul_rowwise_dequantize(X_int8, W_int8.t(), state_X, state_W, bias).view(*X_3D.size()[:-1], -1)\n\n @staticmethod\n def backward(ctx, G_3D):\n X, W = ctx.save_for_backward\n\n G = G_3D.reshape(-1, G_3D.size(-1))\n\n grad_X = grad_W = grad_bias = None\n\n if ctx.needs_input_grad[0]:\n # rowwise quantize for G, columnwise quantize for W and fused transpose\n # we call .t() for weight later because only A @ B^T is supported\n G_int8, state_G = quantize_rowwise(G)\n W_int8, state_W = quantize_columnwise_and_transpose(W)\n grad_X = 
int8_matmul_rowwise_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(\n *G_3D.size()[:-1],\n -1,\n )\n if ctx.needs_input_grad[1]:\n # backward pass uses standard weight grad\n grad_W = torch.matmul(G.t(), X.to(G.dtype))\n if ctx.needs_input_grad[2]:\n grad_bias = G.sum(dim=0)\n\n return grad_X, grad_W, grad_bias\n\n\nclass _switchback_global_mem_efficient(torch.autograd.Function):\n @staticmethod\n def forward(ctx, X_3D, W, bias):\n # reshape input to [N * L, D]\n X = X_3D.view(-1, X_3D.size(-1))\n X_3D_sz = X_3D.size()\n\n # rowwise quantize for X, global quantize for W\n X_int8, state_X = quantize_rowwise(X)\n del X\n W_int8, state_W = quantize_global(W)\n\n # save for backward.\n ctx.save_for_backward = X_int8, state_X, W_int8, state_W\n\n # matmult, fused dequant and add bias\n # call \"mixed\" because we are mixing rowwise quantized and global quantized\n return int8_matmul_mixed_dequantize(X_int8, W_int8.t(), state_X, state_W, bias).view(*X_3D_sz[:-1], -1)\n\n @staticmethod\n def backward(ctx, G_3D):\n # reshape input to [N_out * L, D]\n G = G_3D.reshape(-1, G_3D.size(-1))\n G_3D_sz = G_3D.size()\n\n grad_X = grad_W = grad_bias = None\n\n X_int8, state_X, W_int8, state_W = ctx.save_for_backward\n if ctx.needs_input_grad[1]:\n real_X = dequantize_rowwise(X_int8, state_X)\n del X_int8\n grad_W = torch.matmul(G.t(), real_X.to(G.dtype))\n del real_X\n if ctx.needs_input_grad[2]:\n grad_bias = G.sum(dim=0)\n if ctx.needs_input_grad[0]:\n G_int8, state_G = quantize_rowwise(G)\n del G\n W_int8 = W_int8.t().contiguous()\n grad_X = int8_matmul_mixed_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(*G_3D_sz[:-1], -1)\n\n return grad_X, grad_W, grad_bias\n\n\nclass SwitchBackLinear(nn.Linear):\n def __init__(\n self,\n in_features: int,\n out_features: int,\n bias: bool = True,\n device=None,\n dtype=None,\n vector_wise_quantization: bool = False,\n mem_efficient: bool = False,\n ):\n super().__init__(in_features, out_features, bias, device, dtype)\n\n if not is_triton_available():\n raise ImportError(\"\"\"Could not import triton. 
Please install triton to use SwitchBackLinear.\n Alternatively, you can use bnb.nn.SwitchBackLinearBnb, but it will be slower\"\"\")\n\n # By default, we use the global quantization.\n self.vector_wise_quantization = vector_wise_quantization\n if self.vector_wise_quantization:\n self._fn = _switchback_vectorrize\n if mem_efficient:\n print(\"mem efficient is not supported for vector-wise quantization.\")\n exit(1)\n else:\n if mem_efficient:\n self._fn = _switchback_global_mem_efficient\n else:\n self._fn = _switchback_global\n\n def prepare_for_eval(self):\n # If we just want to do eval, we can pre-quantize the weights instead of doing it on the forward pass.\n # Note this is experimental and not tested thoroughly.\n # Note this needs to be explicitly called with something like\n # def cond_prepare(m):\n # if hasattr(m, \"prepare_for_eval\"):\n # m.prepare_for_eval()\n # model.apply(cond_prepare)\n print(\"=> preparing for eval.\")\n if self.vector_wise_quantization:\n W_int8, state_W = quantize_rowwise(self.weight)\n else:\n W_int8, state_W = quantize_global(self.weight)\n\n self.register_buffer(\"W_int8\", W_int8)\n self.register_buffer(\"state_W\", state_W)\n\n del self.weight\n\n def forward(self, x):\n if self.training:\n return self._fn.apply(x, self.weight, self.bias)\n else:\n # If it hasn't been \"prepared for eval\", run the standard forward pass.\n if not hasattr(self, \"W_int8\"):\n return self._fn.apply(x, self.weight, self.bias)\n\n # Otherwise, use pre-computed weights.\n X = x.view(-1, x.size(-1))\n X_int8, state_X = quantize_rowwise(X)\n\n if self.vector_wise_quantization:\n return int8_matmul_rowwise_dequantize(X_int8, self.W_int8.t(), state_X, self.state_W, self.bias).view(\n *x.size()[:-1],\n -1,\n )\n else:\n return int8_matmul_mixed_dequantize(X_int8, self.W_int8.t(), state_X, self.state_W, self.bias).view(\n *x.size()[:-1],\n -1,\n )\n\n\nSwitchBackLinearGlobal = partial(SwitchBackLinear, vector_wise_quantization=False)\nSwitchBackLinearGlobalMemEfficient = partial(SwitchBackLinear, vector_wise_quantization=False, mem_efficient=True)\nSwitchBackLinearVectorwise = partial(SwitchBackLinear, vector_wise_quantization=True)\n\n\n# This is just the standard linear function.\nclass StandardLinearFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input, weight, bias=None):\n X = input.view(-1, input.size(-1))\n\n ctx.save_for_backward(X, weight, bias)\n output = input.matmul(weight.t())\n if bias is not None:\n output += bias.unsqueeze(0).expand_as(output)\n return output.view(*input.size()[:-1], -1)\n\n @staticmethod\n def backward(ctx, grad_output_3D):\n input, weight, bias = ctx.saved_tensors\n\n grad_output = grad_output_3D.reshape(-1, grad_output_3D.size(-1))\n\n grad_input = grad_weight = grad_bias = None\n\n if ctx.needs_input_grad[0]:\n grad_input = grad_output.matmul(weight.to(grad_output.dtype)).view(*grad_output_3D.size()[:-1], -1)\n if ctx.needs_input_grad[1]:\n grad_weight = grad_output.t().matmul(input.to(grad_output.dtype))\n if bias is not None and ctx.needs_input_grad[2]:\n grad_bias = grad_output.sum(0)\n\n return grad_input, grad_weight, grad_bias\n\n\nclass StandardLinear(nn.Linear):\n def forward(self, x):\n return StandardLinearFunction.apply(x, self.weight, self.bias)","source_hash":"e186a9e325247ad7dd9f2e986447659ba8d9102fd8e86155bda8cc712b03836d","truncated":false} 
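As a usage note for the module dumped above: SwitchBackLinear behaves like a drop-in nn.Linear during training, and its experimental prepare_for_eval() pre-quantizes the weight once for inference. The sketch below is hypothetical, not from the repository: it assumes a CUDA device, a working triton install, and fp16 activations (an assumption about what the Triton kernels expect).

import torch
from bitsandbytes.nn.triton_based_modules import SwitchBackLinear

# Assumes CUDA + triton are available; dtype/device/shape choices are assumptions.
layer = SwitchBackLinear(1024, 4096).cuda().half()
x = torch.randn(4, 128, 1024, device="cuda", dtype=torch.float16, requires_grad=True)

layer.train()
out = layer(x)                # int8 matmul with on-the-fly quantization of X and W
out.float().sum().backward()  # grad w.r.t. X also flows through the int8 path

layer.eval()
layer.prepare_for_eval()      # experimental: caches W_int8/state_W and drops self.weight
with torch.no_grad():
    out = layer(x)            # now reuses the pre-quantized weight buffers

For a whole model, the module's own comment suggests applying prepare_for_eval conditionally via model.apply, since the cached buffers must be created explicitly on each SwitchBackLinear layer.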
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.triton_based_modules._switchback_global","uri":"program://bitsandbytes/class/bitsandbytes.nn.triton_based_modules._switchback_global#L24-L65","kind":"class","name":"_switchback_global","path":"bitsandbytes/nn/triton_based_modules.py","language":"python","start_line":24,"end_line":65,"context_start_line":4,"context_end_line":85,"code":"import torch.nn as nn\n\nfrom bitsandbytes.triton.dequantize_rowwise import dequantize_rowwise\nfrom bitsandbytes.triton.int8_matmul_mixed_dequantize import (\n int8_matmul_mixed_dequantize,\n)\nfrom bitsandbytes.triton.int8_matmul_rowwise_dequantize import (\n int8_matmul_rowwise_dequantize,\n)\nfrom bitsandbytes.triton.quantize_columnwise_and_transpose import (\n quantize_columnwise_and_transpose,\n)\nfrom bitsandbytes.triton.quantize_global import (\n quantize_global,\n quantize_global_transpose,\n)\nfrom bitsandbytes.triton.quantize_rowwise import quantize_rowwise\nfrom bitsandbytes.triton.triton_utils import is_triton_available\n\n\nclass _switchback_global(torch.autograd.Function):\n @staticmethod\n def forward(ctx, X_3D, W, bias):\n # reshape input to [N * L, D]\n X = X_3D.view(-1, X_3D.size(-1))\n\n # rowwise quantize for X, global quantize for W\n X_int8, state_X = quantize_rowwise(X)\n W_int8, state_W = quantize_global(W)\n\n # save for backward.\n ctx.save_for_backward = X, W\n\n # matmult, fused dequant and add bias\n # call \"mixed\" because we are mixing rowwise quantized and global quantized\n return int8_matmul_mixed_dequantize(X_int8, W_int8.t(), state_X, state_W, bias).view(*X_3D.size()[:-1], -1)\n\n @staticmethod\n def backward(ctx, G_3D):\n # reshape input to [N_out * L, D]\n G = G_3D.reshape(-1, G_3D.size(-1))\n\n grad_X = grad_W = grad_bias = None\n\n X, W = ctx.save_for_backward\n if ctx.needs_input_grad[0]:\n # rowwise quantize for G, global quantize for W\n # for W, we also fuse the transpose operation because only A @ B^T is supported\n # so we transpose once then call .t() in the matmul\n G_int8, state_G = quantize_rowwise(G)\n W_int8, state_W = quantize_global_transpose(W)\n grad_X = int8_matmul_mixed_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(\n *G_3D.size()[:-1],\n -1,\n )\n if ctx.needs_input_grad[1]:\n # backward pass uses standard weight grad\n grad_W = torch.matmul(G.t(), X.to(G.dtype))\n if ctx.needs_input_grad[2]:\n grad_bias = G.sum(dim=0)\n\n return grad_X, grad_W, grad_bias\n\n\nclass _switchback_vectorrize(torch.autograd.Function):\n @staticmethod\n def forward(ctx, X_3D, W, bias):\n # reshape input to [N * L, D]\n X = X_3D.view(-1, X_3D.size(-1))\n\n ctx.save_for_backward = X, W\n # rowwise quantize for X\n # columnwise quantize for W (first rowwise, transpose later)\n X_int8, state_X = quantize_rowwise(X)\n W_int8, state_W = quantize_rowwise(W)\n\n # matmult, fused dequant and add bias\n # call kernel which expects rowwise quantized X and W\n return int8_matmul_rowwise_dequantize(X_int8, W_int8.t(), state_X, state_W, bias).view(*X_3D.size()[:-1], -1)\n\n @staticmethod\n def backward(ctx, G_3D):","source_hash":"e186a9e325247ad7dd9f2e986447659ba8d9102fd8e86155bda8cc712b03836d","truncated":false} 
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.triton_based_modules._switchback_vectorrize","uri":"program://bitsandbytes/class/bitsandbytes.nn.triton_based_modules._switchback_vectorrize#L68-L107","kind":"class","name":"_switchback_vectorrize","path":"bitsandbytes/nn/triton_based_modules.py","language":"python","start_line":68,"end_line":107,"context_start_line":48,"context_end_line":127,"code":" X, W = ctx.save_for_backward\n if ctx.needs_input_grad[0]:\n # rowwise quantize for G, global quantize for W\n # for W, we also fuse the transpose operation because only A @ B^T is supported\n # so we transpose once then call .t() in the matmul\n G_int8, state_G = quantize_rowwise(G)\n W_int8, state_W = quantize_global_transpose(W)\n grad_X = int8_matmul_mixed_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(\n *G_3D.size()[:-1],\n -1,\n )\n if ctx.needs_input_grad[1]:\n # backward pass uses standard weight grad\n grad_W = torch.matmul(G.t(), X.to(G.dtype))\n if ctx.needs_input_grad[2]:\n grad_bias = G.sum(dim=0)\n\n return grad_X, grad_W, grad_bias\n\n\nclass _switchback_vectorrize(torch.autograd.Function):\n @staticmethod\n def forward(ctx, X_3D, W, bias):\n # reshape input to [N * L, D]\n X = X_3D.view(-1, X_3D.size(-1))\n\n ctx.save_for_backward = X, W\n # rowwise quantize for X\n # columnwise quantize for W (first rowwise, transpose later)\n X_int8, state_X = quantize_rowwise(X)\n W_int8, state_W = quantize_rowwise(W)\n\n # matmult, fused dequant and add bias\n # call kernel which expects rowwise quantized X and W\n return int8_matmul_rowwise_dequantize(X_int8, W_int8.t(), state_X, state_W, bias).view(*X_3D.size()[:-1], -1)\n\n @staticmethod\n def backward(ctx, G_3D):\n X, W = ctx.save_for_backward\n\n G = G_3D.reshape(-1, G_3D.size(-1))\n\n grad_X = grad_W = grad_bias = None\n\n if ctx.needs_input_grad[0]:\n # rowwise quantize for G, columnwise quantize for W and fused transpose\n # we call .t() for weight later because only A @ B^T is supported\n G_int8, state_G = quantize_rowwise(G)\n W_int8, state_W = quantize_columnwise_and_transpose(W)\n grad_X = int8_matmul_rowwise_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(\n *G_3D.size()[:-1],\n -1,\n )\n if ctx.needs_input_grad[1]:\n # backward pass uses standard weight grad\n grad_W = torch.matmul(G.t(), X.to(G.dtype))\n if ctx.needs_input_grad[2]:\n grad_bias = G.sum(dim=0)\n\n return grad_X, grad_W, grad_bias\n\n\nclass _switchback_global_mem_efficient(torch.autograd.Function):\n @staticmethod\n def forward(ctx, X_3D, W, bias):\n # reshape input to [N * L, D]\n X = X_3D.view(-1, X_3D.size(-1))\n X_3D_sz = X_3D.size()\n\n # rowwise quantize for X, global quantize for W\n X_int8, state_X = quantize_rowwise(X)\n del X\n W_int8, state_W = quantize_global(W)\n\n # save for backward.\n ctx.save_for_backward = X_int8, state_X, W_int8, state_W\n\n # matmult, fused dequant and add bias\n # call \"mixed\" because we are mixing rowwise quantized and global quantized\n return int8_matmul_mixed_dequantize(X_int8, W_int8.t(), state_X, state_W, bias).view(*X_3D_sz[:-1], -1)","source_hash":"e186a9e325247ad7dd9f2e986447659ba8d9102fd8e86155bda8cc712b03836d","truncated":false} 
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.triton_based_modules._switchback_global_mem_efficient","uri":"program://bitsandbytes/class/bitsandbytes.nn.triton_based_modules._switchback_global_mem_efficient#L110-L151","kind":"class","name":"_switchback_global_mem_efficient","path":"bitsandbytes/nn/triton_based_modules.py","language":"python","start_line":110,"end_line":151,"context_start_line":90,"context_end_line":171,"code":" grad_X = grad_W = grad_bias = None\n\n if ctx.needs_input_grad[0]:\n # rowwise quantize for G, columnwise quantize for W and fused transpose\n # we call .t() for weight later because only A @ B^T is supported\n G_int8, state_G = quantize_rowwise(G)\n W_int8, state_W = quantize_columnwise_and_transpose(W)\n grad_X = int8_matmul_rowwise_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(\n *G_3D.size()[:-1],\n -1,\n )\n if ctx.needs_input_grad[1]:\n # backward pass uses standard weight grad\n grad_W = torch.matmul(G.t(), X.to(G.dtype))\n if ctx.needs_input_grad[2]:\n grad_bias = G.sum(dim=0)\n\n return grad_X, grad_W, grad_bias\n\n\nclass _switchback_global_mem_efficient(torch.autograd.Function):\n @staticmethod\n def forward(ctx, X_3D, W, bias):\n # reshape input to [N * L, D]\n X = X_3D.view(-1, X_3D.size(-1))\n X_3D_sz = X_3D.size()\n\n # rowwise quantize for X, global quantize for W\n X_int8, state_X = quantize_rowwise(X)\n del X\n W_int8, state_W = quantize_global(W)\n\n # save for backward.\n ctx.save_for_backward = X_int8, state_X, W_int8, state_W\n\n # matmult, fused dequant and add bias\n # call \"mixed\" because we are mixing rowwise quantized and global quantized\n return int8_matmul_mixed_dequantize(X_int8, W_int8.t(), state_X, state_W, bias).view(*X_3D_sz[:-1], -1)\n\n @staticmethod\n def backward(ctx, G_3D):\n # reshape input to [N_out * L, D]\n G = G_3D.reshape(-1, G_3D.size(-1))\n G_3D_sz = G_3D.size()\n\n grad_X = grad_W = grad_bias = None\n\n X_int8, state_X, W_int8, state_W = ctx.save_for_backward\n if ctx.needs_input_grad[1]:\n real_X = dequantize_rowwise(X_int8, state_X)\n del X_int8\n grad_W = torch.matmul(G.t(), real_X.to(G.dtype))\n del real_X\n if ctx.needs_input_grad[2]:\n grad_bias = G.sum(dim=0)\n if ctx.needs_input_grad[0]:\n G_int8, state_G = quantize_rowwise(G)\n del G\n W_int8 = W_int8.t().contiguous()\n grad_X = int8_matmul_mixed_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(*G_3D_sz[:-1], -1)\n\n return grad_X, grad_W, grad_bias\n\n\nclass SwitchBackLinear(nn.Linear):\n def __init__(\n self,\n in_features: int,\n out_features: int,\n bias: bool = True,\n device=None,\n dtype=None,\n vector_wise_quantization: bool = False,\n mem_efficient: bool = False,\n ):\n super().__init__(in_features, out_features, bias, device, dtype)\n\n if not is_triton_available():\n raise ImportError(\"\"\"Could not import triton. 
Please install triton to use SwitchBackLinear.\n Alternatively, you can use bnb.nn.SwitchBackLinearBnb, but it will be slower\"\"\")\n\n # By default, we use the global quantization.","source_hash":"e186a9e325247ad7dd9f2e986447659ba8d9102fd8e86155bda8cc712b03836d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.triton_based_modules.SwitchBackLinear","uri":"program://bitsandbytes/class/bitsandbytes.nn.triton_based_modules.SwitchBackLinear#L154-L224","kind":"class","name":"SwitchBackLinear","path":"bitsandbytes/nn/triton_based_modules.py","language":"python","start_line":154,"end_line":224,"context_start_line":134,"context_end_line":244,"code":"\n grad_X = grad_W = grad_bias = None\n\n X_int8, state_X, W_int8, state_W = ctx.save_for_backward\n if ctx.needs_input_grad[1]:\n real_X = dequantize_rowwise(X_int8, state_X)\n del X_int8\n grad_W = torch.matmul(G.t(), real_X.to(G.dtype))\n del real_X\n if ctx.needs_input_grad[2]:\n grad_bias = G.sum(dim=0)\n if ctx.needs_input_grad[0]:\n G_int8, state_G = quantize_rowwise(G)\n del G\n W_int8 = W_int8.t().contiguous()\n grad_X = int8_matmul_mixed_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(*G_3D_sz[:-1], -1)\n\n return grad_X, grad_W, grad_bias\n\n\nclass SwitchBackLinear(nn.Linear):\n def __init__(\n self,\n in_features: int,\n out_features: int,\n bias: bool = True,\n device=None,\n dtype=None,\n vector_wise_quantization: bool = False,\n mem_efficient: bool = False,\n ):\n super().__init__(in_features, out_features, bias, device, dtype)\n\n if not is_triton_available():\n raise ImportError(\"\"\"Could not import triton. Please install triton to use SwitchBackLinear.\n Alternatively, you can use bnb.nn.SwitchBackLinearBnb, but it will be slower\"\"\")\n\n # By default, we use the global quantization.\n self.vector_wise_quantization = vector_wise_quantization\n if self.vector_wise_quantization:\n self._fn = _switchback_vectorrize\n if mem_efficient:\n print(\"mem efficient is not supported for vector-wise quantization.\")\n exit(1)\n else:\n if mem_efficient:\n self._fn = _switchback_global_mem_efficient\n else:\n self._fn = _switchback_global\n\n def prepare_for_eval(self):\n # If we just want to do eval, we can pre-quantize the weights instead of doing it on the forward pass.\n # Note this is experimental and not tested thoroughly.\n # Note this needs to be explicitly called with something like\n # def cond_prepare(m):\n # if hasattr(m, \"prepare_for_eval\"):\n # m.prepare_for_eval()\n # model.apply(cond_prepare)\n print(\"=> preparing for eval.\")\n if self.vector_wise_quantization:\n W_int8, state_W = quantize_rowwise(self.weight)\n else:\n W_int8, state_W = quantize_global(self.weight)\n\n self.register_buffer(\"W_int8\", W_int8)\n self.register_buffer(\"state_W\", state_W)\n\n del self.weight\n\n def forward(self, x):\n if self.training:\n return self._fn.apply(x, self.weight, self.bias)\n else:\n # If it hasn't been \"prepared for eval\", run the standard forward pass.\n if not hasattr(self, \"W_int8\"):\n return self._fn.apply(x, self.weight, self.bias)\n\n # Otherwise, use pre-computed weights.\n X = x.view(-1, x.size(-1))\n X_int8, state_X = quantize_rowwise(X)\n\n if self.vector_wise_quantization:\n return int8_matmul_rowwise_dequantize(X_int8, self.W_int8.t(), state_X, self.state_W, self.bias).view(\n *x.size()[:-1],\n -1,\n )\n else:\n return int8_matmul_mixed_dequantize(X_int8, self.W_int8.t(), state_X, self.state_W, self.bias).view(\n *x.size()[:-1],\n -1,\n )\n\n\nSwitchBackLinearGlobal = 
partial(SwitchBackLinear, vector_wise_quantization=False)\nSwitchBackLinearGlobalMemEfficient = partial(SwitchBackLinear, vector_wise_quantization=False, mem_efficient=True)\nSwitchBackLinearVectorwise = partial(SwitchBackLinear, vector_wise_quantization=True)\n\n\n# This is just the standard linear function.\nclass StandardLinearFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input, weight, bias=None):\n X = input.view(-1, input.size(-1))\n\n ctx.save_for_backward(X, weight, bias)\n output = input.matmul(weight.t())\n if bias is not None:\n output += bias.unsqueeze(0).expand_as(output)\n return output.view(*input.size()[:-1], -1)\n\n @staticmethod","source_hash":"e186a9e325247ad7dd9f2e986447659ba8d9102fd8e86155bda8cc712b03836d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.triton_based_modules.StandardLinearFunction","uri":"program://bitsandbytes/class/bitsandbytes.nn.triton_based_modules.StandardLinearFunction#L233-L259","kind":"class","name":"StandardLinearFunction","path":"bitsandbytes/nn/triton_based_modules.py","language":"python","start_line":233,"end_line":259,"context_start_line":213,"context_end_line":264,"code":" X_int8, state_X = quantize_rowwise(X)\n\n if self.vector_wise_quantization:\n return int8_matmul_rowwise_dequantize(X_int8, self.W_int8.t(), state_X, self.state_W, self.bias).view(\n *x.size()[:-1],\n -1,\n )\n else:\n return int8_matmul_mixed_dequantize(X_int8, self.W_int8.t(), state_X, self.state_W, self.bias).view(\n *x.size()[:-1],\n -1,\n )\n\n\nSwitchBackLinearGlobal = partial(SwitchBackLinear, vector_wise_quantization=False)\nSwitchBackLinearGlobalMemEfficient = partial(SwitchBackLinear, vector_wise_quantization=False, mem_efficient=True)\nSwitchBackLinearVectorwise = partial(SwitchBackLinear, vector_wise_quantization=True)\n\n\n# This is just the standard linear function.\nclass StandardLinearFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input, weight, bias=None):\n X = input.view(-1, input.size(-1))\n\n ctx.save_for_backward(X, weight, bias)\n output = input.matmul(weight.t())\n if bias is not None:\n output += bias.unsqueeze(0).expand_as(output)\n return output.view(*input.size()[:-1], -1)\n\n @staticmethod\n def backward(ctx, grad_output_3D):\n input, weight, bias = ctx.saved_tensors\n\n grad_output = grad_output_3D.reshape(-1, grad_output_3D.size(-1))\n\n grad_input = grad_weight = grad_bias = None\n\n if ctx.needs_input_grad[0]:\n grad_input = grad_output.matmul(weight.to(grad_output.dtype)).view(*grad_output_3D.size()[:-1], -1)\n if ctx.needs_input_grad[1]:\n grad_weight = grad_output.t().matmul(input.to(grad_output.dtype))\n if bias is not None and ctx.needs_input_grad[2]:\n grad_bias = grad_output.sum(0)\n\n return grad_input, grad_weight, grad_bias\n\n\nclass StandardLinear(nn.Linear):\n def forward(self, x):\n return StandardLinearFunction.apply(x, self.weight, self.bias)","source_hash":"e186a9e325247ad7dd9f2e986447659ba8d9102fd8e86155bda8cc712b03836d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.triton_based_modules.StandardLinear","uri":"program://bitsandbytes/class/bitsandbytes.nn.triton_based_modules.StandardLinear#L262-L264","kind":"class","name":"StandardLinear","path":"bitsandbytes/nn/triton_based_modules.py","language":"python","start_line":262,"end_line":264,"context_start_line":242,"context_end_line":264,"code":" return output.view(*input.size()[:-1], -1)\n\n @staticmethod\n def backward(ctx, grad_output_3D):\n input, weight, bias = 
ctx.saved_tensors\n\n grad_output = grad_output_3D.reshape(-1, grad_output_3D.size(-1))\n\n grad_input = grad_weight = grad_bias = None\n\n if ctx.needs_input_grad[0]:\n grad_input = grad_output.matmul(weight.to(grad_output.dtype)).view(*grad_output_3D.size()[:-1], -1)\n if ctx.needs_input_grad[1]:\n grad_weight = grad_output.t().matmul(input.to(grad_output.dtype))\n if bias is not None and ctx.needs_input_grad[2]:\n grad_bias = grad_output.sum(0)\n\n return grad_input, grad_weight, grad_bias\n\n\nclass StandardLinear(nn.Linear):\n def forward(self, x):\n return StandardLinearFunction.apply(x, self.weight, self.bias)","source_hash":"e186a9e325247ad7dd9f2e986447659ba8d9102fd8e86155bda8cc712b03836d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.triton_based_modules.forward","uri":"program://bitsandbytes/function/bitsandbytes.nn.triton_based_modules.forward#L263-L264","kind":"function","name":"forward","path":"bitsandbytes/nn/triton_based_modules.py","language":"python","start_line":263,"end_line":264,"context_start_line":243,"context_end_line":264,"code":"\n @staticmethod\n def backward(ctx, grad_output_3D):\n input, weight, bias = ctx.saved_tensors\n\n grad_output = grad_output_3D.reshape(-1, grad_output_3D.size(-1))\n\n grad_input = grad_weight = grad_bias = None\n\n if ctx.needs_input_grad[0]:\n grad_input = grad_output.matmul(weight.to(grad_output.dtype)).view(*grad_output_3D.size()[:-1], -1)\n if ctx.needs_input_grad[1]:\n grad_weight = grad_output.t().matmul(input.to(grad_output.dtype))\n if bias is not None and ctx.needs_input_grad[2]:\n grad_bias = grad_output.sum(0)\n\n return grad_input, grad_weight, grad_bias\n\n\nclass StandardLinear(nn.Linear):\n def forward(self, x):\n return StandardLinearFunction.apply(x, self.weight, self.bias)","source_hash":"e186a9e325247ad7dd9f2e986447659ba8d9102fd8e86155bda8cc712b03836d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.triton_based_modules.backward","uri":"program://bitsandbytes/function/bitsandbytes.nn.triton_based_modules.backward#L245-L259","kind":"function","name":"backward","path":"bitsandbytes/nn/triton_based_modules.py","language":"python","start_line":245,"end_line":259,"context_start_line":225,"context_end_line":264,"code":"\n\nSwitchBackLinearGlobal = partial(SwitchBackLinear, vector_wise_quantization=False)\nSwitchBackLinearGlobalMemEfficient = partial(SwitchBackLinear, vector_wise_quantization=False, mem_efficient=True)\nSwitchBackLinearVectorwise = partial(SwitchBackLinear, vector_wise_quantization=True)\n\n\n# This is just the standard linear function.\nclass StandardLinearFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input, weight, bias=None):\n X = input.view(-1, input.size(-1))\n\n ctx.save_for_backward(X, weight, bias)\n output = input.matmul(weight.t())\n if bias is not None:\n output += bias.unsqueeze(0).expand_as(output)\n return output.view(*input.size()[:-1], -1)\n\n @staticmethod\n def backward(ctx, grad_output_3D):\n input, weight, bias = ctx.saved_tensors\n\n grad_output = grad_output_3D.reshape(-1, grad_output_3D.size(-1))\n\n grad_input = grad_weight = grad_bias = None\n\n if ctx.needs_input_grad[0]:\n grad_input = grad_output.matmul(weight.to(grad_output.dtype)).view(*grad_output_3D.size()[:-1], -1)\n if ctx.needs_input_grad[1]:\n grad_weight = grad_output.t().matmul(input.to(grad_output.dtype))\n if bias is not None and ctx.needs_input_grad[2]:\n grad_bias = grad_output.sum(0)\n\n return grad_input, grad_weight, 
grad_bias\n\n\nclass StandardLinear(nn.Linear):\n def forward(self, x):\n return StandardLinearFunction.apply(x, self.weight, self.bias)","source_hash":"e186a9e325247ad7dd9f2e986447659ba8d9102fd8e86155bda8cc712b03836d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.triton_based_modules.__init__","uri":"program://bitsandbytes/function/bitsandbytes.nn.triton_based_modules.__init__#L155-L182","kind":"function","name":"__init__","path":"bitsandbytes/nn/triton_based_modules.py","language":"python","start_line":155,"end_line":182,"context_start_line":135,"context_end_line":202,"code":" grad_X = grad_W = grad_bias = None\n\n X_int8, state_X, W_int8, state_W = ctx.save_for_backward\n if ctx.needs_input_grad[1]:\n real_X = dequantize_rowwise(X_int8, state_X)\n del X_int8\n grad_W = torch.matmul(G.t(), real_X.to(G.dtype))\n del real_X\n if ctx.needs_input_grad[2]:\n grad_bias = G.sum(dim=0)\n if ctx.needs_input_grad[0]:\n G_int8, state_G = quantize_rowwise(G)\n del G\n W_int8 = W_int8.t().contiguous()\n grad_X = int8_matmul_mixed_dequantize(G_int8, W_int8.t(), state_G, state_W, None).view(*G_3D_sz[:-1], -1)\n\n return grad_X, grad_W, grad_bias\n\n\nclass SwitchBackLinear(nn.Linear):\n def __init__(\n self,\n in_features: int,\n out_features: int,\n bias: bool = True,\n device=None,\n dtype=None,\n vector_wise_quantization: bool = False,\n mem_efficient: bool = False,\n ):\n super().__init__(in_features, out_features, bias, device, dtype)\n\n if not is_triton_available():\n raise ImportError(\"\"\"Could not import triton. Please install triton to use SwitchBackLinear.\n Alternatively, you can use bnb.nn.SwitchBackLinearBnb, but it will be slower\"\"\")\n\n # By default, we use the global quantization.\n self.vector_wise_quantization = vector_wise_quantization\n if self.vector_wise_quantization:\n self._fn = _switchback_vectorrize\n if mem_efficient:\n print(\"mem efficient is not supported for vector-wise quantization.\")\n exit(1)\n else:\n if mem_efficient:\n self._fn = _switchback_global_mem_efficient\n else:\n self._fn = _switchback_global\n\n def prepare_for_eval(self):\n # If we just want to do eval, we can pre-quantize the weights instead of doing it on the forward pass.\n # Note this is experimental and not tested thoroughly.\n # Note this needs to be explicitly called with something like\n # def cond_prepare(m):\n # if hasattr(m, \"prepare_for_eval\"):\n # m.prepare_for_eval()\n # model.apply(cond_prepare)\n print(\"=> preparing for eval.\")\n if self.vector_wise_quantization:\n W_int8, state_W = quantize_rowwise(self.weight)\n else:\n W_int8, state_W = quantize_global(self.weight)\n\n self.register_buffer(\"W_int8\", W_int8)\n self.register_buffer(\"state_W\", state_W)\n\n del self.weight\n","source_hash":"e186a9e325247ad7dd9f2e986447659ba8d9102fd8e86155bda8cc712b03836d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.triton_based_modules.prepare_for_eval","uri":"program://bitsandbytes/function/bitsandbytes.nn.triton_based_modules.prepare_for_eval#L184-L201","kind":"function","name":"prepare_for_eval","path":"bitsandbytes/nn/triton_based_modules.py","language":"python","start_line":184,"end_line":201,"context_start_line":164,"context_end_line":221,"code":" ):\n super().__init__(in_features, out_features, bias, device, dtype)\n\n if not is_triton_available():\n raise ImportError(\"\"\"Could not import triton. 
Please install triton to use SwitchBackLinear.\n Alternatively, you can use bnb.nn.SwitchBackLinearBnb, but it will be slower\"\"\")\n\n # By default, we use the global quantization.\n self.vector_wise_quantization = vector_wise_quantization\n if self.vector_wise_quantization:\n self._fn = _switchback_vectorrize\n if mem_efficient:\n print(\"mem efficient is not supported for vector-wise quantization.\")\n exit(1)\n else:\n if mem_efficient:\n self._fn = _switchback_global_mem_efficient\n else:\n self._fn = _switchback_global\n\n def prepare_for_eval(self):\n # If we just want to do eval, we can pre-quantize the weights instead of doing it on the forward pass.\n # Note this is experimental and not tested thoroughly.\n # Note this needs to be explicitly called with something like\n # def cond_prepare(m):\n # if hasattr(m, \"prepare_for_eval\"):\n # m.prepare_for_eval()\n # model.apply(cond_prepare)\n print(\"=> preparing for eval.\")\n if self.vector_wise_quantization:\n W_int8, state_W = quantize_rowwise(self.weight)\n else:\n W_int8, state_W = quantize_global(self.weight)\n\n self.register_buffer(\"W_int8\", W_int8)\n self.register_buffer(\"state_W\", state_W)\n\n del self.weight\n\n def forward(self, x):\n if self.training:\n return self._fn.apply(x, self.weight, self.bias)\n else:\n # If it hasn't been \"prepared for eval\", run the standard forward pass.\n if not hasattr(self, \"W_int8\"):\n return self._fn.apply(x, self.weight, self.bias)\n\n # Otherwise, use pre-computed weights.\n X = x.view(-1, x.size(-1))\n X_int8, state_X = quantize_rowwise(X)\n\n if self.vector_wise_quantization:\n return int8_matmul_rowwise_dequantize(X_int8, self.W_int8.t(), state_X, self.state_W, self.bias).view(\n *x.size()[:-1],\n -1,\n )\n else:\n return int8_matmul_mixed_dequantize(X_int8, self.W_int8.t(), state_X, self.state_W, self.bias).view(","source_hash":"e186a9e325247ad7dd9f2e986447659ba8d9102fd8e86155bda8cc712b03836d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.parametrize","uri":"program://bitsandbytes/module/bitsandbytes.nn.parametrize#L1-L192","kind":"module","name":"bitsandbytes.nn.parametrize","path":"bitsandbytes/nn/parametrize.py","language":"python","start_line":1,"end_line":192,"context_start_line":1,"context_end_line":192,"code":"from functools import partial\nfrom typing import Any, Literal, Optional\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.utils.parametrize as P\n\nfrom .. import functional as F\n\n\nclass Bnb4bitParametrization(nn.Module):\n \"\"\"\n A parametrization module that handles dequantization of a 4-bit quantized parameter.\n\n The parameter data is expected to be already quantized when this parametrization is applied.\n This module will dequantize the parameter data to its original floating-point representation\n when the forward method is called (i.e. 
when the parameter is accessed).\n\n Args:\n quant_state (`F.QuantState`):\n The quantization state containing the necessary information for dequantization.\n \"\"\"\n\n def __init__(self, quant_state: F.QuantState):\n super().__init__()\n self.quant_state = quant_state\n\n @torch.no_grad()\n def forward(self, quantized_param: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Forward pass to dequantize the parameter.\n\n Args:\n quantized_param (`torch.Tensor`): The quantized parameter tensor (from .original)\n\n Returns:\n `torch.Tensor`: The dequantized parameter tensor in the original shape and dtype.\n \"\"\"\n return F.dequantize_4bit(quantized_param, self.quant_state)\n\n\ndef replace_parameter_4bit_prequantized(\n module: nn.Module, param_name: str, qs_dict: dict[str, Any], device: torch.device\n):\n if not hasattr(module, param_name):\n raise AttributeError(f\"Module does not have parameter '{param_name}'\")\n\n original_param = getattr(module, param_name)\n\n if not isinstance(original_param, nn.Parameter):\n raise TypeError(f\"Parameter '{param_name}' is not an instance of nn.Parameter\")\n\n quant_state = F.QuantState.from_dict(qs_dict, device=device)\n\n # Apply a parametrization to the module to handle dequantization.\n P.register_parametrization(module, param_name, Bnb4bitParametrization(quant_state), unsafe=True)\n\n # Next, register hooks.\n _register_parametrization_hooks(module, param_name)\n\n\ndef replace_parameter_4bit(\n module: nn.Module,\n param_name: str,\n compress_statistics: bool = False,\n quant_type: Literal[\"nf4\", \"fp4\"] = \"nf4\",\n blocksize: Optional[int] = None,\n):\n \"\"\"\n Replace a module parameter with a 4-bit quantized version using parametrization.\n\n This function quantizes an existing parameter in a PyTorch module to 4-bit precision\n and sets up parametrization to handle automatic dequantization during forward passes.\n The original parameter is replaced with quantized data, and a parametrization layer\n is registered to manage the quantization state and dequantization process.\n\n Additional, it registers a state dict post-hook to ensure that the quantization state\n is saved correctly when the model's state dict is saved.\n\n It is useful for MoE models or other scenarios where you want to quantize parameters\n outside of nn.Linear layers without changing the model's architecture.\n\n This feature is experimental and may change in future releases.\n\n Args:\n module (`nn.Module`):\n The PyTorch module containing the parameter to be quantized.\n param_name (`str`):\n The name of the parameter within the module to quantize.\n compress_statistics (`bool`, *optional*, defaults to `False`):\n Whether to compress quantization statistics to reduce memory usage.\n quant_type (`Literal[\"nf4\", \"fp4\"]`, *optional*, defaults to `\"nf4\"`):\n The quantization format to use.\n blocksize (`int`, *optional*, defaults to `None`):\n The block size for quantization. 
If None, uses the default block size.\n\n Raises:\n AttributeError: If the module does not have the specified parameter.\n TypeError: If the specified attribute is not an instance of nn.Parameter.\n \"\"\"\n\n if not hasattr(module, param_name):\n raise AttributeError(f\"Module does not have parameter '{param_name}'\")\n\n original_param = getattr(module, param_name)\n\n if not isinstance(original_param, nn.Parameter):\n raise TypeError(f\"Parameter '{param_name}' is not an instance of nn.Parameter\")\n\n # Quantize the original parameter.\n quantized_data, quant_state = F.quantize_4bit(\n original_param.data,\n blocksize=blocksize,\n compress_statistics=compress_statistics,\n quant_type=quant_type,\n )\n\n # Replace the parameter with the quantized data.\n setattr(module, param_name, nn.Parameter(quantized_data, requires_grad=False))\n del original_param\n\n # Apply a parametrization to the module to handle dequantization.\n P.register_parametrization(module, param_name, Bnb4bitParametrization(quant_state), unsafe=True)\n\n # Next, register hooks.\n _register_parametrization_hooks(module, param_name)\n\n\ndef _disable_parametrization_cache(module: nn.Module, inputs: tuple[Any, ...], output: Any):\n P._cache_enabled -= 1\n if not P._cache_enabled:\n P._cache = {}\n\n\ndef _enable_parametrization_cache(module: nn.Module, inputs: tuple[Any, ...]):\n P._cache_enabled += 1\n\n\ndef _register_parametrization_hooks(module: nn.Module, param_name: str):\n # Register a state dict hook for saving. Note that this requires torch >= 2.5.0.\n if torch.__version__ >= (2, 5):\n module.register_state_dict_post_hook(\n partial(\n _parametrized_state_dict_post_hook,\n param_name=param_name,\n )\n )\n\n # Register hooks to enable caching for the dequantization parametrization.\n # This helps preserve time and memory when the same quantized parameter\n # is accessed multiple times in the forward computation.\n module.register_forward_pre_hook(_enable_parametrization_cache)\n module.register_forward_hook(_disable_parametrization_cache)\n\n\ndef _parametrized_state_dict_post_hook(\n module: nn.Module,\n state_dict: dict[str, Any],\n prefix: str,\n local_metadata: Any,\n *,\n param_name: str = \"weight\",\n **kwargs: dict[str, Any],\n) -> None:\n \"\"\"\n Hook to modify the state dict to include the quantization state.\n \"\"\"\n\n original_key = f\"{prefix}parametrizations.{param_name}.original\"\n\n if original_key in state_dict:\n # Create a clean entry.\n # The `parametrizations.{param_name}.original` key will have the quantized data,\n # but we would like it to keep it in the state_dict as `{param_name}`.\n clean_key = f\"{prefix}{param_name}\"\n state_dict[clean_key] = state_dict.pop(original_key)\n\n assert P.is_parametrized(module, param_name)\n\n # Find the parametrization, which should have the quantization state.\n parametrization: Bnb4bitParametrization = next(\n filter(lambda x: isinstance(x, Bnb4bitParametrization), module.parametrizations[param_name]), None\n )\n\n assert parametrization is not None, \"Parametrization not found for the parameter.\"\n\n quant_state = parametrization.quant_state\n\n # Next, we need to store the quantization state.\n if quant_state is not None:\n for k, v in quant_state.as_dict(packed=True).items():\n state_dict[f\"{prefix}{param_name}.{k}\"] = v","source_hash":"e9fae2f99c5799a0bfef479aec29afc5fc2b135421e8f0bcb2ae2d8af6d62d5c","truncated":false} 
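The replace_parameter_4bit docstring above targets parameters outside nn.Linear, such as MoE expert weights. A hypothetical end-to-end sketch follows; the Expert module, shapes, and the CUDA device are illustrative assumptions, the feature is experimental per its own docstring, and (per the hook registration above) the state-dict round-trip needs torch >= 2.5.

import torch
import torch.nn as nn
from bitsandbytes.nn.parametrize import replace_parameter_4bit

class Expert(nn.Module):
    # Hypothetical module with a bare parameter, standing in for an MoE expert weight.
    def __init__(self):
        super().__init__()
        self.w = nn.Parameter(torch.randn(256, 256))

    def forward(self, x):
        # Accessing self.w triggers the dequantization parametrization; the
        # forward pre-/post-hooks cache the dequantized tensor for the call.
        return x @ self.w

m = Expert().cuda()  # assumes a CUDA-capable bitsandbytes build
replace_parameter_4bit(m, "w", quant_type="nf4")  # storage becomes frozen 4-bit data

out = m(torch.randn(8, 256, device="cuda"))
print(sorted(m.state_dict().keys()))  # 'w' plus packed quant-state entries from the post-hook

The state-dict post-hook is what keeps checkpoints clean: the quantized payload is saved back under the plain parameter name instead of parametrizations.w.original, alongside the serialized quantization state needed by replace_parameter_4bit_prequantized on reload.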
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.parametrize.Bnb4bitParametrization","uri":"program://bitsandbytes/class/bitsandbytes.nn.parametrize.Bnb4bitParametrization#L11-L39","kind":"class","name":"Bnb4bitParametrization","path":"bitsandbytes/nn/parametrize.py","language":"python","start_line":11,"end_line":39,"context_start_line":1,"context_end_line":59,"code":"from functools import partial\nfrom typing import Any, Literal, Optional\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.utils.parametrize as P\n\nfrom .. import functional as F\n\n\nclass Bnb4bitParametrization(nn.Module):\n \"\"\"\n A parametrization module that handles dequantization of a 4-bit quantized parameter.\n\n The parameter data is expected to be already quantized when this parametrization is applied.\n This module will dequantize the parameter data to its original floating-point representation\n when the forward method is called (i.e. when the parameter is accessed).\n\n Args:\n quant_state (`F.QuantState`):\n The quantization state containing the necessary information for dequantization.\n \"\"\"\n\n def __init__(self, quant_state: F.QuantState):\n super().__init__()\n self.quant_state = quant_state\n\n @torch.no_grad()\n def forward(self, quantized_param: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Forward pass to dequantize the parameter.\n\n Args:\n quantized_param (`torch.Tensor`): The quantized parameter tensor (from .original)\n\n Returns:\n `torch.Tensor`: The dequantized parameter tensor in the original shape and dtype.\n \"\"\"\n return F.dequantize_4bit(quantized_param, self.quant_state)\n\n\ndef replace_parameter_4bit_prequantized(\n module: nn.Module, param_name: str, qs_dict: dict[str, Any], device: torch.device\n):\n if not hasattr(module, param_name):\n raise AttributeError(f\"Module does not have parameter '{param_name}'\")\n\n original_param = getattr(module, param_name)\n\n if not isinstance(original_param, nn.Parameter):\n raise TypeError(f\"Parameter '{param_name}' is not an instance of nn.Parameter\")\n\n quant_state = F.QuantState.from_dict(qs_dict, device=device)\n\n # Apply a parametrization to the module to handle dequantization.\n P.register_parametrization(module, param_name, Bnb4bitParametrization(quant_state), unsafe=True)\n\n # Next, register hooks.\n _register_parametrization_hooks(module, param_name)","source_hash":"e9fae2f99c5799a0bfef479aec29afc5fc2b135421e8f0bcb2ae2d8af6d62d5c","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.parametrize.replace_parameter_4bit_prequantized","uri":"program://bitsandbytes/function/bitsandbytes.nn.parametrize.replace_parameter_4bit_prequantized#L42-L59","kind":"function","name":"replace_parameter_4bit_prequantized","path":"bitsandbytes/nn/parametrize.py","language":"python","start_line":42,"end_line":59,"context_start_line":22,"context_end_line":79,"code":" \"\"\"\n\n def __init__(self, quant_state: F.QuantState):\n super().__init__()\n self.quant_state = quant_state\n\n @torch.no_grad()\n def forward(self, quantized_param: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Forward pass to dequantize the parameter.\n\n Args:\n quantized_param (`torch.Tensor`): The quantized parameter tensor (from .original)\n\n Returns:\n `torch.Tensor`: The dequantized parameter tensor in the original shape and dtype.\n \"\"\"\n return F.dequantize_4bit(quantized_param, self.quant_state)\n\n\ndef replace_parameter_4bit_prequantized(\n module: nn.Module, param_name: str, qs_dict: dict[str, Any], device: torch.device\n):\n if not 
hasattr(module, param_name):\n raise AttributeError(f\"Module does not have parameter '{param_name}'\")\n\n original_param = getattr(module, param_name)\n\n if not isinstance(original_param, nn.Parameter):\n raise TypeError(f\"Parameter '{param_name}' is not an instance of nn.Parameter\")\n\n quant_state = F.QuantState.from_dict(qs_dict, device=device)\n\n # Apply a parametrization to the module to handle dequantization.\n P.register_parametrization(module, param_name, Bnb4bitParametrization(quant_state), unsafe=True)\n\n # Next, register hooks.\n _register_parametrization_hooks(module, param_name)\n\n\ndef replace_parameter_4bit(\n module: nn.Module,\n param_name: str,\n compress_statistics: bool = False,\n quant_type: Literal[\"nf4\", \"fp4\"] = \"nf4\",\n blocksize: Optional[int] = None,\n):\n \"\"\"\n Replace a module parameter with a 4-bit quantized version using parametrization.\n\n This function quantizes an existing parameter in a PyTorch module to 4-bit precision\n and sets up parametrization to handle automatic dequantization during forward passes.\n The original parameter is replaced with quantized data, and a parametrization layer\n is registered to manage the quantization state and dequantization process.\n\n Additional, it registers a state dict post-hook to ensure that the quantization state\n is saved correctly when the model's state dict is saved.\n","source_hash":"e9fae2f99c5799a0bfef479aec29afc5fc2b135421e8f0bcb2ae2d8af6d62d5c","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.parametrize.replace_parameter_4bit","uri":"program://bitsandbytes/function/bitsandbytes.nn.parametrize.replace_parameter_4bit#L62-L126","kind":"function","name":"replace_parameter_4bit","path":"bitsandbytes/nn/parametrize.py","language":"python","start_line":62,"end_line":126,"context_start_line":42,"context_end_line":146,"code":"def replace_parameter_4bit_prequantized(\n module: nn.Module, param_name: str, qs_dict: dict[str, Any], device: torch.device\n):\n if not hasattr(module, param_name):\n raise AttributeError(f\"Module does not have parameter '{param_name}'\")\n\n original_param = getattr(module, param_name)\n\n if not isinstance(original_param, nn.Parameter):\n raise TypeError(f\"Parameter '{param_name}' is not an instance of nn.Parameter\")\n\n quant_state = F.QuantState.from_dict(qs_dict, device=device)\n\n # Apply a parametrization to the module to handle dequantization.\n P.register_parametrization(module, param_name, Bnb4bitParametrization(quant_state), unsafe=True)\n\n # Next, register hooks.\n _register_parametrization_hooks(module, param_name)\n\n\ndef replace_parameter_4bit(\n module: nn.Module,\n param_name: str,\n compress_statistics: bool = False,\n quant_type: Literal[\"nf4\", \"fp4\"] = \"nf4\",\n blocksize: Optional[int] = None,\n):\n \"\"\"\n Replace a module parameter with a 4-bit quantized version using parametrization.\n\n This function quantizes an existing parameter in a PyTorch module to 4-bit precision\n and sets up parametrization to handle automatic dequantization during forward passes.\n The original parameter is replaced with quantized data, and a parametrization layer\n is registered to manage the quantization state and dequantization process.\n\n Additional, it registers a state dict post-hook to ensure that the quantization state\n is saved correctly when the model's state dict is saved.\n\n It is useful for MoE models or other scenarios where you want to quantize parameters\n outside of nn.Linear layers without changing the model's 
architecture.\n\n This feature is experimental and may change in future releases.\n\n Args:\n module (`nn.Module`):\n The PyTorch module containing the parameter to be quantized.\n param_name (`str`):\n The name of the parameter within the module to quantize.\n compress_statistics (`bool`, *optional*, defaults to `False`):\n Whether to compress quantization statistics to reduce memory usage.\n quant_type (`Literal[\"nf4\", \"fp4\"]`, *optional*, defaults to `\"nf4\"`):\n The quantization format to use.\n blocksize (`int`, *optional*, defaults to `None`):\n The block size for quantization. If None, uses the default block size.\n\n Raises:\n AttributeError: If the module does not have the specified parameter.\n TypeError: If the specified attribute is not an instance of nn.Parameter.\n \"\"\"\n\n if not hasattr(module, param_name):\n raise AttributeError(f\"Module does not have parameter '{param_name}'\")\n\n original_param = getattr(module, param_name)\n\n if not isinstance(original_param, nn.Parameter):\n raise TypeError(f\"Parameter '{param_name}' is not an instance of nn.Parameter\")\n\n # Quantize the original parameter.\n quantized_data, quant_state = F.quantize_4bit(\n original_param.data,\n blocksize=blocksize,\n compress_statistics=compress_statistics,\n quant_type=quant_type,\n )\n\n # Replace the parameter with the quantized data.\n setattr(module, param_name, nn.Parameter(quantized_data, requires_grad=False))\n del original_param\n\n # Apply a parametrization to the module to handle dequantization.\n P.register_parametrization(module, param_name, Bnb4bitParametrization(quant_state), unsafe=True)\n\n # Next, register hooks.\n _register_parametrization_hooks(module, param_name)\n\n\ndef _disable_parametrization_cache(module: nn.Module, inputs: tuple[Any, ...], output: Any):\n P._cache_enabled -= 1\n if not P._cache_enabled:\n P._cache = {}\n\n\ndef _enable_parametrization_cache(module: nn.Module, inputs: tuple[Any, ...]):\n P._cache_enabled += 1\n\n\ndef _register_parametrization_hooks(module: nn.Module, param_name: str):\n # Register a state dict hook for saving. 
Note that this requires torch >= 2.5.0.\n if torch.__version__ >= (2, 5):\n module.register_state_dict_post_hook(\n partial(\n _parametrized_state_dict_post_hook,\n param_name=param_name,\n )","source_hash":"e9fae2f99c5799a0bfef479aec29afc5fc2b135421e8f0bcb2ae2d8af6d62d5c","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.parametrize._disable_parametrization_cache","uri":"program://bitsandbytes/function/bitsandbytes.nn.parametrize._disable_parametrization_cache#L129-L132","kind":"function","name":"_disable_parametrization_cache","path":"bitsandbytes/nn/parametrize.py","language":"python","start_line":129,"end_line":132,"context_start_line":109,"context_end_line":152,"code":"\n # Quantize the original parameter.\n quantized_data, quant_state = F.quantize_4bit(\n original_param.data,\n blocksize=blocksize,\n compress_statistics=compress_statistics,\n quant_type=quant_type,\n )\n\n # Replace the parameter with the quantized data.\n setattr(module, param_name, nn.Parameter(quantized_data, requires_grad=False))\n del original_param\n\n # Apply a parametrization to the module to handle dequantization.\n P.register_parametrization(module, param_name, Bnb4bitParametrization(quant_state), unsafe=True)\n\n # Next, register hooks.\n _register_parametrization_hooks(module, param_name)\n\n\ndef _disable_parametrization_cache(module: nn.Module, inputs: tuple[Any, ...], output: Any):\n P._cache_enabled -= 1\n if not P._cache_enabled:\n P._cache = {}\n\n\ndef _enable_parametrization_cache(module: nn.Module, inputs: tuple[Any, ...]):\n P._cache_enabled += 1\n\n\ndef _register_parametrization_hooks(module: nn.Module, param_name: str):\n # Register a state dict hook for saving. Note that this requires torch >= 2.5.0.\n if torch.__version__ >= (2, 5):\n module.register_state_dict_post_hook(\n partial(\n _parametrized_state_dict_post_hook,\n param_name=param_name,\n )\n )\n\n # Register hooks to enable caching for the dequantization parametrization.\n # This helps preserve time and memory when the same quantized parameter\n # is accessed multiple times in the forward computation.\n module.register_forward_pre_hook(_enable_parametrization_cache)","source_hash":"e9fae2f99c5799a0bfef479aec29afc5fc2b135421e8f0bcb2ae2d8af6d62d5c","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.parametrize._enable_parametrization_cache","uri":"program://bitsandbytes/function/bitsandbytes.nn.parametrize._enable_parametrization_cache#L135-L136","kind":"function","name":"_enable_parametrization_cache","path":"bitsandbytes/nn/parametrize.py","language":"python","start_line":135,"end_line":136,"context_start_line":115,"context_end_line":156,"code":" quant_type=quant_type,\n )\n\n # Replace the parameter with the quantized data.\n setattr(module, param_name, nn.Parameter(quantized_data, requires_grad=False))\n del original_param\n\n # Apply a parametrization to the module to handle dequantization.\n P.register_parametrization(module, param_name, Bnb4bitParametrization(quant_state), unsafe=True)\n\n # Next, register hooks.\n _register_parametrization_hooks(module, param_name)\n\n\ndef _disable_parametrization_cache(module: nn.Module, inputs: tuple[Any, ...], output: Any):\n P._cache_enabled -= 1\n if not P._cache_enabled:\n P._cache = {}\n\n\ndef _enable_parametrization_cache(module: nn.Module, inputs: tuple[Any, ...]):\n P._cache_enabled += 1\n\n\ndef _register_parametrization_hooks(module: nn.Module, param_name: str):\n # Register a state dict hook for saving. 
Note that this requires torch >= 2.5.0.\n if torch.__version__ >= (2, 5):\n module.register_state_dict_post_hook(\n partial(\n _parametrized_state_dict_post_hook,\n param_name=param_name,\n )\n )\n\n # Register hooks to enable caching for the dequantization parametrization.\n # This helps preserve time and memory when the same quantized parameter\n # is accessed multiple times in the forward computation.\n module.register_forward_pre_hook(_enable_parametrization_cache)\n module.register_forward_hook(_disable_parametrization_cache)\n\n\ndef _parametrized_state_dict_post_hook(","source_hash":"e9fae2f99c5799a0bfef479aec29afc5fc2b135421e8f0bcb2ae2d8af6d62d5c","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.parametrize._register_parametrization_hooks","uri":"program://bitsandbytes/function/bitsandbytes.nn.parametrize._register_parametrization_hooks#L139-L153","kind":"function","name":"_register_parametrization_hooks","path":"bitsandbytes/nn/parametrize.py","language":"python","start_line":139,"end_line":153,"context_start_line":119,"context_end_line":173,"code":" setattr(module, param_name, nn.Parameter(quantized_data, requires_grad=False))\n del original_param\n\n # Apply a parametrization to the module to handle dequantization.\n P.register_parametrization(module, param_name, Bnb4bitParametrization(quant_state), unsafe=True)\n\n # Next, register hooks.\n _register_parametrization_hooks(module, param_name)\n\n\ndef _disable_parametrization_cache(module: nn.Module, inputs: tuple[Any, ...], output: Any):\n P._cache_enabled -= 1\n if not P._cache_enabled:\n P._cache = {}\n\n\ndef _enable_parametrization_cache(module: nn.Module, inputs: tuple[Any, ...]):\n P._cache_enabled += 1\n\n\ndef _register_parametrization_hooks(module: nn.Module, param_name: str):\n # Register a state dict hook for saving. 
Note that this requires torch >= 2.5.0.\n if torch.__version__ >= (2, 5):\n module.register_state_dict_post_hook(\n partial(\n _parametrized_state_dict_post_hook,\n param_name=param_name,\n )\n )\n\n # Register hooks to enable caching for the dequantization parametrization.\n # This helps preserve time and memory when the same quantized parameter\n # is accessed multiple times in the forward computation.\n module.register_forward_pre_hook(_enable_parametrization_cache)\n module.register_forward_hook(_disable_parametrization_cache)\n\n\ndef _parametrized_state_dict_post_hook(\n module: nn.Module,\n state_dict: dict[str, Any],\n prefix: str,\n local_metadata: Any,\n *,\n param_name: str = \"weight\",\n **kwargs: dict[str, Any],\n) -> None:\n \"\"\"\n Hook to modify the state dict to include the quantization state.\n \"\"\"\n\n original_key = f\"{prefix}parametrizations.{param_name}.original\"\n\n if original_key in state_dict:\n # Create a clean entry.\n # The `parametrizations.{param_name}.original` key will have the quantized data,","source_hash":"e9fae2f99c5799a0bfef479aec29afc5fc2b135421e8f0bcb2ae2d8af6d62d5c","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.parametrize._parametrized_state_dict_post_hook","uri":"program://bitsandbytes/function/bitsandbytes.nn.parametrize._parametrized_state_dict_post_hook#L156-L192","kind":"function","name":"_parametrized_state_dict_post_hook","path":"bitsandbytes/nn/parametrize.py","language":"python","start_line":156,"end_line":192,"context_start_line":136,"context_end_line":192,"code":" P._cache_enabled += 1\n\n\ndef _register_parametrization_hooks(module: nn.Module, param_name: str):\n # Register a state dict hook for saving. Note that this requires torch >= 2.5.0.\n if torch.__version__ >= (2, 5):\n module.register_state_dict_post_hook(\n partial(\n _parametrized_state_dict_post_hook,\n param_name=param_name,\n )\n )\n\n # Register hooks to enable caching for the dequantization parametrization.\n # This helps preserve time and memory when the same quantized parameter\n # is accessed multiple times in the forward computation.\n module.register_forward_pre_hook(_enable_parametrization_cache)\n module.register_forward_hook(_disable_parametrization_cache)\n\n\ndef _parametrized_state_dict_post_hook(\n module: nn.Module,\n state_dict: dict[str, Any],\n prefix: str,\n local_metadata: Any,\n *,\n param_name: str = \"weight\",\n **kwargs: dict[str, Any],\n) -> None:\n \"\"\"\n Hook to modify the state dict to include the quantization state.\n \"\"\"\n\n original_key = f\"{prefix}parametrizations.{param_name}.original\"\n\n if original_key in state_dict:\n # Create a clean entry.\n # The `parametrizations.{param_name}.original` key will have the quantized data,\n # but we would like it to keep it in the state_dict as `{param_name}`.\n clean_key = f\"{prefix}{param_name}\"\n state_dict[clean_key] = state_dict.pop(original_key)\n\n assert P.is_parametrized(module, param_name)\n\n # Find the parametrization, which should have the quantization state.\n parametrization: Bnb4bitParametrization = next(\n filter(lambda x: isinstance(x, Bnb4bitParametrization), module.parametrizations[param_name]), None\n )\n\n assert parametrization is not None, \"Parametrization not found for the parameter.\"\n\n quant_state = parametrization.quant_state\n\n # Next, we need to store the quantization state.\n if quant_state is not None:\n for k, v in quant_state.as_dict(packed=True).items():\n state_dict[f\"{prefix}{param_name}.{k}\"] = 
v","source_hash":"e9fae2f99c5799a0bfef479aec29afc5fc2b135421e8f0bcb2ae2d8af6d62d5c","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.parametrize.__init__","uri":"program://bitsandbytes/function/bitsandbytes.nn.parametrize.__init__#L24-L26","kind":"function","name":"__init__","path":"bitsandbytes/nn/parametrize.py","language":"python","start_line":24,"end_line":26,"context_start_line":4,"context_end_line":46,"code":"import torch\nimport torch.nn as nn\nimport torch.nn.utils.parametrize as P\n\nfrom .. import functional as F\n\n\nclass Bnb4bitParametrization(nn.Module):\n \"\"\"\n A parametrization module that handles dequantization of a 4-bit quantized parameter.\n\n The parameter data is expected to be already quantized when this parametrization is applied.\n This module will dequantize the parameter data to its original floating-point representation\n when the forward method is called (i.e. when the parameter is accessed).\n\n Args:\n quant_state (`F.QuantState`):\n The quantization state containing the necessary information for dequantization.\n \"\"\"\n\n def __init__(self, quant_state: F.QuantState):\n super().__init__()\n self.quant_state = quant_state\n\n @torch.no_grad()\n def forward(self, quantized_param: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Forward pass to dequantize the parameter.\n\n Args:\n quantized_param (`torch.Tensor`): The quantized parameter tensor (from .original)\n\n Returns:\n `torch.Tensor`: The dequantized parameter tensor in the original shape and dtype.\n \"\"\"\n return F.dequantize_4bit(quantized_param, self.quant_state)\n\n\ndef replace_parameter_4bit_prequantized(\n module: nn.Module, param_name: str, qs_dict: dict[str, Any], device: torch.device\n):\n if not hasattr(module, param_name):\n raise AttributeError(f\"Module does not have parameter '{param_name}'\")","source_hash":"e9fae2f99c5799a0bfef479aec29afc5fc2b135421e8f0bcb2ae2d8af6d62d5c","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.parametrize.forward","uri":"program://bitsandbytes/function/bitsandbytes.nn.parametrize.forward#L29-L39","kind":"function","name":"forward","path":"bitsandbytes/nn/parametrize.py","language":"python","start_line":29,"end_line":39,"context_start_line":9,"context_end_line":59,"code":"\n\nclass Bnb4bitParametrization(nn.Module):\n \"\"\"\n A parametrization module that handles dequantization of a 4-bit quantized parameter.\n\n The parameter data is expected to be already quantized when this parametrization is applied.\n This module will dequantize the parameter data to its original floating-point representation\n when the forward method is called (i.e. 
when the parameter is accessed).\n\n Args:\n quant_state (`F.QuantState`):\n The quantization state containing the necessary information for dequantization.\n \"\"\"\n\n def __init__(self, quant_state: F.QuantState):\n super().__init__()\n self.quant_state = quant_state\n\n @torch.no_grad()\n def forward(self, quantized_param: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Forward pass to dequantize the parameter.\n\n Args:\n quantized_param (`torch.Tensor`): The quantized parameter tensor (from .original)\n\n Returns:\n `torch.Tensor`: The dequantized parameter tensor in the original shape and dtype.\n \"\"\"\n return F.dequantize_4bit(quantized_param, self.quant_state)\n\n\ndef replace_parameter_4bit_prequantized(\n module: nn.Module, param_name: str, qs_dict: dict[str, Any], device: torch.device\n):\n if not hasattr(module, param_name):\n raise AttributeError(f\"Module does not have parameter '{param_name}'\")\n\n original_param = getattr(module, param_name)\n\n if not isinstance(original_param, nn.Parameter):\n raise TypeError(f\"Parameter '{param_name}' is not an instance of nn.Parameter\")\n\n quant_state = F.QuantState.from_dict(qs_dict, device=device)\n\n # Apply a parametrization to the module to handle dequantization.\n P.register_parametrization(module, param_name, Bnb4bitParametrization(quant_state), unsafe=True)\n\n # Next, register hooks.\n _register_parametrization_hooks(module, param_name)","source_hash":"e9fae2f99c5799a0bfef479aec29afc5fc2b135421e8f0bcb2ae2d8af6d62d5c","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules","uri":"program://bitsandbytes/module/bitsandbytes.nn.modules#L1-L1120","kind":"module","name":"bitsandbytes.nn.modules","path":"bitsandbytes/nn/modules.py","language":"python","start_line":1,"end_line":1120,"context_start_line":1,"context_end_line":1120,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport copy\nfrom typing import Any, Optional, TypeVar, Union, overload\nimport warnings\n\nimport torch\nfrom torch import Tensor, device, dtype, nn\nimport torch.nn.functional as F\n\nimport bitsandbytes as bnb\nfrom bitsandbytes.cextension import HIP_ENVIRONMENT\nfrom bitsandbytes.functional import QuantState\nfrom bitsandbytes.optim import GlobalOptimManager\nfrom bitsandbytes.utils import INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING, OutlierTracer\n\nT = TypeVar(\"T\", bound=\"torch.nn.Module\")\n\n\nclass StableEmbedding(torch.nn.Embedding):\n \"\"\"\n Custom embedding layer designed to improve stability during training for NLP tasks by using 32-bit optimizer states. It is designed to reduce gradient variations that can result from quantization. 
This embedding layer is initialized with Xavier uniform initialization followed by layer normalization.\n\n Example:\n\n ```\n # Initialize StableEmbedding layer with vocabulary size 1000, embedding dimension 300\n embedding_layer = StableEmbedding(num_embeddings=1000, embedding_dim=300)\n\n # Reset embedding parameters\n embedding_layer.reset_parameters()\n\n # Perform a forward pass with input tensor\n input_tensor = torch.tensor([1, 2, 3])\n output_embedding = embedding_layer(input_tensor)\n ```\n\n Attributes:\n norm (`torch.nn.LayerNorm`): Layer normalization applied after the embedding.\n\n Methods:\n reset_parameters(): Reset embedding parameters using Xavier uniform initialization.\n forward(input: Tensor) -> Tensor: Forward pass through the stable embedding layer.\n \"\"\"\n\n def __init__(\n self,\n num_embeddings: int,\n embedding_dim: int,\n padding_idx: Optional[int] = None,\n max_norm: Optional[float] = None,\n norm_type: float = 2.0,\n scale_grad_by_freq: bool = False,\n sparse: bool = False,\n _weight: Optional[Tensor] = None,\n device=None,\n dtype=None,\n ) -> None:\n \"\"\"\n Args:\n num_embeddings (`int`):\n The number of unique embeddings (vocabulary size).\n embedding_dim (`int`):\n The dimensionality of the embedding.\n padding_idx (`Optional[int]`):\n Pads the output with zeros at the given index.\n max_norm (`Optional[float]`):\n Renormalizes embeddings to have a maximum L2 norm.\n norm_type (`float`, defaults to `2.0`):\n The p-norm to compute for the `max_norm` option.\n scale_grad_by_freq (`bool`, defaults to `False`):\n Scale gradient by frequency during backpropagation.\n sparse (`bool`, defaults to `False`):\n Computes dense gradients. Set to `True` to compute sparse gradients instead.\n _weight (`Optional[Tensor]`):\n Pretrained embeddings.\n \"\"\"\n super().__init__(\n num_embeddings,\n embedding_dim,\n padding_idx,\n max_norm,\n norm_type,\n scale_grad_by_freq,\n sparse,\n _weight,\n device,\n dtype,\n )\n self.norm = torch.nn.LayerNorm(embedding_dim, device=device)\n GlobalOptimManager.get_instance().register_module_override(self, \"weight\", {\"optim_bits\": 32})\n\n def reset_parameters(self) -> None:\n torch.nn.init.xavier_uniform_(self.weight)\n self._fill_padding_idx_with_zero()\n\n \"\"\" !!! This is a redefinition of _fill_padding_idx_with_zero in torch.nn.Embedding\n to make the Layer compatible with Pytorch < 1.9.\n This means that if this changes in future PyTorch releases this need to change too\n which is cumbersome. 
However, with this we can ensure compatibility with previous\n PyTorch releases.\n \"\"\"\n\n def _fill_padding_idx_with_zero(self) -> None:\n if self.padding_idx is not None:\n with torch.no_grad():\n self.weight[self.padding_idx].fill_(0)\n\n def forward(self, input: Tensor) -> Tensor:\n emb = F.embedding(\n input,\n self.weight,\n self.padding_idx,\n self.max_norm,\n self.norm_type,\n self.scale_grad_by_freq,\n self.sparse,\n )\n\n # always apply layer norm in full precision\n emb = emb.to(torch.get_default_dtype())\n\n return self.norm(emb).to(self.weight.dtype)\n\n\nclass Embedding(torch.nn.Embedding):\n \"\"\"\n Embedding class to store and retrieve word embeddings from their indices.\n \"\"\"\n\n def __init__(\n self,\n num_embeddings: int,\n embedding_dim: int,\n padding_idx: Optional[int] = None,\n max_norm: Optional[float] = None,\n norm_type: float = 2.0,\n scale_grad_by_freq: bool = False,\n sparse: bool = False,\n _weight: Optional[Tensor] = None,\n device: Optional[device] = None,\n ) -> None:\n \"\"\"\n Args:\n num_embeddings (`int`):\n The number of unique embeddings (vocabulary size).\n embedding_dim (`int`):\n The dimensionality of the embedding.\n padding_idx (`Optional[int]`):\n Pads the output with zeros at the given index.\n max_norm (`Optional[float]`):\n Renormalizes embeddings to have a maximum L2 norm.\n norm_type (`float`, defaults to `2.0`):\n The p-norm to compute for the `max_norm` option.\n scale_grad_by_freq (`bool`, defaults to `False`):\n Scale gradient by frequency during backpropagation.\n sparse (`bool`, defaults to `False`):\n Computes dense gradients. Set to `True` to compute sparse gradients instead.\n _weight (`Optional[Tensor]`):\n Pretrained embeddings.\n \"\"\"\n super().__init__(\n num_embeddings,\n embedding_dim,\n padding_idx,\n max_norm,\n norm_type,\n scale_grad_by_freq,\n sparse,\n _weight,\n device=device,\n )\n GlobalOptimManager.get_instance().register_module_override(self, \"weight\", {\"optim_bits\": 32})\n\n def reset_parameters(self) -> None:\n torch.nn.init.xavier_uniform_(self.weight)\n self._fill_padding_idx_with_zero()\n\n \"\"\" !!! This is a redefinition of _fill_padding_idx_with_zero in torch.nn.Embedding\n to make the Layer compatible with Pytorch < 1.9.\n This means that if this changes in future PyTorch releases this need to change too\n which is cumbersome. 
However, with this we can ensure compatibility with previous\n PyTorch releases.\n \"\"\"\n\n def _fill_padding_idx_with_zero(self) -> None:\n if self.padding_idx is not None:\n with torch.no_grad():\n self.weight[self.padding_idx].fill_(0)\n\n def forward(self, input: Tensor) -> Tensor:\n emb = F.embedding(\n input,\n self.weight,\n self.padding_idx,\n self.max_norm,\n self.norm_type,\n self.scale_grad_by_freq,\n self.sparse,\n )\n\n return emb\n\n\nclass Params4bit(torch.nn.Parameter):\n def __new__(\n cls,\n data: Optional[torch.Tensor] = None,\n requires_grad=False, # quantized weights should be frozen by default\n quant_state: Optional[QuantState] = None,\n blocksize: Optional[int] = None,\n compress_statistics: bool = True,\n quant_type: str = \"fp4\",\n quant_storage: torch.dtype = torch.uint8,\n module: Optional[\"Linear4bit\"] = None,\n bnb_quantized: bool = False,\n ) -> \"Params4bit\":\n if data is None:\n data = torch.empty(0)\n\n if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128\n\n self = torch.Tensor._make_subclass(cls, data, requires_grad)\n self.blocksize = blocksize\n self.compress_statistics = compress_statistics\n self.quant_type = quant_type\n self.quant_state = quant_state\n self.quant_storage = quant_storage\n self.bnb_quantized = bnb_quantized\n self.data = data\n self.module = module\n return self\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"data\"] = self.data\n state[\"requires_grad\"] = self.requires_grad\n return state\n\n def __setstate__(self, state):\n self.requires_grad = state[\"requires_grad\"]\n self.blocksize = state[\"blocksize\"]\n self.compress_statistics = state[\"compress_statistics\"]\n self.quant_type = state[\"quant_type\"]\n self.quant_state = state[\"quant_state\"]\n self.data = state[\"data\"]\n self.quant_storage = state[\"quant_storage\"]\n self.bnb_quantized = state[\"bnb_quantized\"]\n self.module = state[\"module\"]\n\n def __deepcopy__(self, memo):\n new_instance = type(self).__new__(type(self))\n state = self.__getstate__()\n new_instance.__setstate__(state)\n new_instance.quant_state = copy.deepcopy(state[\"quant_state\"])\n new_instance.data = copy.deepcopy(state[\"data\"])\n return new_instance\n\n def __copy__(self):\n new_instance = type(self).__new__(type(self))\n state = self.__getstate__()\n new_instance.__setstate__(state)\n return new_instance\n\n @classmethod\n def from_prequantized(\n cls,\n data: torch.Tensor,\n quantized_stats: dict[str, Any],\n requires_grad: bool = False,\n device=\"cuda\",\n module: Optional[\"Linear4bit\"] = None,\n **kwargs,\n ) -> \"Params4bit\":\n self = torch.Tensor._make_subclass(cls, data.to(device))\n self.requires_grad = requires_grad\n self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)\n self.blocksize = self.quant_state.blocksize\n self.compress_statistics = self.quant_state.nested\n self.quant_type = self.quant_state.quant_type\n self.bnb_quantized = True\n\n self.quant_storage = data.dtype\n self.module = module\n\n if self.module is not None:\n self.module.quant_state = self.quant_state\n\n return self\n\n def _quantize(self, device):\n w = self.data.contiguous().to(device)\n w_4bit, quant_state = bnb.functional.quantize_4bit(\n w,\n blocksize=self.blocksize,\n compress_statistics=self.compress_statistics,\n quant_type=self.quant_type,\n quant_storage=self.quant_storage,\n )\n self.data = w_4bit\n self.quant_state = quant_state\n if self.module is not None:\n self.module.quant_state = quant_state\n 
self.bnb_quantized = True\n return self\n\n def cpu(self):\n return self.to(device=\"cpu\")\n\n def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):\n return self.to(device=\"cuda\" if device is None else device, non_blocking=non_blocking)\n\n def xpu(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):\n return self.to(device=\"xpu\" if device is None else device, non_blocking=non_blocking)\n\n @overload\n def to(\n self: T,\n device: Optional[Union[int, device]] = ...,\n dtype: Optional[Union[dtype, str]] = ...,\n non_blocking: bool = ...,\n ) -> T: ...\n\n @overload\n def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T: ...\n\n @overload\n def to(self: T, tensor: Tensor, non_blocking: bool = ...) -> T: ...\n\n def to(self, *args, **kwargs):\n device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)\n\n if device is not None and device.type != \"meta\" and not self.bnb_quantized:\n return self._quantize(device)\n else:\n if self.quant_state is not None:\n self.quant_state.to(device)\n\n new_param = Params4bit(\n super().to(device=device, dtype=dtype, non_blocking=non_blocking),\n requires_grad=self.requires_grad,\n quant_state=self.quant_state,\n blocksize=self.blocksize,\n compress_statistics=self.compress_statistics,\n quant_type=self.quant_type,\n quant_storage=self.quant_storage,\n bnb_quantized=self.bnb_quantized,\n )\n\n return new_param\n\n @classmethod\n def __torch_function__(cls, func, types, args=(), kwargs=None):\n if kwargs is None:\n kwargs = {}\n\n if func in [torch.chunk, torch.split]:\n tensor = args[0]\n\n result = super().__torch_function__(func, types, args, kwargs)\n\n if isinstance(result, tuple):\n return tuple(\n cls(\n data=chunk,\n requires_grad=tensor.requires_grad,\n quant_state=tensor.quant_state,\n blocksize=tensor.blocksize,\n compress_statistics=tensor.compress_statistics,\n quant_type=tensor.quant_type,\n quant_storage=tensor.quant_storage,\n module=tensor.module,\n bnb_quantized=tensor.bnb_quantized,\n )\n for chunk in result\n )\n else:\n return cls(\n data=result,\n requires_grad=tensor.requires_grad,\n quant_state=tensor.quant_state,\n blocksize=tensor.blocksize,\n compress_statistics=tensor.compress_statistics,\n quant_type=tensor.quant_type,\n quant_storage=tensor.quant_storage,\n module=tensor.module,\n bnb_quantized=tensor.bnb_quantized,\n )\n\n return super().__torch_function__(func, types, args, kwargs)\n\n\ndef fix_4bit_weight_quant_state_from_module(module: Union[\"Embedding4bit\", \"Linear4bit\"]):\n if getattr(module.weight, \"quant_state\", None) is not None:\n return\n\n if getattr(module, \"quant_state\", None) is None:\n warnings.warn(\n \"FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first.\",\n )\n\n # the quant state got lost when the parameter got converted. 
This happens for example for fsdp\n # since we registered the module, we can recover the state here\n assert module.weight.shape[1] == 1\n if not isinstance(module.weight, Params4bit):\n module.weight = Params4bit(module.weight, quant_storage=module.quant_storage, bnb_quantized=True)\n module.weight.quant_state = module.quant_state\n\n\nclass Linear4bit(nn.Linear):\n \"\"\"\n This class is the base module for the 4-bit quantization algorithm presented in [QLoRA](https://arxiv.org/abs/2305.14314).\n QLoRA 4-bit linear layers uses blockwise k-bit quantization under the hood, with the possibility of selecting various\n compute datatypes such as FP4 and NF4.\n\n In order to quantize a linear layer one should first load the original fp16 / bf16 weights into\n the Linear4bit module, then call `quantized_module.to(\"cuda\")` to quantize the fp16 / bf16 weights.\n\n Example:\n\n ```python\n import torch\n import torch.nn as nn\n\n import bitsandbytes as bnb\n from bnb.nn import Linear4bit\n\n fp16_model = nn.Sequential(\n nn.Linear(64, 64),\n nn.Linear(64, 64)\n )\n\n quantized_model = nn.Sequential(\n Linear4bit(64, 64),\n Linear4bit(64, 64)\n )\n\n quantized_model.load_state_dict(fp16_model.state_dict())\n quantized_model = quantized_model.to(0) # Quantization happens here\n ```\n \"\"\"\n\n def __init__(\n self,\n input_features,\n output_features,\n bias=True,\n compute_dtype=None,\n compress_statistics=True,\n quant_type=\"fp4\",\n quant_storage=torch.uint8,\n device=None,\n ):\n \"\"\"\n Initialize Linear4bit class.\n\n Args:\n input_features (`str`):\n Number of input features of the linear layer.\n output_features (`str`):\n Number of output features of the linear layer.\n bias (`bool`, defaults to `True`):\n Whether the linear class uses the bias term as well.\n \"\"\"\n super().__init__(input_features, output_features, bias, device)\n self.weight = Params4bit(\n self.weight.data,\n requires_grad=False,\n compress_statistics=compress_statistics,\n quant_type=quant_type,\n quant_storage=quant_storage,\n module=self,\n )\n # self.persistent_buffers = [] # TODO consider as way to save quant state\n self.compute_dtype = compute_dtype\n self.compute_type_is_set = compute_dtype is not None\n self.quant_state = None\n self.quant_storage = quant_storage\n\n def set_compute_type(self, x):\n if x.dtype in [torch.float32, torch.bfloat16]:\n # the input is in a dtype that is safe to compute in, we switch\n # to this type for speed and stability\n self.compute_dtype = x.dtype\n elif x.dtype == torch.float16:\n # we take the compoute dtype passed into the layer\n if self.compute_dtype in [None, torch.float32] and (x.numel() == x.shape[-1]):\n # single batch inference with input torch.float16 and compute_dtype float32 -> slow inference when it could be fast\n # warn the user about this\n warnings.warn(\n \"Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.\",\n )\n warnings.filterwarnings(\"ignore\", message=\".*inference.\")\n if self.compute_dtype in [None, torch.float32] and (x.numel() != x.shape[-1]):\n warnings.warn(\n \"Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). 
This will lead to slow inference or training speed.\",\n )\n warnings.filterwarnings(\"ignore\", message=\".*inference or training\")\n\n def _save_to_state_dict(self, destination, prefix, keep_vars):\n \"\"\"\n save weight and bias,\n then fill state_dict with components of quant_state\n \"\"\"\n super()._save_to_state_dict(destination, prefix, keep_vars) # saving weight and bias\n\n if getattr(self.weight, \"quant_state\", None) is not None:\n for k, v in self.weight.quant_state.as_dict(packed=True).items():\n destination[prefix + \"weight.\" + k] = v if keep_vars else v.detach()\n\n def forward(self, x: torch.Tensor):\n fix_4bit_weight_quant_state_from_module(self)\n\n # weights are cast automatically as Int8Params, but the bias has to be cast manually\n if self.bias is not None and self.bias.dtype != x.dtype:\n self.bias.data = self.bias.data.to(x.dtype)\n\n if not self.compute_type_is_set:\n self.set_compute_type(x)\n self.compute_type_is_set = True\n\n inp_dtype = x.dtype\n if self.compute_dtype is not None:\n x = x.to(self.compute_dtype)\n\n bias = None if self.bias is None else self.bias.to(self.compute_dtype)\n weight = self.weight.t()\n\n return bnb.matmul_4bit(x, weight, bias=bias, quant_state=self.weight.quant_state).to(inp_dtype)\n\n\nclass LinearFP4(Linear4bit):\n \"\"\"\n Implements the FP4 data type.\n \"\"\"\n\n def __init__(\n self,\n input_features,\n output_features,\n# ... truncated ...","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":true} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.StableEmbedding","uri":"program://bitsandbytes/class/bitsandbytes.nn.modules.StableEmbedding#L22-L125","kind":"class","name":"StableEmbedding","path":"bitsandbytes/nn/modules.py","language":"python","start_line":22,"end_line":125,"context_start_line":2,"context_end_line":145,"code":"#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport copy\nfrom typing import Any, Optional, TypeVar, Union, overload\nimport warnings\n\nimport torch\nfrom torch import Tensor, device, dtype, nn\nimport torch.nn.functional as F\n\nimport bitsandbytes as bnb\nfrom bitsandbytes.cextension import HIP_ENVIRONMENT\nfrom bitsandbytes.functional import QuantState\nfrom bitsandbytes.optim import GlobalOptimManager\nfrom bitsandbytes.utils import INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING, OutlierTracer\n\nT = TypeVar(\"T\", bound=\"torch.nn.Module\")\n\n\nclass StableEmbedding(torch.nn.Embedding):\n \"\"\"\n Custom embedding layer designed to improve stability during training for NLP tasks by using 32-bit optimizer states. It is designed to reduce gradient variations that can result from quantization. 
This embedding layer is initialized with Xavier uniform initialization followed by layer normalization.\n\n Example:\n\n ```\n # Initialize StableEmbedding layer with vocabulary size 1000, embedding dimension 300\n embedding_layer = StableEmbedding(num_embeddings=1000, embedding_dim=300)\n\n # Reset embedding parameters\n embedding_layer.reset_parameters()\n\n # Perform a forward pass with input tensor\n input_tensor = torch.tensor([1, 2, 3])\n output_embedding = embedding_layer(input_tensor)\n ```\n\n Attributes:\n norm (`torch.nn.LayerNorm`): Layer normalization applied after the embedding.\n\n Methods:\n reset_parameters(): Reset embedding parameters using Xavier uniform initialization.\n forward(input: Tensor) -> Tensor: Forward pass through the stable embedding layer.\n \"\"\"\n\n def __init__(\n self,\n num_embeddings: int,\n embedding_dim: int,\n padding_idx: Optional[int] = None,\n max_norm: Optional[float] = None,\n norm_type: float = 2.0,\n scale_grad_by_freq: bool = False,\n sparse: bool = False,\n _weight: Optional[Tensor] = None,\n device=None,\n dtype=None,\n ) -> None:\n \"\"\"\n Args:\n num_embeddings (`int`):\n The number of unique embeddings (vocabulary size).\n embedding_dim (`int`):\n The dimensionality of the embedding.\n padding_idx (`Optional[int]`):\n Pads the output with zeros at the given index.\n max_norm (`Optional[float]`):\n Renormalizes embeddings to have a maximum L2 norm.\n norm_type (`float`, defaults to `2.0`):\n The p-norm to compute for the `max_norm` option.\n scale_grad_by_freq (`bool`, defaults to `False`):\n Scale gradient by frequency during backpropagation.\n sparse (`bool`, defaults to `False`):\n Computes dense gradients. Set to `True` to compute sparse gradients instead.\n _weight (`Optional[Tensor]`):\n Pretrained embeddings.\n \"\"\"\n super().__init__(\n num_embeddings,\n embedding_dim,\n padding_idx,\n max_norm,\n norm_type,\n scale_grad_by_freq,\n sparse,\n _weight,\n device,\n dtype,\n )\n self.norm = torch.nn.LayerNorm(embedding_dim, device=device)\n GlobalOptimManager.get_instance().register_module_override(self, \"weight\", {\"optim_bits\": 32})\n\n def reset_parameters(self) -> None:\n torch.nn.init.xavier_uniform_(self.weight)\n self._fill_padding_idx_with_zero()\n\n \"\"\" !!! This is a redefinition of _fill_padding_idx_with_zero in torch.nn.Embedding\n to make the Layer compatible with Pytorch < 1.9.\n This means that if this changes in future PyTorch releases this need to change too\n which is cumbersome. 
However, with this we can ensure compatibility with previous\n PyTorch releases.\n \"\"\"\n\n def _fill_padding_idx_with_zero(self) -> None:\n if self.padding_idx is not None:\n with torch.no_grad():\n self.weight[self.padding_idx].fill_(0)\n\n def forward(self, input: Tensor) -> Tensor:\n emb = F.embedding(\n input,\n self.weight,\n self.padding_idx,\n self.max_norm,\n self.norm_type,\n self.scale_grad_by_freq,\n self.sparse,\n )\n\n # always apply layer norm in full precision\n emb = emb.to(torch.get_default_dtype())\n\n return self.norm(emb).to(self.weight.dtype)\n\n\nclass Embedding(torch.nn.Embedding):\n \"\"\"\n Embedding class to store and retrieve word embeddings from their indices.\n \"\"\"\n\n def __init__(\n self,\n num_embeddings: int,\n embedding_dim: int,\n padding_idx: Optional[int] = None,\n max_norm: Optional[float] = None,\n norm_type: float = 2.0,\n scale_grad_by_freq: bool = False,\n sparse: bool = False,\n _weight: Optional[Tensor] = None,\n device: Optional[device] = None,\n ) -> None:\n \"\"\"","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.Embedding","uri":"program://bitsandbytes/class/bitsandbytes.nn.modules.Embedding#L128-L204","kind":"class","name":"Embedding","path":"bitsandbytes/nn/modules.py","language":"python","start_line":128,"end_line":204,"context_start_line":108,"context_end_line":224,"code":" with torch.no_grad():\n self.weight[self.padding_idx].fill_(0)\n\n def forward(self, input: Tensor) -> Tensor:\n emb = F.embedding(\n input,\n self.weight,\n self.padding_idx,\n self.max_norm,\n self.norm_type,\n self.scale_grad_by_freq,\n self.sparse,\n )\n\n # always apply layer norm in full precision\n emb = emb.to(torch.get_default_dtype())\n\n return self.norm(emb).to(self.weight.dtype)\n\n\nclass Embedding(torch.nn.Embedding):\n \"\"\"\n Embedding class to store and retrieve word embeddings from their indices.\n \"\"\"\n\n def __init__(\n self,\n num_embeddings: int,\n embedding_dim: int,\n padding_idx: Optional[int] = None,\n max_norm: Optional[float] = None,\n norm_type: float = 2.0,\n scale_grad_by_freq: bool = False,\n sparse: bool = False,\n _weight: Optional[Tensor] = None,\n device: Optional[device] = None,\n ) -> None:\n \"\"\"\n Args:\n num_embeddings (`int`):\n The number of unique embeddings (vocabulary size).\n embedding_dim (`int`):\n The dimensionality of the embedding.\n padding_idx (`Optional[int]`):\n Pads the output with zeros at the given index.\n max_norm (`Optional[float]`):\n Renormalizes embeddings to have a maximum L2 norm.\n norm_type (`float`, defaults to `2.0`):\n The p-norm to compute for the `max_norm` option.\n scale_grad_by_freq (`bool`, defaults to `False`):\n Scale gradient by frequency during backpropagation.\n sparse (`bool`, defaults to `False`):\n Computes dense gradients. Set to `True` to compute sparse gradients instead.\n _weight (`Optional[Tensor]`):\n Pretrained embeddings.\n \"\"\"\n super().__init__(\n num_embeddings,\n embedding_dim,\n padding_idx,\n max_norm,\n norm_type,\n scale_grad_by_freq,\n sparse,\n _weight,\n device=device,\n )\n GlobalOptimManager.get_instance().register_module_override(self, \"weight\", {\"optim_bits\": 32})\n\n def reset_parameters(self) -> None:\n torch.nn.init.xavier_uniform_(self.weight)\n self._fill_padding_idx_with_zero()\n\n \"\"\" !!! 
This is a redefinition of _fill_padding_idx_with_zero in torch.nn.Embedding\n to make the Layer compatible with Pytorch < 1.9.\n This means that if this changes in future PyTorch releases this need to change too\n which is cumbersome. However, with this we can ensure compatibility with previous\n PyTorch releases.\n \"\"\"\n\n def _fill_padding_idx_with_zero(self) -> None:\n if self.padding_idx is not None:\n with torch.no_grad():\n self.weight[self.padding_idx].fill_(0)\n\n def forward(self, input: Tensor) -> Tensor:\n emb = F.embedding(\n input,\n self.weight,\n self.padding_idx,\n self.max_norm,\n self.norm_type,\n self.scale_grad_by_freq,\n self.sparse,\n )\n\n return emb\n\n\nclass Params4bit(torch.nn.Parameter):\n def __new__(\n cls,\n data: Optional[torch.Tensor] = None,\n requires_grad=False, # quantized weights should be frozen by default\n quant_state: Optional[QuantState] = None,\n blocksize: Optional[int] = None,\n compress_statistics: bool = True,\n quant_type: str = \"fp4\",\n quant_storage: torch.dtype = torch.uint8,\n module: Optional[\"Linear4bit\"] = None,\n bnb_quantized: bool = False,\n ) -> \"Params4bit\":\n if data is None:\n data = torch.empty(0)\n\n if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.Params4bit","uri":"program://bitsandbytes/class/bitsandbytes.nn.modules.Params4bit#L207-L393","kind":"class","name":"Params4bit","path":"bitsandbytes/nn/modules.py","language":"python","start_line":207,"end_line":393,"context_start_line":187,"context_end_line":413,"code":"\n def _fill_padding_idx_with_zero(self) -> None:\n if self.padding_idx is not None:\n with torch.no_grad():\n self.weight[self.padding_idx].fill_(0)\n\n def forward(self, input: Tensor) -> Tensor:\n emb = F.embedding(\n input,\n self.weight,\n self.padding_idx,\n self.max_norm,\n self.norm_type,\n self.scale_grad_by_freq,\n self.sparse,\n )\n\n return emb\n\n\nclass Params4bit(torch.nn.Parameter):\n def __new__(\n cls,\n data: Optional[torch.Tensor] = None,\n requires_grad=False, # quantized weights should be frozen by default\n quant_state: Optional[QuantState] = None,\n blocksize: Optional[int] = None,\n compress_statistics: bool = True,\n quant_type: str = \"fp4\",\n quant_storage: torch.dtype = torch.uint8,\n module: Optional[\"Linear4bit\"] = None,\n bnb_quantized: bool = False,\n ) -> \"Params4bit\":\n if data is None:\n data = torch.empty(0)\n\n if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128\n\n self = torch.Tensor._make_subclass(cls, data, requires_grad)\n self.blocksize = blocksize\n self.compress_statistics = compress_statistics\n self.quant_type = quant_type\n self.quant_state = quant_state\n self.quant_storage = quant_storage\n self.bnb_quantized = bnb_quantized\n self.data = data\n self.module = module\n return self\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"data\"] = self.data\n state[\"requires_grad\"] = self.requires_grad\n return state\n\n def __setstate__(self, state):\n self.requires_grad = state[\"requires_grad\"]\n self.blocksize = state[\"blocksize\"]\n self.compress_statistics = state[\"compress_statistics\"]\n self.quant_type = state[\"quant_type\"]\n self.quant_state = state[\"quant_state\"]\n self.data = state[\"data\"]\n self.quant_storage = state[\"quant_storage\"]\n self.bnb_quantized = state[\"bnb_quantized\"]\n self.module = 
state[\"module\"]\n\n def __deepcopy__(self, memo):\n new_instance = type(self).__new__(type(self))\n state = self.__getstate__()\n new_instance.__setstate__(state)\n new_instance.quant_state = copy.deepcopy(state[\"quant_state\"])\n new_instance.data = copy.deepcopy(state[\"data\"])\n return new_instance\n\n def __copy__(self):\n new_instance = type(self).__new__(type(self))\n state = self.__getstate__()\n new_instance.__setstate__(state)\n return new_instance\n\n @classmethod\n def from_prequantized(\n cls,\n data: torch.Tensor,\n quantized_stats: dict[str, Any],\n requires_grad: bool = False,\n device=\"cuda\",\n module: Optional[\"Linear4bit\"] = None,\n **kwargs,\n ) -> \"Params4bit\":\n self = torch.Tensor._make_subclass(cls, data.to(device))\n self.requires_grad = requires_grad\n self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)\n self.blocksize = self.quant_state.blocksize\n self.compress_statistics = self.quant_state.nested\n self.quant_type = self.quant_state.quant_type\n self.bnb_quantized = True\n\n self.quant_storage = data.dtype\n self.module = module\n\n if self.module is not None:\n self.module.quant_state = self.quant_state\n\n return self\n\n def _quantize(self, device):\n w = self.data.contiguous().to(device)\n w_4bit, quant_state = bnb.functional.quantize_4bit(\n w,\n blocksize=self.blocksize,\n compress_statistics=self.compress_statistics,\n quant_type=self.quant_type,\n quant_storage=self.quant_storage,\n )\n self.data = w_4bit\n self.quant_state = quant_state\n if self.module is not None:\n self.module.quant_state = quant_state\n self.bnb_quantized = True\n return self\n\n def cpu(self):\n return self.to(device=\"cpu\")\n\n def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):\n return self.to(device=\"cuda\" if device is None else device, non_blocking=non_blocking)\n\n def xpu(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):\n return self.to(device=\"xpu\" if device is None else device, non_blocking=non_blocking)\n\n @overload\n def to(\n self: T,\n device: Optional[Union[int, device]] = ...,\n dtype: Optional[Union[dtype, str]] = ...,\n non_blocking: bool = ...,\n ) -> T: ...\n\n @overload\n def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T: ...\n\n @overload\n def to(self: T, tensor: Tensor, non_blocking: bool = ...) 
-> T: ...\n\n def to(self, *args, **kwargs):\n device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)\n\n if device is not None and device.type != \"meta\" and not self.bnb_quantized:\n return self._quantize(device)\n else:\n if self.quant_state is not None:\n self.quant_state.to(device)\n\n new_param = Params4bit(\n super().to(device=device, dtype=dtype, non_blocking=non_blocking),\n requires_grad=self.requires_grad,\n quant_state=self.quant_state,\n blocksize=self.blocksize,\n compress_statistics=self.compress_statistics,\n quant_type=self.quant_type,\n quant_storage=self.quant_storage,\n bnb_quantized=self.bnb_quantized,\n )\n\n return new_param\n\n @classmethod\n def __torch_function__(cls, func, types, args=(), kwargs=None):\n if kwargs is None:\n kwargs = {}\n\n if func in [torch.chunk, torch.split]:\n tensor = args[0]\n\n result = super().__torch_function__(func, types, args, kwargs)\n\n if isinstance(result, tuple):\n return tuple(\n cls(\n data=chunk,\n requires_grad=tensor.requires_grad,\n quant_state=tensor.quant_state,\n blocksize=tensor.blocksize,\n compress_statistics=tensor.compress_statistics,\n quant_type=tensor.quant_type,\n quant_storage=tensor.quant_storage,\n module=tensor.module,\n bnb_quantized=tensor.bnb_quantized,\n )\n for chunk in result\n )\n else:\n return cls(\n data=result,\n requires_grad=tensor.requires_grad,\n quant_state=tensor.quant_state,\n blocksize=tensor.blocksize,\n compress_statistics=tensor.compress_statistics,\n quant_type=tensor.quant_type,\n quant_storage=tensor.quant_storage,\n module=tensor.module,\n bnb_quantized=tensor.bnb_quantized,\n )\n\n return super().__torch_function__(func, types, args, kwargs)\n\n\ndef fix_4bit_weight_quant_state_from_module(module: Union[\"Embedding4bit\", \"Linear4bit\"]):\n if getattr(module.weight, \"quant_state\", None) is not None:\n return\n\n if getattr(module, \"quant_state\", None) is None:\n warnings.warn(\n \"FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first.\",\n )\n\n # the quant state got lost when the parameter got converted. 
This happens for example for fsdp\n # since we registered the module, we can recover the state here\n assert module.weight.shape[1] == 1\n if not isinstance(module.weight, Params4bit):\n module.weight = Params4bit(module.weight, quant_storage=module.quant_storage, bnb_quantized=True)\n module.weight.quant_state = module.quant_state\n\n\nclass Linear4bit(nn.Linear):","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.fix_4bit_weight_quant_state_from_module","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules.fix_4bit_weight_quant_state_from_module#L396-L410","kind":"function","name":"fix_4bit_weight_quant_state_from_module","path":"bitsandbytes/nn/modules.py","language":"python","start_line":396,"end_line":410,"context_start_line":376,"context_end_line":430,"code":" bnb_quantized=tensor.bnb_quantized,\n )\n for chunk in result\n )\n else:\n return cls(\n data=result,\n requires_grad=tensor.requires_grad,\n quant_state=tensor.quant_state,\n blocksize=tensor.blocksize,\n compress_statistics=tensor.compress_statistics,\n quant_type=tensor.quant_type,\n quant_storage=tensor.quant_storage,\n module=tensor.module,\n bnb_quantized=tensor.bnb_quantized,\n )\n\n return super().__torch_function__(func, types, args, kwargs)\n\n\ndef fix_4bit_weight_quant_state_from_module(module: Union[\"Embedding4bit\", \"Linear4bit\"]):\n if getattr(module.weight, \"quant_state\", None) is not None:\n return\n\n if getattr(module, \"quant_state\", None) is None:\n warnings.warn(\n \"FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first.\",\n )\n\n # the quant state got lost when the parameter got converted. 
This happens for example for fsdp\n # since we registered the module, we can recover the state here\n assert module.weight.shape[1] == 1\n if not isinstance(module.weight, Params4bit):\n module.weight = Params4bit(module.weight, quant_storage=module.quant_storage, bnb_quantized=True)\n module.weight.quant_state = module.quant_state\n\n\nclass Linear4bit(nn.Linear):\n \"\"\"\n This class is the base module for the 4-bit quantization algorithm presented in [QLoRA](https://arxiv.org/abs/2305.14314).\n QLoRA 4-bit linear layers uses blockwise k-bit quantization under the hood, with the possibility of selecting various\n compute datatypes such as FP4 and NF4.\n\n In order to quantize a linear layer one should first load the original fp16 / bf16 weights into\n the Linear4bit module, then call `quantized_module.to(\"cuda\")` to quantize the fp16 / bf16 weights.\n\n Example:\n\n ```python\n import torch\n import torch.nn as nn\n\n import bitsandbytes as bnb\n from bnb.nn import Linear4bit\n","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.Linear4bit","uri":"program://bitsandbytes/class/bitsandbytes.nn.modules.Linear4bit#L413-L532","kind":"class","name":"Linear4bit","path":"bitsandbytes/nn/modules.py","language":"python","start_line":413,"end_line":532,"context_start_line":393,"context_end_line":552,"code":" return super().__torch_function__(func, types, args, kwargs)\n\n\ndef fix_4bit_weight_quant_state_from_module(module: Union[\"Embedding4bit\", \"Linear4bit\"]):\n if getattr(module.weight, \"quant_state\", None) is not None:\n return\n\n if getattr(module, \"quant_state\", None) is None:\n warnings.warn(\n \"FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first.\",\n )\n\n # the quant state got lost when the parameter got converted. 
This happens for example for fsdp\n    # since we registered the module, we can recover the state here\n    assert module.weight.shape[1] == 1\n    if not isinstance(module.weight, Params4bit):\n        module.weight = Params4bit(module.weight, quant_storage=module.quant_storage, bnb_quantized=True)\n    module.weight.quant_state = module.quant_state\n\n\nclass Linear4bit(nn.Linear):\n    \"\"\"\n    This class is the base module for the 4-bit quantization algorithm presented in [QLoRA](https://arxiv.org/abs/2305.14314).\n    QLoRA 4-bit linear layers use blockwise k-bit quantization under the hood, with the possibility of selecting various\n    compute datatypes such as FP4 and NF4.\n\n    In order to quantize a linear layer one should first load the original fp16 / bf16 weights into\n    the Linear4bit module, then call `quantized_module.to(\"cuda\")` to quantize the fp16 / bf16 weights.\n\n    Example:\n\n    ```python\n    import torch\n    import torch.nn as nn\n\n    import bitsandbytes as bnb\n    from bitsandbytes.nn import Linear4bit\n\n    fp16_model = nn.Sequential(\n        nn.Linear(64, 64),\n        nn.Linear(64, 64)\n    )\n\n    quantized_model = nn.Sequential(\n        Linear4bit(64, 64),\n        Linear4bit(64, 64)\n    )\n\n    quantized_model.load_state_dict(fp16_model.state_dict())\n    quantized_model = quantized_model.to(0)  # Quantization happens here\n    ```\n    \"\"\"\n\n    def __init__(\n        self,\n        input_features,\n        output_features,\n        bias=True,\n        compute_dtype=None,\n        compress_statistics=True,\n        quant_type=\"fp4\",\n        quant_storage=torch.uint8,\n        device=None,\n    ):\n        \"\"\"\n        Initialize Linear4bit class.\n\n        Args:\n            input_features (`int`):\n                Number of input features of the linear layer.\n            output_features (`int`):\n                Number of output features of the linear layer.\n            bias (`bool`, defaults to `True`):\n                Whether the linear class uses the bias term as well.\n        \"\"\"\n        super().__init__(input_features, output_features, bias, device)\n        self.weight = Params4bit(\n            self.weight.data,\n            requires_grad=False,\n            compress_statistics=compress_statistics,\n            quant_type=quant_type,\n            quant_storage=quant_storage,\n            module=self,\n        )\n        # self.persistent_buffers = []  # TODO consider as way to save quant state\n        self.compute_dtype = compute_dtype\n        self.compute_type_is_set = compute_dtype is not None\n        self.quant_state = None\n        self.quant_storage = quant_storage\n\n    def set_compute_type(self, x):\n        if x.dtype in [torch.float32, torch.bfloat16]:\n            # the input is in a dtype that is safe to compute in, we switch\n            # to this type for speed and stability\n            self.compute_dtype = x.dtype\n        elif x.dtype == torch.float16:\n            # we take the compute dtype passed into the layer\n            if self.compute_dtype in [None, torch.float32] and (x.numel() == x.shape[-1]):\n                # single batch inference with input torch.float16 and compute_dtype float32 -> slow inference when it could be fast\n                # warn the user about this\n                warnings.warn(\n                    \"Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.\",\n                )\n                warnings.filterwarnings(\"ignore\", message=\".*inference.\")\n            if self.compute_dtype in [None, torch.float32] and (x.numel() != x.shape[-1]):\n                warnings.warn(\n                    \"Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). 
This will lead to slow inference or training speed.\",\n                )\n                warnings.filterwarnings(\"ignore\", message=\".*inference or training\")\n\n    def _save_to_state_dict(self, destination, prefix, keep_vars):\n        \"\"\"\n        save weight and bias,\n        then fill state_dict with components of quant_state\n        \"\"\"\n        super()._save_to_state_dict(destination, prefix, keep_vars)  # saving weight and bias\n\n        if getattr(self.weight, \"quant_state\", None) is not None:\n            for k, v in self.weight.quant_state.as_dict(packed=True).items():\n                destination[prefix + \"weight.\" + k] = v if keep_vars else v.detach()\n\n    def forward(self, x: torch.Tensor):\n        fix_4bit_weight_quant_state_from_module(self)\n\n        # weights are cast automatically as Params4bit, but the bias has to be cast manually\n        if self.bias is not None and self.bias.dtype != x.dtype:\n            self.bias.data = self.bias.data.to(x.dtype)\n\n        if not self.compute_type_is_set:\n            self.set_compute_type(x)\n            self.compute_type_is_set = True\n\n        inp_dtype = x.dtype\n        if self.compute_dtype is not None:\n            x = x.to(self.compute_dtype)\n\n        bias = None if self.bias is None else self.bias.to(self.compute_dtype)\n        weight = self.weight.t()\n\n        return bnb.matmul_4bit(x, weight, bias=bias, quant_state=self.weight.quant_state).to(inp_dtype)\n\n\nclass LinearFP4(Linear4bit):\n    \"\"\"\n    Implements the FP4 data type.\n    \"\"\"\n\n    def __init__(\n        self,\n        input_features,\n        output_features,\n        bias=True,\n        compute_dtype=None,\n        compress_statistics=True,\n        quant_storage=torch.uint8,\n        device=None,\n    ):\n        \"\"\"\n        Args:\n            input_features (`int`):","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.LinearFP4","uri":"program://bitsandbytes/class/bitsandbytes.nn.modules.LinearFP4#L535-L568","kind":"class","name":"LinearFP4","path":"bitsandbytes/nn/modules.py","language":"python","start_line":535,"end_line":568,"context_start_line":515,"context_end_line":588,"code":"        fix_4bit_weight_quant_state_from_module(self)\n\n        # weights are cast automatically as Params4bit, but the bias has to be cast manually\n        if self.bias is not None and self.bias.dtype != x.dtype:\n            self.bias.data = self.bias.data.to(x.dtype)\n\n        if not self.compute_type_is_set:\n            self.set_compute_type(x)\n            self.compute_type_is_set = True\n\n        inp_dtype = x.dtype\n        if self.compute_dtype is not None:\n            x = x.to(self.compute_dtype)\n\n        bias = None if self.bias is None else self.bias.to(self.compute_dtype)\n        weight = self.weight.t()\n\n        return bnb.matmul_4bit(x, weight, bias=bias, quant_state=self.weight.quant_state).to(inp_dtype)\n\n\nclass LinearFP4(Linear4bit):\n    \"\"\"\n    Implements the FP4 data type.\n    \"\"\"\n\n    def __init__(\n        self,\n        input_features,\n        output_features,\n        bias=True,\n        compute_dtype=None,\n        compress_statistics=True,\n        quant_storage=torch.uint8,\n        device=None,\n    ):\n        \"\"\"\n        Args:\n            input_features (`int`):\n                Number of input features of the linear layer.\n            output_features (`int`):\n                Number of output features of the linear layer.\n            bias (`bool`, defaults to `True`):\n                Whether the linear class uses the bias term as well.\n        \"\"\"\n        super().__init__(\n            input_features,\n            output_features,\n            bias,\n            compute_dtype,\n            compress_statistics,\n            \"fp4\",\n            quant_storage,\n            device,\n        )\n\n\nclass LinearNF4(Linear4bit):\n    \"\"\"Implements the NF4 data type.\n\n    Constructs a quantization data type where each bin has equal area under a standard normal distribution N(0, 1) that\n    is normalized into the range [-1, 1].\n\n    For more 
information read the paper: QLoRA: Efficient Finetuning of Quantized LLMs (https://arxiv.org/abs/2305.14314)\n\n    Implementation of the NF4 data type in bitsandbytes can be found in the `create_normal_map` function in\n    the `functional.py` file: https://github.com/TimDettmers/bitsandbytes/blob/main/bitsandbytes/functional.py#L236.\n    \"\"\"\n\n    def __init__(\n        self,\n        input_features,\n        output_features,\n        bias=True,\n        compute_dtype=None,","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.LinearNF4","uri":"program://bitsandbytes/class/bitsandbytes.nn.modules.LinearNF4#L571-L611","kind":"class","name":"LinearNF4","path":"bitsandbytes/nn/modules.py","language":"python","start_line":571,"end_line":611,"context_start_line":551,"context_end_line":631,"code":"        Args:\n            input_features (`int`):\n                Number of input features of the linear layer.\n            output_features (`int`):\n                Number of output features of the linear layer.\n            bias (`bool`, defaults to `True`):\n                Whether the linear class uses the bias term as well.\n        \"\"\"\n        super().__init__(\n            input_features,\n            output_features,\n            bias,\n            compute_dtype,\n            compress_statistics,\n            \"fp4\",\n            quant_storage,\n            device,\n        )\n\n\nclass LinearNF4(Linear4bit):\n    \"\"\"Implements the NF4 data type.\n\n    Constructs a quantization data type where each bin has equal area under a standard normal distribution N(0, 1) that\n    is normalized into the range [-1, 1].\n\n    For more information read the paper: QLoRA: Efficient Finetuning of Quantized LLMs (https://arxiv.org/abs/2305.14314)\n\n    Implementation of the NF4 data type in bitsandbytes can be found in the `create_normal_map` function in\n    the `functional.py` file: https://github.com/TimDettmers/bitsandbytes/blob/main/bitsandbytes/functional.py#L236.\n    \"\"\"\n\n    def __init__(\n        self,\n        input_features,\n        output_features,\n        bias=True,\n        compute_dtype=None,\n        compress_statistics=True,\n        quant_storage=torch.uint8,\n        device=None,\n    ):\n        \"\"\"\n        Args:\n            input_features (`int`):\n                Number of input features of the linear layer.\n            output_features (`int`):\n                Number of output features of the linear layer.\n            bias (`bool`, defaults to `True`):\n                Whether the linear class uses the bias term as well.\n        \"\"\"\n        super().__init__(\n            input_features,\n            output_features,\n            bias,\n            compute_dtype,\n            compress_statistics,\n            \"nf4\",\n            quant_storage,\n            device,\n        )\n\n\nclass Int8Params(torch.nn.Parameter):\n    def __new__(\n        cls,\n        data: Optional[torch.Tensor] = None,\n        requires_grad=True,\n        has_fp16_weights=False,\n        CB: Optional[torch.Tensor] = None,\n        SCB: Optional[torch.Tensor] = None,\n    ):\n        if data is None:\n            data = torch.empty(0)\n        obj = torch.Tensor._make_subclass(cls, data, requires_grad)\n        obj.CB = CB\n        obj.SCB = SCB\n        obj.has_fp16_weights = has_fp16_weights\n        return obj\n\n    def _quantize(self, device):","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.Int8Params","uri":"program://bitsandbytes/class/bitsandbytes.nn.modules.Int8Params#L614-L696","kind":"class","name":"Int8Params","path":"bitsandbytes/nn/modules.py","language":"python","start_line":614,"end_line":696,"context_start_line":594,"context_end_line":716,"code":"        Args:\n            input_features (`int`):\n                Number of input features of the linear layer.\n            output_features (`int`):\n                Number of output features of the linear layer.\n            bias (`bool`, defaults to `True`):\n            
Whether the linear class uses the bias term as well.\n \"\"\"\n super().__init__(\n input_features,\n output_features,\n bias,\n compute_dtype,\n compress_statistics,\n \"nf4\",\n quant_storage,\n device,\n )\n\n\nclass Int8Params(torch.nn.Parameter):\n def __new__(\n cls,\n data: Optional[torch.Tensor] = None,\n requires_grad=True,\n has_fp16_weights=False,\n CB: Optional[torch.Tensor] = None,\n SCB: Optional[torch.Tensor] = None,\n ):\n if data is None:\n data = torch.empty(0)\n obj = torch.Tensor._make_subclass(cls, data, requires_grad)\n obj.CB = CB\n obj.SCB = SCB\n obj.has_fp16_weights = has_fp16_weights\n return obj\n\n def _quantize(self, device):\n if self.has_fp16_weights:\n return super().to(device)\n\n # We quantize the weight and store in 8bit row-major\n B = self.data.contiguous().to(device=device, dtype=torch.float16)\n CB, SCB, _ = bnb.functional.int8_vectorwise_quant(B)\n self.data = CB\n self.CB = CB\n self.SCB = SCB\n\n return self\n\n def cpu(self):\n return self.to(device=\"cpu\")\n\n def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):\n return self.to(device=\"cuda\" if device is None else device, non_blocking=non_blocking)\n\n def xpu(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):\n return self.to(device=\"xpu\" if device is None else device, non_blocking=non_blocking)\n\n def __deepcopy__(self, memo):\n # adjust this if new arguments are added to the constructor\n new_instance = type(self).__new__(\n type(self),\n data=copy.deepcopy(self.data, memo),\n requires_grad=self.requires_grad,\n has_fp16_weights=self.has_fp16_weights,\n CB=copy.deepcopy(self.CB, memo),\n SCB=copy.deepcopy(self.SCB, memo),\n )\n return new_instance\n\n @overload\n def to(\n self: T,\n device: Optional[Union[int, device]] = ...,\n dtype: Optional[Union[dtype, str]] = ...,\n non_blocking: bool = ...,\n ) -> T: ...\n\n @overload\n def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T: ...\n\n @overload\n def to(self: T, tensor: Tensor, non_blocking: bool = ...) 
-> T: ...\n\n def to(self, *args, **kwargs):\n device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)\n\n if device is not None and device.type != \"meta\" and self.data.device.type == \"cpu\":\n if device.type != \"cpu\" or self.data.dtype != torch.int8:\n return self._quantize(device)\n elif self.data.dtype == torch.int8 and device.type == \"cpu\":\n self.CB = self.data\n\n new_param = Int8Params(\n super().to(device=device, dtype=dtype, non_blocking=non_blocking),\n requires_grad=self.requires_grad,\n has_fp16_weights=self.has_fp16_weights,\n )\n new_param.CB = self.CB\n new_param.SCB = self.SCB\n\n return new_param\n\n\ndef maybe_rearrange_weight(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n weight = state_dict.get(f\"{prefix}weight\")\n if weight is None:\n # if the state dict has no weights for this layer (e.g., LoRA finetuning), do nothing\n return\n weight_format = state_dict.pop(f\"{prefix}weight_format\", \"row\")\n\n if isinstance(weight_format, torch.Tensor):\n weight_format = weight_format.item()\n\n # For new weights format storage type, we explicitly check\n # if weights_format is on the mapping\n if isinstance(weight_format, int) and weight_format not in INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING:\n raise ValueError(f\"Expected supported weight format - got {weight_format}\")\n elif isinstance(weight_format, int) and weight_format in INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING:\n weight_format = INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING[weight_format]\n\n if weight_format != \"row\":","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.maybe_rearrange_weight","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules.maybe_rearrange_weight#L699-L717","kind":"function","name":"maybe_rearrange_weight","path":"bitsandbytes/nn/modules.py","language":"python","start_line":699,"end_line":717,"context_start_line":679,"context_end_line":737,"code":" def to(self, *args, **kwargs):\n device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)\n\n if device is not None and device.type != \"meta\" and self.data.device.type == \"cpu\":\n if device.type != \"cpu\" or self.data.dtype != torch.int8:\n return self._quantize(device)\n elif self.data.dtype == torch.int8 and device.type == \"cpu\":\n self.CB = self.data\n\n new_param = Int8Params(\n super().to(device=device, dtype=dtype, non_blocking=non_blocking),\n requires_grad=self.requires_grad,\n has_fp16_weights=self.has_fp16_weights,\n )\n new_param.CB = self.CB\n new_param.SCB = self.SCB\n\n return new_param\n\n\ndef maybe_rearrange_weight(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n weight = state_dict.get(f\"{prefix}weight\")\n if weight is None:\n # if the state dict has no weights for this layer (e.g., LoRA finetuning), do nothing\n return\n weight_format = state_dict.pop(f\"{prefix}weight_format\", \"row\")\n\n if isinstance(weight_format, torch.Tensor):\n weight_format = weight_format.item()\n\n # For new weights format storage type, we explicitly check\n # if weights_format is on the mapping\n if isinstance(weight_format, int) and weight_format not in INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING:\n raise ValueError(f\"Expected supported weight format - got {weight_format}\")\n elif isinstance(weight_format, int) and weight_format in 
INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING:\n weight_format = INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING[weight_format]\n\n if weight_format != \"row\":\n raise ValueError(f\"Only 'row' weight format is supported, got {weight_format}\")\n\n\nclass Embedding8bit(nn.Embedding):\n \"\"\"\n This class implements [LLM.int8()](https://arxiv.org/abs/2208.07339) algorithm for embedding layer\n\n Quantization API is similar to Linear8bitLt:\n ```python\n import torch\n import torch.nn as nn\n\n from bitsandbytes.nn import Embedding8bit\n\n fp16_module = nn.Embedding(128, 64)\n int8_module = Embedding8bit(128, 64)\n\n int8_module.load_state_dict(fp16_module.state_dict())\n\n int8_module = int8_module.to(0) # Quantization happens here\n ```","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.Embedding8bit","uri":"program://bitsandbytes/class/bitsandbytes.nn.modules.Embedding8bit#L720-L764","kind":"class","name":"Embedding8bit","path":"bitsandbytes/nn/modules.py","language":"python","start_line":720,"end_line":764,"context_start_line":700,"context_end_line":784,"code":" weight = state_dict.get(f\"{prefix}weight\")\n if weight is None:\n # if the state dict has no weights for this layer (e.g., LoRA finetuning), do nothing\n return\n weight_format = state_dict.pop(f\"{prefix}weight_format\", \"row\")\n\n if isinstance(weight_format, torch.Tensor):\n weight_format = weight_format.item()\n\n # For new weights format storage type, we explicitly check\n # if weights_format is on the mapping\n if isinstance(weight_format, int) and weight_format not in INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING:\n raise ValueError(f\"Expected supported weight format - got {weight_format}\")\n elif isinstance(weight_format, int) and weight_format in INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING:\n weight_format = INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING[weight_format]\n\n if weight_format != \"row\":\n raise ValueError(f\"Only 'row' weight format is supported, got {weight_format}\")\n\n\nclass Embedding8bit(nn.Embedding):\n \"\"\"\n This class implements [LLM.int8()](https://arxiv.org/abs/2208.07339) algorithm for embedding layer\n\n Quantization API is similar to Linear8bitLt:\n ```python\n import torch\n import torch.nn as nn\n\n from bitsandbytes.nn import Embedding8bit\n\n fp16_module = nn.Embedding(128, 64)\n int8_module = Embedding8bit(128, 64)\n\n int8_module.load_state_dict(fp16_module.state_dict())\n\n int8_module = int8_module.to(0) # Quantization happens here\n ```\n \"\"\"\n\n def __init__(self, num_embeddings, embedding_dim, device=None, dtype=None):\n super().__init__(num_embeddings, embedding_dim, device=device, dtype=dtype)\n self.dtype = self.weight.data.dtype\n\n self.weight = Int8Params(self.weight.data, has_fp16_weights=False, requires_grad=False)\n\n def _save_to_state_dict(self, destination, prefix, keep_vars):\n raise NotImplementedError(\"Saving Embedding8bit module is not implemented\")\n\n def forward(self, input: Tensor) -> Tensor:\n if not hasattr(self.weight, \"SCB\"):\n raise RuntimeError(\"Embedding layer is not quantized. 
Please call .cuda() or .to(device) first.\")\n\n rows = self.weight.data\n row_stats = self.weight.SCB\n\n assert rows.shape == (self.num_embeddings, self.embedding_dim)\n assert row_stats.shape == (self.num_embeddings,)\n\n compressed_output = F.embedding(input, rows)\n compressed_output_stats = F.embedding(input, row_stats.view(self.num_embeddings, 1))\n\n output = compressed_output * (compressed_output_stats / 127.0)\n\n return output.to(self.dtype)\n\n\nclass Embedding4bit(nn.Embedding):\n \"\"\"\n This is the base class similar to Linear4bit. It implements the 4-bit quantization algorithm presented in\n [QLoRA](https://arxiv.org/abs/2305.14314) for embeddings.\n\n Quantization API is similar to Linear4bit:\n ```python\n import torch\n import torch.nn as nn\n\n from bitsandbytes.nn import Embedding4bit\n\n fp16_module = nn.Embedding(128, 64)\n quantized_module = Embedding4bit(128, 64)\n\n quantized_module.load_state_dict(fp16_module.state_dict())\n\n quantized_module = quantized_module.to(0) # Quantization happens here","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.Embedding4bit","uri":"program://bitsandbytes/class/bitsandbytes.nn.modules.Embedding4bit#L767-L864","kind":"class","name":"Embedding4bit","path":"bitsandbytes/nn/modules.py","language":"python","start_line":767,"end_line":864,"context_start_line":747,"context_end_line":884,"code":" raise NotImplementedError(\"Saving Embedding8bit module is not implemented\")\n\n def forward(self, input: Tensor) -> Tensor:\n if not hasattr(self.weight, \"SCB\"):\n raise RuntimeError(\"Embedding layer is not quantized. Please call .cuda() or .to(device) first.\")\n\n rows = self.weight.data\n row_stats = self.weight.SCB\n\n assert rows.shape == (self.num_embeddings, self.embedding_dim)\n assert row_stats.shape == (self.num_embeddings,)\n\n compressed_output = F.embedding(input, rows)\n compressed_output_stats = F.embedding(input, row_stats.view(self.num_embeddings, 1))\n\n output = compressed_output * (compressed_output_stats / 127.0)\n\n return output.to(self.dtype)\n\n\nclass Embedding4bit(nn.Embedding):\n \"\"\"\n This is the base class similar to Linear4bit. It implements the 4-bit quantization algorithm presented in\n [QLoRA](https://arxiv.org/abs/2305.14314) for embeddings.\n\n Quantization API is similar to Linear4bit:\n ```python\n import torch\n import torch.nn as nn\n\n from bitsandbytes.nn import Embedding4bit\n\n fp16_module = nn.Embedding(128, 64)\n quantized_module = Embedding4bit(128, 64)\n\n quantized_module.load_state_dict(fp16_module.state_dict())\n\n quantized_module = quantized_module.to(0) # Quantization happens here\n ```\n \"\"\"\n\n def __init__(\n self,\n num_embeddings,\n embedding_dim,\n dtype=None,\n quant_type=\"fp4\",\n quant_storage=torch.uint8,\n device=None,\n ):\n super().__init__(num_embeddings, embedding_dim, device=device, dtype=dtype)\n self.dtype = self.weight.data.dtype\n\n self.weight = Params4bit(\n self.weight.data,\n requires_grad=False,\n compress_statistics=None,\n quant_type=quant_type,\n quant_storage=quant_storage,\n module=self,\n )\n\n blocksize = self.weight.blocksize\n\n if embedding_dim % blocksize != 0:\n warnings.warn(\n f\"Embedding size {embedding_dim} is not divisible by block size {blocksize}. 
\"\n \"This will lead to slow inference.\",\n )\n\n def _forward_with_partial_dequantize(self, input: Tensor):\n assert self.embedding_dim % self.weight.quant_state.blocksize == 0\n\n w_4bit_uint8 = self.weight.data.view(torch.uint8).view(self.num_embeddings * self.embedding_dim // 2, 1)\n\n output_4bit = torch.nn.functional.embedding(\n weight=w_4bit_uint8.view(self.num_embeddings, self.embedding_dim // 2),\n input=input,\n ).view(-1, 1)\n assert output_4bit.shape == (input.numel() * self.embedding_dim // 2, 1)\n\n blocks_per_emb = self.embedding_dim // self.weight.blocksize\n\n absmax = self.weight.quant_state.absmax\n assert absmax.shape == (self.num_embeddings * blocks_per_emb,)\n\n output_absmax = torch.nn.functional.embedding(\n weight=absmax.view(self.num_embeddings, blocks_per_emb),\n input=input,\n ).view(\n -1,\n )\n assert output_absmax.shape == (input.numel() * blocks_per_emb,)\n\n output_quant_state = copy.deepcopy(self.weight.quant_state)\n output_quant_state.absmax = output_absmax\n output_quant_state.shape = torch.Size((*input.shape, self.embedding_dim))\n\n output = bnb.functional.dequantize_4bit(output_4bit, output_quant_state)\n assert output.shape == (*input.shape, self.embedding_dim)\n\n return output.to(self.dtype)\n\n def _save_to_state_dict(self, destination, prefix, keep_vars):\n raise NotImplementedError(\"Saving Embedding4bit module is not implemented\")\n\n def forward(self, input: Tensor) -> Tensor:\n fix_4bit_weight_quant_state_from_module(self)\n\n if self.embedding_dim % self.weight.quant_state.blocksize == 0:\n return self._forward_with_partial_dequantize(input)\n\n dequantized_weight = bnb.functional.dequantize_4bit(self.weight.data, self.weight.quant_state)\n\n return torch.nn.functional.embedding(\n weight=dequantized_weight,\n input=input,\n ).to(self.dtype)\n\n\nclass EmbeddingFP4(Embedding4bit):\n def __init__(\n self,\n num_embeddings,\n embedding_dim,\n dtype=None,\n quant_storage=torch.uint8,\n device=None,\n ):\n super().__init__(\n num_embeddings,\n embedding_dim,\n dtype=dtype,\n quant_type=\"fp4\",\n quant_storage=quant_storage,\n device=device,\n )\n","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.EmbeddingFP4","uri":"program://bitsandbytes/class/bitsandbytes.nn.modules.EmbeddingFP4#L867-L883","kind":"class","name":"EmbeddingFP4","path":"bitsandbytes/nn/modules.py","language":"python","start_line":867,"end_line":883,"context_start_line":847,"context_end_line":903,"code":"\n return output.to(self.dtype)\n\n def _save_to_state_dict(self, destination, prefix, keep_vars):\n raise NotImplementedError(\"Saving Embedding4bit module is not implemented\")\n\n def forward(self, input: Tensor) -> Tensor:\n fix_4bit_weight_quant_state_from_module(self)\n\n if self.embedding_dim % self.weight.quant_state.blocksize == 0:\n return self._forward_with_partial_dequantize(input)\n\n dequantized_weight = bnb.functional.dequantize_4bit(self.weight.data, self.weight.quant_state)\n\n return torch.nn.functional.embedding(\n weight=dequantized_weight,\n input=input,\n ).to(self.dtype)\n\n\nclass EmbeddingFP4(Embedding4bit):\n def __init__(\n self,\n num_embeddings,\n embedding_dim,\n dtype=None,\n quant_storage=torch.uint8,\n device=None,\n ):\n super().__init__(\n num_embeddings,\n embedding_dim,\n dtype=dtype,\n quant_type=\"fp4\",\n quant_storage=quant_storage,\n device=device,\n )\n\n\nclass EmbeddingNF4(Embedding4bit):\n def __init__(\n self,\n 
num_embeddings,\n        embedding_dim,\n        dtype=None,\n        quant_storage=torch.uint8,\n        device=None,\n    ):\n        super().__init__(\n            num_embeddings,\n            embedding_dim,\n            dtype=dtype,\n            quant_type=\"nf4\",\n            quant_storage=quant_storage,\n            device=device,\n        )\n","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.EmbeddingNF4","uri":"program://bitsandbytes/class/bitsandbytes.nn.modules.EmbeddingNF4#L886-L902","kind":"class","name":"EmbeddingNF4","path":"bitsandbytes/nn/modules.py","language":"python","start_line":886,"end_line":902,"context_start_line":866,"context_end_line":922,"code":"\nclass EmbeddingFP4(Embedding4bit):\n    def __init__(\n        self,\n        num_embeddings,\n        embedding_dim,\n        dtype=None,\n        quant_storage=torch.uint8,\n        device=None,\n    ):\n        super().__init__(\n            num_embeddings,\n            embedding_dim,\n            dtype=dtype,\n            quant_type=\"fp4\",\n            quant_storage=quant_storage,\n            device=device,\n        )\n\n\nclass EmbeddingNF4(Embedding4bit):\n    def __init__(\n        self,\n        num_embeddings,\n        embedding_dim,\n        dtype=None,\n        quant_storage=torch.uint8,\n        device=None,\n    ):\n        super().__init__(\n            num_embeddings,\n            embedding_dim,\n            dtype=dtype,\n            quant_type=\"nf4\",\n            quant_storage=quant_storage,\n            device=device,\n        )\n\n\nclass Linear8bitLt(nn.Linear):\n    \"\"\"\n    This class is the base module for the [LLM.int8()](https://arxiv.org/abs/2208.07339) algorithm.\n    To read more about it, have a look at the paper.\n\n    In order to quantize a linear layer one should first load the original fp16 / bf16 weights into\n    the Linear8bitLt module, then call `int8_module.to(\"cuda\")` to quantize the fp16 weights.\n\n    Example:\n\n    ```python\n    import torch\n    import torch.nn as nn\n\n    import bitsandbytes as bnb\n    from bitsandbytes.nn import Linear8bitLt\n\n    fp16_model = nn.Sequential(","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.Linear8bitLt","uri":"program://bitsandbytes/class/bitsandbytes.nn.modules.Linear8bitLt#L905-L1054","kind":"class","name":"Linear8bitLt","path":"bitsandbytes/nn/modules.py","language":"python","start_line":905,"end_line":1054,"context_start_line":885,"context_end_line":1074,"code":"\nclass EmbeddingNF4(Embedding4bit):\n    def __init__(\n        self,\n        num_embeddings,\n        embedding_dim,\n        dtype=None,\n        quant_storage=torch.uint8,\n        device=None,\n    ):\n        super().__init__(\n            num_embeddings,\n            embedding_dim,\n            dtype=dtype,\n            quant_type=\"nf4\",\n            quant_storage=quant_storage,\n            device=device,\n        )\n\n\nclass Linear8bitLt(nn.Linear):\n    \"\"\"\n    This class is the base module for the [LLM.int8()](https://arxiv.org/abs/2208.07339) algorithm.\n    To read more about it, have a look at the paper.\n\n    In order to quantize a linear layer one should first load the original fp16 / bf16 weights into\n    the Linear8bitLt module, then call `int8_module.to(\"cuda\")` to quantize the fp16 weights.\n\n    Example:\n\n    ```python\n    import torch\n    import torch.nn as nn\n\n    import bitsandbytes as bnb\n    from bitsandbytes.nn import Linear8bitLt\n\n    fp16_model = nn.Sequential(\n        nn.Linear(64, 64),\n        nn.Linear(64, 64)\n    )\n\n    int8_model = nn.Sequential(\n        Linear8bitLt(64, 64, has_fp16_weights=False),\n        Linear8bitLt(64, 64, has_fp16_weights=False)\n    )\n\n    int8_model.load_state_dict(fp16_model.state_dict())\n    int8_model = int8_model.to(0)  # Quantization happens here\n    ```\n    \"\"\"\n\n    def __init__(\n        self,\n        input_features: int,\n        output_features: int,\n        bias=True,\n        
has_fp16_weights=True,\n threshold=0.0,\n index=None,\n device=None,\n ):\n \"\"\"\n Initialize Linear8bitLt class.\n\n Args:\n input_features (`int`):\n Number of input features of the linear layer.\n output_features (`int`):\n Number of output features of the linear layer.\n bias (`bool`, defaults to `True`):\n Whether the linear class uses the bias term as well.\n \"\"\"\n super().__init__(input_features, output_features, bias, device)\n self.state = bnb.MatmulLtState()\n self.index = index\n\n self.state.threshold = threshold\n self.state.has_fp16_weights = has_fp16_weights\n\n if threshold > 0.0 and not has_fp16_weights:\n self.state.use_pool = True\n\n self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights)\n self._register_load_state_dict_pre_hook(maybe_rearrange_weight)\n\n def _save_to_state_dict(self, destination, prefix, keep_vars):\n super()._save_to_state_dict(destination, prefix, keep_vars)\n\n # we only need to save SCB as extra data, because CB for quantized weights is already stored in weight.data\n scb_name = \"SCB\"\n\n # case 1: .cuda was called, SCB is in self.weight\n param_from_weight = getattr(self.weight, scb_name)\n # case 2: self.init_8bit_state was called, SCB is in self.state\n param_from_state = getattr(self.state, scb_name)\n\n key_name = prefix + f\"{scb_name}\"\n\n # We now only save in row-major. This format information is stored for backwards compatibility.\n format_name = prefix + \"weight_format\"\n\n if not self.state.has_fp16_weights:\n if param_from_weight is not None:\n destination[key_name] = param_from_weight if keep_vars else param_from_weight.detach()\n destination[format_name] = torch.tensor(0, dtype=torch.uint8)\n elif param_from_state is not None:\n destination[key_name] = param_from_state if keep_vars else param_from_state.detach()\n destination[format_name] = torch.tensor(0, dtype=torch.uint8)\n\n def _load_from_state_dict(\n self,\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n ):\n super()._load_from_state_dict(\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n )\n unexpected_copy = list(unexpected_keys)\n\n for key in unexpected_copy:\n input_name = key[len(prefix) :]\n if input_name == \"SCB\":\n if self.weight.SCB is None:\n # buffers not yet initialized, can't access them directly without quantizing first\n raise RuntimeError(\n \"Loading a quantized checkpoint into non-quantized Linear8bitLt is \"\n \"not supported. 
Please call module.cuda() before module.load_state_dict()\",\n                    )\n\n                input_param = state_dict[key]\n                self.weight.SCB.copy_(input_param)\n\n                if self.state.SCB is not None:\n                    self.state.SCB = self.weight.SCB\n\n                unexpected_keys.remove(key)\n\n    def init_8bit_state(self):\n        self.state.CB = self.weight.CB\n        self.state.SCB = self.weight.SCB\n        self.weight.CB = None\n        self.weight.SCB = None\n\n    def forward(self, x: torch.Tensor):\n        self.state.is_training = self.training\n        if self.weight.CB is not None:\n            self.init_8bit_state()\n\n        # weights are cast automatically as Int8Params, but the bias has to be cast manually\n        if self.bias is not None and self.bias.dtype != x.dtype:\n            self.bias.data = self.bias.data.to(x.dtype)\n\n        out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state)\n\n        if not self.state.has_fp16_weights and self.state.CB is not None:\n            self.weight.data = self.state.CB\n\n        return out\n\n\nclass OutlierAwareLinear(nn.Linear):\n    def __init__(self, input_features, output_features, bias=True, device=None):\n        super().__init__(input_features, output_features, bias, device)\n        self.outlier_dim = None\n        self.is_quantized = False\n\n    def forward_with_outliers(self, x, outlier_idx):\n        raise NotImplementedError(\"Please override the `forward_with_outliers(self, x, outlier_idx)` function\")\n\n    def quantize_weight(self, w, outlier_idx):\n        raise NotImplementedError(\"Please override the `quantize_weight(self, w, outlier_idx)` function\")\n\n    def forward(self, x):\n        if self.outlier_dim is None:\n            tracer = OutlierTracer.get_instance()\n            if not tracer.is_initialized():\n                print(\"Please use OutlierTracer.initialize(model) before using the OutlierAwareLinear layer\")\n            outlier_idx = tracer.get_outliers(self.weight)","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.OutlierAwareLinear","uri":"program://bitsandbytes/class/bitsandbytes.nn.modules.OutlierAwareLinear#L1057-L1081","kind":"class","name":"OutlierAwareLinear","path":"bitsandbytes/nn/modules.py","language":"python","start_line":1057,"end_line":1081,"context_start_line":1037,"context_end_line":1101,"code":"        self.weight.CB = None\n        self.weight.SCB = None\n\n    def forward(self, x: torch.Tensor):\n        self.state.is_training = self.training\n        if self.weight.CB is not None:\n            self.init_8bit_state()\n\n        # weights are cast automatically as Int8Params, but the bias has to be cast manually\n        if self.bias is not None and self.bias.dtype != x.dtype:\n            self.bias.data = self.bias.data.to(x.dtype)\n\n        out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state)\n\n        if not self.state.has_fp16_weights and self.state.CB is not None:\n            self.weight.data = self.state.CB\n\n        return out\n\n\nclass OutlierAwareLinear(nn.Linear):\n    def __init__(self, input_features, output_features, bias=True, device=None):\n        super().__init__(input_features, output_features, bias, device)\n        self.outlier_dim = None\n        self.is_quantized = False\n\n    def forward_with_outliers(self, x, outlier_idx):\n        raise NotImplementedError(\"Please override the `forward_with_outliers(self, x, outlier_idx)` function\")\n\n    def quantize_weight(self, w, outlier_idx):\n        raise NotImplementedError(\"Please override the `quantize_weight(self, w, outlier_idx)` function\")\n\n    def forward(self, x):\n        if self.outlier_dim is None:\n            tracer = OutlierTracer.get_instance()\n            if not tracer.is_initialized():\n                print(\"Please use OutlierTracer.initialize(model) before using the OutlierAwareLinear 
layer\")\n outlier_idx = tracer.get_outliers(self.weight)\n # print(outlier_idx, tracer.get_hvalue(self.weight))\n self.outlier_dim = outlier_idx\n\n if not self.is_quantized:\n w = self.quantize_weight(self.weight, self.outlier_dim)\n self.weight.data.copy_(w)\n self.is_quantized = True\n\n\nclass SwitchBackLinearBnb(nn.Linear):\n def __init__(\n self,\n input_features,\n output_features,\n bias=True,\n has_fp16_weights=True,\n memory_efficient_backward=False,\n threshold=0.0,\n index=None,\n device=None,\n ):\n super().__init__(input_features, output_features, bias, device)\n self.state = bnb.MatmulLtState()\n self.index = index\n\n self.state.threshold = threshold\n self.state.has_fp16_weights = has_fp16_weights","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.SwitchBackLinearBnb","uri":"program://bitsandbytes/class/bitsandbytes.nn.modules.SwitchBackLinearBnb#L1084-L1120","kind":"class","name":"SwitchBackLinearBnb","path":"bitsandbytes/nn/modules.py","language":"python","start_line":1084,"end_line":1120,"context_start_line":1064,"context_end_line":1120,"code":" raise NotImplementedError(\"Please override the `forward_with_outliers(self, x, outlier_idx)` function\")\n\n def quantize_weight(self, w, outlier_idx):\n raise NotImplementedError(\"Please override the `quantize_weights(self, w, outlier_idx)` function\")\n\n def forward(self, x):\n if self.outlier_dim is None:\n tracer = OutlierTracer.get_instance()\n if not tracer.is_initialized():\n print(\"Please use OutlierTracer.initialize(model) before using the OutlierAwareLinear layer\")\n outlier_idx = tracer.get_outliers(self.weight)\n # print(outlier_idx, tracer.get_hvalue(self.weight))\n self.outlier_dim = outlier_idx\n\n if not self.is_quantized:\n w = self.quantize_weight(self.weight, self.outlier_dim)\n self.weight.data.copy_(w)\n self.is_quantized = True\n\n\nclass SwitchBackLinearBnb(nn.Linear):\n def __init__(\n self,\n input_features,\n output_features,\n bias=True,\n has_fp16_weights=True,\n memory_efficient_backward=False,\n threshold=0.0,\n index=None,\n device=None,\n ):\n super().__init__(input_features, output_features, bias, device)\n self.state = bnb.MatmulLtState()\n self.index = index\n\n self.state.threshold = threshold\n self.state.has_fp16_weights = has_fp16_weights\n self.state.memory_efficient_backward = memory_efficient_backward\n if threshold > 0.0 and not has_fp16_weights:\n self.state.use_pool = True\n\n self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights)\n\n def init_8bit_state(self):\n self.state.CB = self.weight.CB\n self.state.SCB = self.weight.SCB\n self.weight.CB = None\n self.weight.SCB = None\n\n def forward(self, x):\n self.state.is_training = self.training\n\n if self.weight.CB is not None:\n self.init_8bit_state()\n\n return bnb.matmul_mixed(x.half(), self.weight.half(), bias=None, state=self.state) + self.bias","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.__init__","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules.__init__#L1085-L1106","kind":"function","name":"__init__","path":"bitsandbytes/nn/modules.py","language":"python","start_line":1085,"end_line":1106,"context_start_line":1065,"context_end_line":1120,"code":"\n def quantize_weight(self, w, outlier_idx):\n raise 
NotImplementedError(\"Please override the `quantize_weights(self, w, outlier_idx)` function\")\n\n def forward(self, x):\n if self.outlier_dim is None:\n tracer = OutlierTracer.get_instance()\n if not tracer.is_initialized():\n print(\"Please use OutlierTracer.initialize(model) before using the OutlierAwareLinear layer\")\n outlier_idx = tracer.get_outliers(self.weight)\n # print(outlier_idx, tracer.get_hvalue(self.weight))\n self.outlier_dim = outlier_idx\n\n if not self.is_quantized:\n w = self.quantize_weight(self.weight, self.outlier_dim)\n self.weight.data.copy_(w)\n self.is_quantized = True\n\n\nclass SwitchBackLinearBnb(nn.Linear):\n def __init__(\n self,\n input_features,\n output_features,\n bias=True,\n has_fp16_weights=True,\n memory_efficient_backward=False,\n threshold=0.0,\n index=None,\n device=None,\n ):\n super().__init__(input_features, output_features, bias, device)\n self.state = bnb.MatmulLtState()\n self.index = index\n\n self.state.threshold = threshold\n self.state.has_fp16_weights = has_fp16_weights\n self.state.memory_efficient_backward = memory_efficient_backward\n if threshold > 0.0 and not has_fp16_weights:\n self.state.use_pool = True\n\n self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights)\n\n def init_8bit_state(self):\n self.state.CB = self.weight.CB\n self.state.SCB = self.weight.SCB\n self.weight.CB = None\n self.weight.SCB = None\n\n def forward(self, x):\n self.state.is_training = self.training\n\n if self.weight.CB is not None:\n self.init_8bit_state()\n\n return bnb.matmul_mixed(x.half(), self.weight.half(), bias=None, state=self.state) + self.bias","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.reset_parameters","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules.reset_parameters#L177-L179","kind":"function","name":"reset_parameters","path":"bitsandbytes/nn/modules.py","language":"python","start_line":177,"end_line":179,"context_start_line":157,"context_end_line":199,"code":" scale_grad_by_freq (`bool`, defaults to `False`):\n Scale gradient by frequency during backpropagation.\n sparse (`bool`, defaults to `False`):\n Computes dense gradients. Set to `True` to compute sparse gradients instead.\n _weight (`Optional[Tensor]`):\n Pretrained embeddings.\n \"\"\"\n super().__init__(\n num_embeddings,\n embedding_dim,\n padding_idx,\n max_norm,\n norm_type,\n scale_grad_by_freq,\n sparse,\n _weight,\n device=device,\n )\n GlobalOptimManager.get_instance().register_module_override(self, \"weight\", {\"optim_bits\": 32})\n\n def reset_parameters(self) -> None:\n torch.nn.init.xavier_uniform_(self.weight)\n self._fill_padding_idx_with_zero()\n\n \"\"\" !!! This is a redefinition of _fill_padding_idx_with_zero in torch.nn.Embedding\n to make the Layer compatible with Pytorch < 1.9.\n This means that if this changes in future PyTorch releases this need to change too\n which is cumbersome. 
However, with this we can ensure compatibility with previous\n PyTorch releases.\n \"\"\"\n\n def _fill_padding_idx_with_zero(self) -> None:\n if self.padding_idx is not None:\n with torch.no_grad():\n self.weight[self.padding_idx].fill_(0)\n\n def forward(self, input: Tensor) -> Tensor:\n emb = F.embedding(\n input,\n self.weight,\n self.padding_idx,\n self.max_norm,\n self.norm_type,","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules._fill_padding_idx_with_zero","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules._fill_padding_idx_with_zero#L188-L191","kind":"function","name":"_fill_padding_idx_with_zero","path":"bitsandbytes/nn/modules.py","language":"python","start_line":188,"end_line":191,"context_start_line":168,"context_end_line":211,"code":" max_norm,\n norm_type,\n scale_grad_by_freq,\n sparse,\n _weight,\n device=device,\n )\n GlobalOptimManager.get_instance().register_module_override(self, \"weight\", {\"optim_bits\": 32})\n\n def reset_parameters(self) -> None:\n torch.nn.init.xavier_uniform_(self.weight)\n self._fill_padding_idx_with_zero()\n\n \"\"\" !!! This is a redefinition of _fill_padding_idx_with_zero in torch.nn.Embedding\n to make the Layer compatible with Pytorch < 1.9.\n This means that if this changes in future PyTorch releases this need to change too\n which is cumbersome. However, with this we can ensure compatibility with previous\n PyTorch releases.\n \"\"\"\n\n def _fill_padding_idx_with_zero(self) -> None:\n if self.padding_idx is not None:\n with torch.no_grad():\n self.weight[self.padding_idx].fill_(0)\n\n def forward(self, input: Tensor) -> Tensor:\n emb = F.embedding(\n input,\n self.weight,\n self.padding_idx,\n self.max_norm,\n self.norm_type,\n self.scale_grad_by_freq,\n self.sparse,\n )\n\n return emb\n\n\nclass Params4bit(torch.nn.Parameter):\n def __new__(\n cls,\n data: Optional[torch.Tensor] = None,\n requires_grad=False, # quantized weights should be frozen by default","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.forward","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules.forward#L1114-L1120","kind":"function","name":"forward","path":"bitsandbytes/nn/modules.py","language":"python","start_line":1114,"end_line":1120,"context_start_line":1094,"context_end_line":1120,"code":" device=None,\n ):\n super().__init__(input_features, output_features, bias, device)\n self.state = bnb.MatmulLtState()\n self.index = index\n\n self.state.threshold = threshold\n self.state.has_fp16_weights = has_fp16_weights\n self.state.memory_efficient_backward = memory_efficient_backward\n if threshold > 0.0 and not has_fp16_weights:\n self.state.use_pool = True\n\n self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights)\n\n def init_8bit_state(self):\n self.state.CB = self.weight.CB\n self.state.SCB = self.weight.SCB\n self.weight.CB = None\n self.weight.SCB = None\n\n def forward(self, x):\n self.state.is_training = self.training\n\n if self.weight.CB is not None:\n self.init_8bit_state()\n\n return bnb.matmul_mixed(x.half(), self.weight.half(), bias=None, state=self.state) + self.bias","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} 
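The 8-bit records above all share one storage scheme: `CB` holds the int8 codes and `SCB` holds one absmax scale per weight row, so dequantization is `CB * SCB / 127`, which is exactly the `compressed_output * (compressed_output_stats / 127.0)` step in `Embedding8bit.forward`. A minimal pure-PyTorch sketch of that round trip (the `cb`/`scb` names are illustrative only, not library API):

```python
import torch

w = torch.randn(128, 64)                                     # fp32 weights, one row per embedding
scb = w.abs().amax(dim=1)                                    # row-wise absmax, analogous to weight.SCB
cb = torch.round(w * (127.0 / scb[:, None])).to(torch.int8)  # int8 codes, analogous to weight.CB

w_hat = cb.float() * (scb[:, None] / 127.0)                  # mirrors Embedding8bit.forward's dequant
print((w - w_hat).abs().max())                               # rounding error, at most scb/254 per row
```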
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.__new__","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules.__new__#L615-L629","kind":"function","name":"__new__","path":"bitsandbytes/nn/modules.py","language":"python","start_line":615,"end_line":629,"context_start_line":595,"context_end_line":649,"code":" input_features (`str`):\n Number of input features of the linear layer.\n output_features (`str`):\n Number of output features of the linear layer.\n bias (`bool`, defaults to `True`):\n Whether the linear class uses the bias term as well.\n \"\"\"\n super().__init__(\n input_features,\n output_features,\n bias,\n compute_dtype,\n compress_statistics,\n \"nf4\",\n quant_storage,\n device,\n )\n\n\nclass Int8Params(torch.nn.Parameter):\n def __new__(\n cls,\n data: Optional[torch.Tensor] = None,\n requires_grad=True,\n has_fp16_weights=False,\n CB: Optional[torch.Tensor] = None,\n SCB: Optional[torch.Tensor] = None,\n ):\n if data is None:\n data = torch.empty(0)\n obj = torch.Tensor._make_subclass(cls, data, requires_grad)\n obj.CB = CB\n obj.SCB = SCB\n obj.has_fp16_weights = has_fp16_weights\n return obj\n\n def _quantize(self, device):\n if self.has_fp16_weights:\n return super().to(device)\n\n # We quantize the weight and store in 8bit row-major\n B = self.data.contiguous().to(device=device, dtype=torch.float16)\n CB, SCB, _ = bnb.functional.int8_vectorwise_quant(B)\n self.data = CB\n self.CB = CB\n self.SCB = SCB\n\n return self\n\n def cpu(self):\n return self.to(device=\"cpu\")\n\n def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):\n return self.to(device=\"cuda\" if device is None else device, non_blocking=non_blocking)\n","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.__getstate__","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules.__getstate__#L237-L241","kind":"function","name":"__getstate__","path":"bitsandbytes/nn/modules.py","language":"python","start_line":237,"end_line":241,"context_start_line":217,"context_end_line":261,"code":" module: Optional[\"Linear4bit\"] = None,\n bnb_quantized: bool = False,\n ) -> \"Params4bit\":\n if data is None:\n data = torch.empty(0)\n\n if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128\n\n self = torch.Tensor._make_subclass(cls, data, requires_grad)\n self.blocksize = blocksize\n self.compress_statistics = compress_statistics\n self.quant_type = quant_type\n self.quant_state = quant_state\n self.quant_storage = quant_storage\n self.bnb_quantized = bnb_quantized\n self.data = data\n self.module = module\n return self\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"data\"] = self.data\n state[\"requires_grad\"] = self.requires_grad\n return state\n\n def __setstate__(self, state):\n self.requires_grad = state[\"requires_grad\"]\n self.blocksize = state[\"blocksize\"]\n self.compress_statistics = state[\"compress_statistics\"]\n self.quant_type = state[\"quant_type\"]\n self.quant_state = state[\"quant_state\"]\n self.data = state[\"data\"]\n self.quant_storage = state[\"quant_storage\"]\n self.bnb_quantized = state[\"bnb_quantized\"]\n self.module = state[\"module\"]\n\n def __deepcopy__(self, memo):\n new_instance = type(self).__new__(type(self))\n state = self.__getstate__()\n new_instance.__setstate__(state)\n new_instance.quant_state = copy.deepcopy(state[\"quant_state\"])\n new_instance.data = 
copy.deepcopy(state[\"data\"])\n return new_instance\n","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.__setstate__","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules.__setstate__#L243-L252","kind":"function","name":"__setstate__","path":"bitsandbytes/nn/modules.py","language":"python","start_line":243,"end_line":252,"context_start_line":223,"context_end_line":272,"code":" if blocksize is None:\n blocksize = 64 if not HIP_ENVIRONMENT else 128\n\n self = torch.Tensor._make_subclass(cls, data, requires_grad)\n self.blocksize = blocksize\n self.compress_statistics = compress_statistics\n self.quant_type = quant_type\n self.quant_state = quant_state\n self.quant_storage = quant_storage\n self.bnb_quantized = bnb_quantized\n self.data = data\n self.module = module\n return self\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"data\"] = self.data\n state[\"requires_grad\"] = self.requires_grad\n return state\n\n def __setstate__(self, state):\n self.requires_grad = state[\"requires_grad\"]\n self.blocksize = state[\"blocksize\"]\n self.compress_statistics = state[\"compress_statistics\"]\n self.quant_type = state[\"quant_type\"]\n self.quant_state = state[\"quant_state\"]\n self.data = state[\"data\"]\n self.quant_storage = state[\"quant_storage\"]\n self.bnb_quantized = state[\"bnb_quantized\"]\n self.module = state[\"module\"]\n\n def __deepcopy__(self, memo):\n new_instance = type(self).__new__(type(self))\n state = self.__getstate__()\n new_instance.__setstate__(state)\n new_instance.quant_state = copy.deepcopy(state[\"quant_state\"])\n new_instance.data = copy.deepcopy(state[\"data\"])\n return new_instance\n\n def __copy__(self):\n new_instance = type(self).__new__(type(self))\n state = self.__getstate__()\n new_instance.__setstate__(state)\n return new_instance\n\n @classmethod\n def from_prequantized(\n cls,\n data: torch.Tensor,\n quantized_stats: dict[str, Any],","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.__deepcopy__","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules.__deepcopy__#L653-L663","kind":"function","name":"__deepcopy__","path":"bitsandbytes/nn/modules.py","language":"python","start_line":653,"end_line":663,"context_start_line":633,"context_end_line":683,"code":" return super().to(device)\n\n # We quantize the weight and store in 8bit row-major\n B = self.data.contiguous().to(device=device, dtype=torch.float16)\n CB, SCB, _ = bnb.functional.int8_vectorwise_quant(B)\n self.data = CB\n self.CB = CB\n self.SCB = SCB\n\n return self\n\n def cpu(self):\n return self.to(device=\"cpu\")\n\n def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):\n return self.to(device=\"cuda\" if device is None else device, non_blocking=non_blocking)\n\n def xpu(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):\n return self.to(device=\"xpu\" if device is None else device, non_blocking=non_blocking)\n\n def __deepcopy__(self, memo):\n # adjust this if new arguments are added to the constructor\n new_instance = type(self).__new__(\n type(self),\n data=copy.deepcopy(self.data, memo),\n requires_grad=self.requires_grad,\n has_fp16_weights=self.has_fp16_weights,\n CB=copy.deepcopy(self.CB, memo),\n SCB=copy.deepcopy(self.SCB, memo),\n )\n return 
new_instance\n\n @overload\n def to(\n self: T,\n device: Optional[Union[int, device]] = ...,\n dtype: Optional[Union[dtype, str]] = ...,\n non_blocking: bool = ...,\n ) -> T: ...\n\n @overload\n def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T: ...\n\n @overload\n def to(self: T, tensor: Tensor, non_blocking: bool = ...) -> T: ...\n\n def to(self, *args, **kwargs):\n device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)\n\n if device is not None and device.type != \"meta\" and self.data.device.type == \"cpu\":\n if device.type != \"cpu\" or self.data.dtype != torch.int8:","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.__copy__","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules.__copy__#L262-L266","kind":"function","name":"__copy__","path":"bitsandbytes/nn/modules.py","language":"python","start_line":262,"end_line":266,"context_start_line":242,"context_end_line":286,"code":"\n def __setstate__(self, state):\n self.requires_grad = state[\"requires_grad\"]\n self.blocksize = state[\"blocksize\"]\n self.compress_statistics = state[\"compress_statistics\"]\n self.quant_type = state[\"quant_type\"]\n self.quant_state = state[\"quant_state\"]\n self.data = state[\"data\"]\n self.quant_storage = state[\"quant_storage\"]\n self.bnb_quantized = state[\"bnb_quantized\"]\n self.module = state[\"module\"]\n\n def __deepcopy__(self, memo):\n new_instance = type(self).__new__(type(self))\n state = self.__getstate__()\n new_instance.__setstate__(state)\n new_instance.quant_state = copy.deepcopy(state[\"quant_state\"])\n new_instance.data = copy.deepcopy(state[\"data\"])\n return new_instance\n\n def __copy__(self):\n new_instance = type(self).__new__(type(self))\n state = self.__getstate__()\n new_instance.__setstate__(state)\n return new_instance\n\n @classmethod\n def from_prequantized(\n cls,\n data: torch.Tensor,\n quantized_stats: dict[str, Any],\n requires_grad: bool = False,\n device=\"cuda\",\n module: Optional[\"Linear4bit\"] = None,\n **kwargs,\n ) -> \"Params4bit\":\n self = torch.Tensor._make_subclass(cls, data.to(device))\n self.requires_grad = requires_grad\n self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)\n self.blocksize = self.quant_state.blocksize\n self.compress_statistics = self.quant_state.nested\n self.quant_type = self.quant_state.quant_type\n self.bnb_quantized = True\n\n self.quant_storage = data.dtype","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.from_prequantized","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules.from_prequantized#L269-L292","kind":"function","name":"from_prequantized","path":"bitsandbytes/nn/modules.py","language":"python","start_line":269,"end_line":292,"context_start_line":249,"context_end_line":312,"code":" self.data = state[\"data\"]\n self.quant_storage = state[\"quant_storage\"]\n self.bnb_quantized = state[\"bnb_quantized\"]\n self.module = state[\"module\"]\n\n def __deepcopy__(self, memo):\n new_instance = type(self).__new__(type(self))\n state = self.__getstate__()\n new_instance.__setstate__(state)\n new_instance.quant_state = copy.deepcopy(state[\"quant_state\"])\n new_instance.data = copy.deepcopy(state[\"data\"])\n return new_instance\n\n def __copy__(self):\n new_instance = 
type(self).__new__(type(self))\n state = self.__getstate__()\n new_instance.__setstate__(state)\n return new_instance\n\n @classmethod\n def from_prequantized(\n cls,\n data: torch.Tensor,\n quantized_stats: dict[str, Any],\n requires_grad: bool = False,\n device=\"cuda\",\n module: Optional[\"Linear4bit\"] = None,\n **kwargs,\n ) -> \"Params4bit\":\n self = torch.Tensor._make_subclass(cls, data.to(device))\n self.requires_grad = requires_grad\n self.quant_state = QuantState.from_dict(qs_dict=quantized_stats, device=device)\n self.blocksize = self.quant_state.blocksize\n self.compress_statistics = self.quant_state.nested\n self.quant_type = self.quant_state.quant_type\n self.bnb_quantized = True\n\n self.quant_storage = data.dtype\n self.module = module\n\n if self.module is not None:\n self.module.quant_state = self.quant_state\n\n return self\n\n def _quantize(self, device):\n w = self.data.contiguous().to(device)\n w_4bit, quant_state = bnb.functional.quantize_4bit(\n w,\n blocksize=self.blocksize,\n compress_statistics=self.compress_statistics,\n quant_type=self.quant_type,\n quant_storage=self.quant_storage,\n )\n self.data = w_4bit\n self.quant_state = quant_state\n if self.module is not None:\n self.module.quant_state = quant_state\n self.bnb_quantized = True\n return self\n\n def cpu(self):\n return self.to(device=\"cpu\")\n","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules._quantize","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules._quantize#L631-L642","kind":"function","name":"_quantize","path":"bitsandbytes/nn/modules.py","language":"python","start_line":631,"end_line":642,"context_start_line":611,"context_end_line":662,"code":" )\n\n\nclass Int8Params(torch.nn.Parameter):\n def __new__(\n cls,\n data: Optional[torch.Tensor] = None,\n requires_grad=True,\n has_fp16_weights=False,\n CB: Optional[torch.Tensor] = None,\n SCB: Optional[torch.Tensor] = None,\n ):\n if data is None:\n data = torch.empty(0)\n obj = torch.Tensor._make_subclass(cls, data, requires_grad)\n obj.CB = CB\n obj.SCB = SCB\n obj.has_fp16_weights = has_fp16_weights\n return obj\n\n def _quantize(self, device):\n if self.has_fp16_weights:\n return super().to(device)\n\n # We quantize the weight and store in 8bit row-major\n B = self.data.contiguous().to(device=device, dtype=torch.float16)\n CB, SCB, _ = bnb.functional.int8_vectorwise_quant(B)\n self.data = CB\n self.CB = CB\n self.SCB = SCB\n\n return self\n\n def cpu(self):\n return self.to(device=\"cpu\")\n\n def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):\n return self.to(device=\"cuda\" if device is None else device, non_blocking=non_blocking)\n\n def xpu(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):\n return self.to(device=\"xpu\" if device is None else device, non_blocking=non_blocking)\n\n def __deepcopy__(self, memo):\n # adjust this if new arguments are added to the constructor\n new_instance = type(self).__new__(\n type(self),\n data=copy.deepcopy(self.data, memo),\n requires_grad=self.requires_grad,\n has_fp16_weights=self.has_fp16_weights,\n CB=copy.deepcopy(self.CB, memo),\n SCB=copy.deepcopy(self.SCB, memo),\n )","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} 
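The two `_quantize` methods above defer quantization until the parameter is moved onto an accelerator. A minimal sketch of the blockwise 4-bit round trip that `Params4bit._quantize` delegates to, via the public `bnb.functional` API (assumes a CUDA device; blocksize 64 matches the non-HIP default shown in `__new__`):

```python
import torch
import bitsandbytes as bnb

w = torch.randn(64, 64, dtype=torch.float16, device="cuda")

# Pack into 4-bit NF4 storage plus per-block absmax statistics (the QuantState).
w_4bit, quant_state = bnb.functional.quantize_4bit(w, blocksize=64, quant_type="nf4")
print(w_4bit.dtype, w_4bit.shape)  # torch.uint8 storage, two 4-bit codes per byte

# Dequantize with the saved state, as matmul_4bit does on the fly during forward().
w_hat = bnb.functional.dequantize_4bit(w_4bit, quant_state)
print((w.float() - w_hat.float()).abs().mean())  # blockwise quantization error
```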
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.cpu","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules.cpu#L644-L645","kind":"function","name":"cpu","path":"bitsandbytes/nn/modules.py","language":"python","start_line":644,"end_line":645,"context_start_line":624,"context_end_line":665,"code":" data = torch.empty(0)\n obj = torch.Tensor._make_subclass(cls, data, requires_grad)\n obj.CB = CB\n obj.SCB = SCB\n obj.has_fp16_weights = has_fp16_weights\n return obj\n\n def _quantize(self, device):\n if self.has_fp16_weights:\n return super().to(device)\n\n # We quantize the weight and store in 8bit row-major\n B = self.data.contiguous().to(device=device, dtype=torch.float16)\n CB, SCB, _ = bnb.functional.int8_vectorwise_quant(B)\n self.data = CB\n self.CB = CB\n self.SCB = SCB\n\n return self\n\n def cpu(self):\n return self.to(device=\"cpu\")\n\n def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):\n return self.to(device=\"cuda\" if device is None else device, non_blocking=non_blocking)\n\n def xpu(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):\n return self.to(device=\"xpu\" if device is None else device, non_blocking=non_blocking)\n\n def __deepcopy__(self, memo):\n # adjust this if new arguments are added to the constructor\n new_instance = type(self).__new__(\n type(self),\n data=copy.deepcopy(self.data, memo),\n requires_grad=self.requires_grad,\n has_fp16_weights=self.has_fp16_weights,\n CB=copy.deepcopy(self.CB, memo),\n SCB=copy.deepcopy(self.SCB, memo),\n )\n return new_instance\n\n @overload","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.cuda","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules.cuda#L647-L648","kind":"function","name":"cuda","path":"bitsandbytes/nn/modules.py","language":"python","start_line":647,"end_line":648,"context_start_line":627,"context_end_line":668,"code":" obj.SCB = SCB\n obj.has_fp16_weights = has_fp16_weights\n return obj\n\n def _quantize(self, device):\n if self.has_fp16_weights:\n return super().to(device)\n\n # We quantize the weight and store in 8bit row-major\n B = self.data.contiguous().to(device=device, dtype=torch.float16)\n CB, SCB, _ = bnb.functional.int8_vectorwise_quant(B)\n self.data = CB\n self.CB = CB\n self.SCB = SCB\n\n return self\n\n def cpu(self):\n return self.to(device=\"cpu\")\n\n def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):\n return self.to(device=\"cuda\" if device is None else device, non_blocking=non_blocking)\n\n def xpu(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):\n return self.to(device=\"xpu\" if device is None else device, non_blocking=non_blocking)\n\n def __deepcopy__(self, memo):\n # adjust this if new arguments are added to the constructor\n new_instance = type(self).__new__(\n type(self),\n data=copy.deepcopy(self.data, memo),\n requires_grad=self.requires_grad,\n has_fp16_weights=self.has_fp16_weights,\n CB=copy.deepcopy(self.CB, memo),\n SCB=copy.deepcopy(self.SCB, memo),\n )\n return new_instance\n\n @overload\n def to(\n self: T,\n device: Optional[Union[int, device]] = ...,","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} 
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.xpu","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules.xpu#L650-L651","kind":"function","name":"xpu","path":"bitsandbytes/nn/modules.py","language":"python","start_line":650,"end_line":651,"context_start_line":630,"context_end_line":671,"code":"\n def _quantize(self, device):\n if self.has_fp16_weights:\n return super().to(device)\n\n # We quantize the weight and store in 8bit row-major\n B = self.data.contiguous().to(device=device, dtype=torch.float16)\n CB, SCB, _ = bnb.functional.int8_vectorwise_quant(B)\n self.data = CB\n self.CB = CB\n self.SCB = SCB\n\n return self\n\n def cpu(self):\n return self.to(device=\"cpu\")\n\n def cuda(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):\n return self.to(device=\"cuda\" if device is None else device, non_blocking=non_blocking)\n\n def xpu(self, device: Optional[Union[int, device, str]] = None, non_blocking: bool = False):\n return self.to(device=\"xpu\" if device is None else device, non_blocking=non_blocking)\n\n def __deepcopy__(self, memo):\n # adjust this if new arguments are added to the constructor\n new_instance = type(self).__new__(\n type(self),\n data=copy.deepcopy(self.data, memo),\n requires_grad=self.requires_grad,\n has_fp16_weights=self.has_fp16_weights,\n CB=copy.deepcopy(self.CB, memo),\n SCB=copy.deepcopy(self.SCB, memo),\n )\n return new_instance\n\n @overload\n def to(\n self: T,\n device: Optional[Union[int, device]] = ...,\n dtype: Optional[Union[dtype, str]] = ...,\n non_blocking: bool = ...,\n ) -> T: ...","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.to","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules.to#L679-L696","kind":"function","name":"to","path":"bitsandbytes/nn/modules.py","language":"python","start_line":679,"end_line":696,"context_start_line":659,"context_end_line":716,"code":" has_fp16_weights=self.has_fp16_weights,\n CB=copy.deepcopy(self.CB, memo),\n SCB=copy.deepcopy(self.SCB, memo),\n )\n return new_instance\n\n @overload\n def to(\n self: T,\n device: Optional[Union[int, device]] = ...,\n dtype: Optional[Union[dtype, str]] = ...,\n non_blocking: bool = ...,\n ) -> T: ...\n\n @overload\n def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T: ...\n\n @overload\n def to(self: T, tensor: Tensor, non_blocking: bool = ...) 
-> T: ...\n\n def to(self, *args, **kwargs):\n device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)\n\n if device is not None and device.type != \"meta\" and self.data.device.type == \"cpu\":\n if device.type != \"cpu\" or self.data.dtype != torch.int8:\n return self._quantize(device)\n elif self.data.dtype == torch.int8 and device.type == \"cpu\":\n self.CB = self.data\n\n new_param = Int8Params(\n super().to(device=device, dtype=dtype, non_blocking=non_blocking),\n requires_grad=self.requires_grad,\n has_fp16_weights=self.has_fp16_weights,\n )\n new_param.CB = self.CB\n new_param.SCB = self.SCB\n\n return new_param\n\n\ndef maybe_rearrange_weight(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n weight = state_dict.get(f\"{prefix}weight\")\n if weight is None:\n # if the state dict has no weights for this layer (e.g., LoRA finetuning), do nothing\n return\n weight_format = state_dict.pop(f\"{prefix}weight_format\", \"row\")\n\n if isinstance(weight_format, torch.Tensor):\n weight_format = weight_format.item()\n\n # For new weights format storage type, we explicitly check\n # if weights_format is on the mapping\n if isinstance(weight_format, int) and weight_format not in INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING:\n raise ValueError(f\"Expected supported weight format - got {weight_format}\")\n elif isinstance(weight_format, int) and weight_format in INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING:\n weight_format = INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING[weight_format]\n\n if weight_format != \"row\":","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.__torch_function__","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules.__torch_function__#L356-L393","kind":"function","name":"__torch_function__","path":"bitsandbytes/nn/modules.py","language":"python","start_line":356,"end_line":393,"context_start_line":336,"context_end_line":413,"code":" if device is not None and device.type != \"meta\" and not self.bnb_quantized:\n return self._quantize(device)\n else:\n if self.quant_state is not None:\n self.quant_state.to(device)\n\n new_param = Params4bit(\n super().to(device=device, dtype=dtype, non_blocking=non_blocking),\n requires_grad=self.requires_grad,\n quant_state=self.quant_state,\n blocksize=self.blocksize,\n compress_statistics=self.compress_statistics,\n quant_type=self.quant_type,\n quant_storage=self.quant_storage,\n bnb_quantized=self.bnb_quantized,\n )\n\n return new_param\n\n @classmethod\n def __torch_function__(cls, func, types, args=(), kwargs=None):\n if kwargs is None:\n kwargs = {}\n\n if func in [torch.chunk, torch.split]:\n tensor = args[0]\n\n result = super().__torch_function__(func, types, args, kwargs)\n\n if isinstance(result, tuple):\n return tuple(\n cls(\n data=chunk,\n requires_grad=tensor.requires_grad,\n quant_state=tensor.quant_state,\n blocksize=tensor.blocksize,\n compress_statistics=tensor.compress_statistics,\n quant_type=tensor.quant_type,\n quant_storage=tensor.quant_storage,\n module=tensor.module,\n bnb_quantized=tensor.bnb_quantized,\n )\n for chunk in result\n )\n else:\n return cls(\n data=result,\n requires_grad=tensor.requires_grad,\n quant_state=tensor.quant_state,\n blocksize=tensor.blocksize,\n compress_statistics=tensor.compress_statistics,\n quant_type=tensor.quant_type,\n quant_storage=tensor.quant_storage,\n module=tensor.module,\n 
bnb_quantized=tensor.bnb_quantized,\n                )\n\n        return super().__torch_function__(func, types, args, kwargs)\n\n\ndef fix_4bit_weight_quant_state_from_module(module: Union[\"Embedding4bit\", \"Linear4bit\"]):\n    if getattr(module.weight, \"quant_state\", None) is not None:\n        return\n\n    if getattr(module, \"quant_state\", None) is None:\n        warnings.warn(\n            \"FP4 quantization state not initialized. Please call .cuda() or .to(device) on the LinearFP4 layer first.\",\n        )\n\n    # the quant state got lost when the parameter got converted. This happens for example for fsdp\n    # since we registered the module, we can recover the state here\n    assert module.weight.shape[1] == 1\n    if not isinstance(module.weight, Params4bit):\n        module.weight = Params4bit(module.weight, quant_storage=module.quant_storage, bnb_quantized=True)\n    module.weight.quant_state = module.quant_state\n\n\nclass Linear4bit(nn.Linear):","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.set_compute_type","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules.set_compute_type#L483-L501","kind":"function","name":"set_compute_type","path":"bitsandbytes/nn/modules.py","language":"python","start_line":483,"end_line":501,"context_start_line":463,"context_end_line":521,"code":"            output_features (`int`):\n                Number of output features of the linear layer.\n            bias (`bool`, defaults to `True`):\n                Whether the linear class uses the bias term as well.\n        \"\"\"\n        super().__init__(input_features, output_features, bias, device)\n        self.weight = Params4bit(\n            self.weight.data,\n            requires_grad=False,\n            compress_statistics=compress_statistics,\n            quant_type=quant_type,\n            quant_storage=quant_storage,\n            module=self,\n        )\n        # self.persistent_buffers = [] # TODO consider as way to save quant state\n        self.compute_dtype = compute_dtype\n        self.compute_type_is_set = compute_dtype is not None\n        self.quant_state = None\n        self.quant_storage = quant_storage\n\n    def set_compute_type(self, x):\n        if x.dtype in [torch.float32, torch.bfloat16]:\n            # the input is in a dtype that is safe to compute in, we switch\n            # to this type for speed and stability\n            self.compute_dtype = x.dtype\n        elif x.dtype == torch.float16:\n            # we take the compute dtype passed into the layer\n            if self.compute_dtype in [None, torch.float32] and (x.numel() == x.shape[-1]):\n                # single batch inference with input torch.float16 and compute_dtype float32 -> slow inference when it could be fast\n                # warn the user about this\n                warnings.warn(\n                    \"Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). This will lead to slow inference.\",\n                )\n                warnings.filterwarnings(\"ignore\", message=\".*inference.\")\n            if self.compute_dtype in [None, torch.float32] and (x.numel() != x.shape[-1]):\n                warnings.warn(\n                    \"Input type into Linear4bit is torch.float16, but bnb_4bit_compute_dtype=torch.float32 (default). 
This will lead to slow inference or training speed.\",\n )\n warnings.filterwarnings(\"ignore\", message=\".*inference or training\")\n\n def _save_to_state_dict(self, destination, prefix, keep_vars):\n \"\"\"\n save weight and bias,\n then fill state_dict with components of quant_state\n \"\"\"\n super()._save_to_state_dict(destination, prefix, keep_vars) # saving weight and bias\n\n if getattr(self.weight, \"quant_state\", None) is not None:\n for k, v in self.weight.quant_state.as_dict(packed=True).items():\n destination[prefix + \"weight.\" + k] = v if keep_vars else v.detach()\n\n def forward(self, x: torch.Tensor):\n fix_4bit_weight_quant_state_from_module(self)\n\n # weights are cast automatically as Int8Params, but the bias has to be cast manually\n if self.bias is not None and self.bias.dtype != x.dtype:\n self.bias.data = self.bias.data.to(x.dtype)\n\n if not self.compute_type_is_set:","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules._save_to_state_dict","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules._save_to_state_dict#L971-L993","kind":"function","name":"_save_to_state_dict","path":"bitsandbytes/nn/modules.py","language":"python","start_line":971,"end_line":993,"context_start_line":951,"context_end_line":1013,"code":" input_features (`int`):\n Number of input features of the linear layer.\n output_features (`int`):\n Number of output features of the linear layer.\n bias (`bool`, defaults to `True`):\n Whether the linear class uses the bias term as well.\n \"\"\"\n super().__init__(input_features, output_features, bias, device)\n self.state = bnb.MatmulLtState()\n self.index = index\n\n self.state.threshold = threshold\n self.state.has_fp16_weights = has_fp16_weights\n\n if threshold > 0.0 and not has_fp16_weights:\n self.state.use_pool = True\n\n self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights)\n self._register_load_state_dict_pre_hook(maybe_rearrange_weight)\n\n def _save_to_state_dict(self, destination, prefix, keep_vars):\n super()._save_to_state_dict(destination, prefix, keep_vars)\n\n # we only need to save SCB as extra data, because CB for quantized weights is already stored in weight.data\n scb_name = \"SCB\"\n\n # case 1: .cuda was called, SCB is in self.weight\n param_from_weight = getattr(self.weight, scb_name)\n # case 2: self.init_8bit_state was called, SCB is in self.state\n param_from_state = getattr(self.state, scb_name)\n\n key_name = prefix + f\"{scb_name}\"\n\n # We now only save in row-major. 
This format information is stored for backwards compatibility.\n format_name = prefix + \"weight_format\"\n\n if not self.state.has_fp16_weights:\n if param_from_weight is not None:\n destination[key_name] = param_from_weight if keep_vars else param_from_weight.detach()\n destination[format_name] = torch.tensor(0, dtype=torch.uint8)\n elif param_from_state is not None:\n destination[key_name] = param_from_state if keep_vars else param_from_state.detach()\n destination[format_name] = torch.tensor(0, dtype=torch.uint8)\n\n def _load_from_state_dict(\n self,\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n ):\n super()._load_from_state_dict(\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n )","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules._forward_with_partial_dequantize","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules._forward_with_partial_dequantize#L817-L848","kind":"function","name":"_forward_with_partial_dequantize","path":"bitsandbytes/nn/modules.py","language":"python","start_line":817,"end_line":848,"context_start_line":797,"context_end_line":868,"code":" super().__init__(num_embeddings, embedding_dim, device=device, dtype=dtype)\n self.dtype = self.weight.data.dtype\n\n self.weight = Params4bit(\n self.weight.data,\n requires_grad=False,\n compress_statistics=None,\n quant_type=quant_type,\n quant_storage=quant_storage,\n module=self,\n )\n\n blocksize = self.weight.blocksize\n\n if embedding_dim % blocksize != 0:\n warnings.warn(\n f\"Embedding size {embedding_dim} is not divisible by block size {blocksize}. 
\"\n \"This will lead to slow inference.\",\n )\n\n def _forward_with_partial_dequantize(self, input: Tensor):\n assert self.embedding_dim % self.weight.quant_state.blocksize == 0\n\n w_4bit_uint8 = self.weight.data.view(torch.uint8).view(self.num_embeddings * self.embedding_dim // 2, 1)\n\n output_4bit = torch.nn.functional.embedding(\n weight=w_4bit_uint8.view(self.num_embeddings, self.embedding_dim // 2),\n input=input,\n ).view(-1, 1)\n assert output_4bit.shape == (input.numel() * self.embedding_dim // 2, 1)\n\n blocks_per_emb = self.embedding_dim // self.weight.blocksize\n\n absmax = self.weight.quant_state.absmax\n assert absmax.shape == (self.num_embeddings * blocks_per_emb,)\n\n output_absmax = torch.nn.functional.embedding(\n weight=absmax.view(self.num_embeddings, blocks_per_emb),\n input=input,\n ).view(\n -1,\n )\n assert output_absmax.shape == (input.numel() * blocks_per_emb,)\n\n output_quant_state = copy.deepcopy(self.weight.quant_state)\n output_quant_state.absmax = output_absmax\n output_quant_state.shape = torch.Size((*input.shape, self.embedding_dim))\n\n output = bnb.functional.dequantize_4bit(output_4bit, output_quant_state)\n assert output.shape == (*input.shape, self.embedding_dim)\n\n return output.to(self.dtype)\n\n def _save_to_state_dict(self, destination, prefix, keep_vars):\n raise NotImplementedError(\"Saving Embedding4bit module is not implemented\")\n\n def forward(self, input: Tensor) -> Tensor:\n fix_4bit_weight_quant_state_from_module(self)\n\n if self.embedding_dim % self.weight.quant_state.blocksize == 0:\n return self._forward_with_partial_dequantize(input)\n\n dequantized_weight = bnb.functional.dequantize_4bit(self.weight.data, self.weight.quant_state)\n\n return torch.nn.functional.embedding(\n weight=dequantized_weight,\n input=input,\n ).to(self.dtype)\n\n\nclass EmbeddingFP4(Embedding4bit):\n def __init__(","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules._load_from_state_dict","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules._load_from_state_dict#L995-L1032","kind":"function","name":"_load_from_state_dict","path":"bitsandbytes/nn/modules.py","language":"python","start_line":995,"end_line":1032,"context_start_line":975,"context_end_line":1052,"code":" scb_name = \"SCB\"\n\n # case 1: .cuda was called, SCB is in self.weight\n param_from_weight = getattr(self.weight, scb_name)\n # case 2: self.init_8bit_state was called, SCB is in self.state\n param_from_state = getattr(self.state, scb_name)\n\n key_name = prefix + f\"{scb_name}\"\n\n # We now only save in row-major. 
This format information is stored for backwards compatibility.\n format_name = prefix + \"weight_format\"\n\n if not self.state.has_fp16_weights:\n if param_from_weight is not None:\n destination[key_name] = param_from_weight if keep_vars else param_from_weight.detach()\n destination[format_name] = torch.tensor(0, dtype=torch.uint8)\n elif param_from_state is not None:\n destination[key_name] = param_from_state if keep_vars else param_from_state.detach()\n destination[format_name] = torch.tensor(0, dtype=torch.uint8)\n\n def _load_from_state_dict(\n self,\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n ):\n super()._load_from_state_dict(\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n )\n unexpected_copy = list(unexpected_keys)\n\n for key in unexpected_copy:\n input_name = key[len(prefix) :]\n if input_name == \"SCB\":\n if self.weight.SCB is None:\n # buffers not yet initialized, can't access them directly without quantizing first\n raise RuntimeError(\n \"Loading a quantized checkpoint into non-quantized Linear8bitLt is \"\n \"not supported. Please call module.cuda() before module.load_state_dict()\",\n )\n\n input_param = state_dict[key]\n self.weight.SCB.copy_(input_param)\n\n if self.state.SCB is not None:\n self.state.SCB = self.weight.SCB\n\n unexpected_keys.remove(key)\n\n def init_8bit_state(self):\n self.state.CB = self.weight.CB\n self.state.SCB = self.weight.SCB\n self.weight.CB = None\n self.weight.SCB = None\n\n def forward(self, x: torch.Tensor):\n self.state.is_training = self.training\n if self.weight.CB is not None:\n self.init_8bit_state()\n\n # weights are cast automatically as Int8Params, but the bias has to be cast manually\n if self.bias is not None and self.bias.dtype != x.dtype:\n self.bias.data = self.bias.data.to(x.dtype)\n\n out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state)\n\n if not self.state.has_fp16_weights and self.state.CB is not None:\n self.weight.data = self.state.CB","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.init_8bit_state","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules.init_8bit_state#L1108-L1112","kind":"function","name":"init_8bit_state","path":"bitsandbytes/nn/modules.py","language":"python","start_line":1108,"end_line":1112,"context_start_line":1088,"context_end_line":1120,"code":" output_features,\n bias=True,\n has_fp16_weights=True,\n memory_efficient_backward=False,\n threshold=0.0,\n index=None,\n device=None,\n ):\n super().__init__(input_features, output_features, bias, device)\n self.state = bnb.MatmulLtState()\n self.index = index\n\n self.state.threshold = threshold\n self.state.has_fp16_weights = has_fp16_weights\n self.state.memory_efficient_backward = memory_efficient_backward\n if threshold > 0.0 and not has_fp16_weights:\n self.state.use_pool = True\n\n self.weight = Int8Params(self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights)\n\n def init_8bit_state(self):\n self.state.CB = self.weight.CB\n self.state.SCB = self.weight.SCB\n self.weight.CB = None\n self.weight.SCB = None\n\n def forward(self, x):\n self.state.is_training = self.training\n\n if self.weight.CB is not None:\n self.init_8bit_state()\n\n return bnb.matmul_mixed(x.half(), self.weight.half(), bias=None, state=self.state) + 
self.bias","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.forward_with_outliers","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules.forward_with_outliers#L1063-L1064","kind":"function","name":"forward_with_outliers","path":"bitsandbytes/nn/modules.py","language":"python","start_line":1063,"end_line":1064,"context_start_line":1043,"context_end_line":1084,"code":" self.init_8bit_state()\n\n # weights are cast automatically as Int8Params, but the bias has to be cast manually\n if self.bias is not None and self.bias.dtype != x.dtype:\n self.bias.data = self.bias.data.to(x.dtype)\n\n out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state)\n\n if not self.state.has_fp16_weights and self.state.CB is not None:\n self.weight.data = self.state.CB\n\n return out\n\n\nclass OutlierAwareLinear(nn.Linear):\n def __init__(self, input_features, output_features, bias=True, device=None):\n super().__init__(input_features, output_features, bias, device)\n self.outlier_dim = None\n self.is_quantized = False\n\n def forward_with_outliers(self, x, outlier_idx):\n raise NotImplementedError(\"Please override the `forward_with_outliers(self, x, outlier_idx)` function\")\n\n def quantize_weight(self, w, outlier_idx):\n raise NotImplementedError(\"Please override the `quantize_weights(self, w, outlier_idx)` function\")\n\n def forward(self, x):\n if self.outlier_dim is None:\n tracer = OutlierTracer.get_instance()\n if not tracer.is_initialized():\n print(\"Please use OutlierTracer.initialize(model) before using the OutlierAwareLinear layer\")\n outlier_idx = tracer.get_outliers(self.weight)\n # print(outlier_idx, tracer.get_hvalue(self.weight))\n self.outlier_dim = outlier_idx\n\n if not self.is_quantized:\n w = self.quantize_weight(self.weight, self.outlier_dim)\n self.weight.data.copy_(w)\n self.is_quantized = True\n\n\nclass SwitchBackLinearBnb(nn.Linear):","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.nn.modules.quantize_weight","uri":"program://bitsandbytes/function/bitsandbytes.nn.modules.quantize_weight#L1066-L1067","kind":"function","name":"quantize_weight","path":"bitsandbytes/nn/modules.py","language":"python","start_line":1066,"end_line":1067,"context_start_line":1046,"context_end_line":1087,"code":" if self.bias is not None and self.bias.dtype != x.dtype:\n self.bias.data = self.bias.data.to(x.dtype)\n\n out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state)\n\n if not self.state.has_fp16_weights and self.state.CB is not None:\n self.weight.data = self.state.CB\n\n return out\n\n\nclass OutlierAwareLinear(nn.Linear):\n def __init__(self, input_features, output_features, bias=True, device=None):\n super().__init__(input_features, output_features, bias, device)\n self.outlier_dim = None\n self.is_quantized = False\n\n def forward_with_outliers(self, x, outlier_idx):\n raise NotImplementedError(\"Please override the `forward_with_outliers(self, x, outlier_idx)` function\")\n\n def quantize_weight(self, w, outlier_idx):\n raise NotImplementedError(\"Please override the `quantize_weights(self, w, outlier_idx)` function\")\n\n def forward(self, x):\n if self.outlier_dim is None:\n tracer = OutlierTracer.get_instance()\n if not tracer.is_initialized():\n print(\"Please use OutlierTracer.initialize(model) before using the OutlierAwareLinear layer\")\n 
outlier_idx = tracer.get_outliers(self.weight)\n # print(outlier_idx, tracer.get_hvalue(self.weight))\n self.outlier_dim = outlier_idx\n\n if not self.is_quantized:\n w = self.quantize_weight(self.weight, self.outlier_dim)\n self.weight.data.copy_(w)\n self.is_quantized = True\n\n\nclass SwitchBackLinearBnb(nn.Linear):\n def __init__(\n self,\n input_features,","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.autograd._functions","uri":"program://bitsandbytes/module/bitsandbytes.autograd._functions#L1-L448","kind":"module","name":"bitsandbytes.autograd._functions","path":"bitsandbytes/autograd/_functions.py","language":"python","start_line":1,"end_line":448,"context_start_line":1,"context_end_line":448,"code":"from dataclasses import dataclass\nfrom math import prod\nfrom typing import Callable, Optional\nimport warnings\nfrom warnings import warn\n\nimport torch\nfrom typing_extensions import deprecated\n\nimport bitsandbytes.functional as F\n\n# The inverse transformation for the colTuring and colAmpere format were contributed by Alex Borzunov:\n# https://github.com/bigscience-workshop/petals/blob/main/src/petals/utils/linear8bitlt_patch.py\n\n\n\"\"\"\n This class pools outlier dimensions across layers.\n This is particularly important for small models where outlier features\n are less systematic and occur with low frequency.\n\"\"\"\n\n\nclass GlobalOutlierPooler:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.outliers = set()\n self.model_dim = None\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def add_outliers(self, outlier_idx, feature_dim):\n if self.model_dim is None:\n self.model_dim = feature_dim\n if feature_dim != self.model_dim:\n return # we do not encode outliers for the 2nd FFN layer\n\n self.outliers.update(outlier_idx.tolist())\n\n def get_current_outlier_idx(self):\n return torch.Tensor(list(self.outliers)).to(torch.int64)\n\n\n@deprecated(\n \"This function is deprecated and will be removed in a future release.\",\n category=FutureWarning,\n)\ndef get_inverse_transform_indices(\n transform_tile: Callable[[torch.Tensor], torch.Tensor],\n tile_size: tuple[int, int],\n):\n \"\"\"\n Compute a permutation of indices that invert the specified (tiled) matrix transformation\n\n :param transform_tile: a function that applies forward transform to a tensor of shape [dim1, dim2]\n :param tile_size: higher-level tile dimensions, i.e. 
(8, 32) for Turing and (32, 32) for Ampere\n :note: we assume that tile_transform applies to a cpu-based int8 tensor of shape tile_size\n :example: transform_tile function for the turing layout (bitsandbytes.functional as F)\n :returns: indices\n \"\"\"\n d1, d2 = tile_size\n assert 0 < d1 * d2 < 2**64\n tile_indices = torch.arange(d1 * d2, dtype=torch.int64).view(d1, d2)\n # encode each position in tile as a tuple of <= 8 unique bytes\n permuted_tile_indices = torch.zeros_like(tile_indices)\n for i in range(8):\n # select i-th byte, apply transformation and trace where each index ended up\n ith_dim_indices = torch.div(tile_indices, 256**i, rounding_mode=\"trunc\") % 256\n sample_tile_i = (ith_dim_indices - 128).to(torch.int8).contiguous()\n assert torch.all(sample_tile_i.int() + 128 == ith_dim_indices), \"int overflow\"\n permuted_tile_i = transform_tile(sample_tile_i)\n ith_permuted_indices = permuted_tile_i.to(tile_indices.dtype) + 128\n permuted_tile_indices += ith_permuted_indices * (256**i)\n if d1 * d2 < 256**i:\n break # if all indices fit in i bytes, stop early\n return permuted_tile_indices\n\n\n_is_compiling = torch.compiler.is_compiling\n\n\n@deprecated(\n \"This function is deprecated and will be removed in a future release.\",\n category=FutureWarning,\n)\ndef undo_layout(permuted_tensor: torch.Tensor, tile_indices: torch.LongTensor) -> torch.Tensor:\n \"\"\"\n Undo a tiled permutation such as turing or ampere layout\n\n :param permuted_tensor: torch tensor in a permuted layout\n :param tile_indices: reverse transformation indices, from get_inverse_transform_indices\n :return: contiguous row-major tensor\n \"\"\"\n (rows, cols), (tile_rows, tile_cols) = permuted_tensor.shape, tile_indices.shape\n assert rows % tile_rows == cols % tile_cols == 0, \"tensor must contain a whole number of tiles\"\n tensor = permuted_tensor.reshape(-1, tile_indices.numel()).t()\n outputs = torch.empty_like(tensor) # note: not using .index_copy because it was slower on cuda\n outputs[tile_indices.flatten()] = tensor\n outputs = outputs.reshape(tile_rows, tile_cols, cols // tile_cols, rows // tile_rows)\n outputs = outputs.permute(3, 0, 2, 1) # (rows // tile_rows, tile_rows), (cols // tile_cols, tile_cols)\n return outputs.reshape(rows, cols).contiguous()\n\n\n@dataclass\nclass MatmulLtState:\n _tile_indices: Optional[torch.Tensor] = None # TODO: remove\n\n force_no_igemmlt: bool = False\n\n CB: Optional[torch.Tensor] = None\n CxB: Optional[torch.Tensor] = None # TODO: Deprecate/remove\n SB: Optional[torch.Tensor] = None\n SCB: Optional[torch.Tensor] = None\n\n CxBt: Optional[torch.Tensor] = None # TODO: Deprecate/remove\n SBt: Optional[torch.Tensor] = None\n CBt: Optional[torch.Tensor] = None\n\n subB: Optional[torch.Tensor] = None\n\n outlier_pool: Optional[GlobalOutlierPooler] = None\n has_accumulated_gradients = False\n threshold = 0.0\n idx: Optional[torch.Tensor] = None\n is_training = True\n has_fp16_weights = True\n use_pool = False\n formatB = \"row\" # TODO: Deprecate/remove\n\n def reset_grads(self):\n self.CB = None\n self.CxB = None\n self.SB = None\n self.SCB = None\n\n self.CxBt = None\n self.SBt = None\n self.CBt = None\n\n @property\n def tile_indices(self):\n raise ValueError(\"tile_indices is no longer supported.\")\n\n\nclass MatMul8bitLt(torch.autograd.Function):\n @staticmethod\n def forward(\n ctx: torch.autograd.function.FunctionCtx,\n A: torch.Tensor,\n B: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n bias: Optional[torch.Tensor] = None,\n state: 
Optional[MatmulLtState] = None,\n ):\n state = state or MatmulLtState()\n\n # default of pytorch behavior if inputs are empty\n ctx.is_empty = False\n if prod(A.shape) == 0:\n ctx.is_empty = True\n ctx.A = A\n ctx.B = B\n ctx.bias = bias\n if A.shape[-1] == B.shape[0]:\n return torch.empty(A.shape[:-1] + B.shape[1:], dtype=A.dtype, device=A.device)\n else:\n return torch.empty(A.shape[:-1] + B.shape[:1], dtype=A.dtype, device=A.device)\n\n input_shape = A.shape\n\n # Cast A to fp16\n if A.dtype != torch.float16 and not _is_compiling():\n warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n\n if len(A.shape) == 3:\n A = A.reshape(-1, A.shape[-1])\n\n # 1. Quantize A. Note that as a side-effect, outliers are suppressed in CA/CAt.\n if ctx.needs_input_grad[1]:\n # Slower path\n CA, CAt, SCA, SCAt, outlier_cols = F.int8_double_quant(A.to(torch.float16), threshold=state.threshold)\n else:\n # Fast path\n CA, SCA, outlier_cols = F.int8_vectorwise_quant(A.to(torch.float16), threshold=state.threshold)\n CAt = SCAt = None\n\n has_grad = False\n\n if state.has_fp16_weights or state.CB is None:\n has_grad = getattr(B, \"grad\", None) is not None\n is_transposed = not B.is_contiguous() and B.shape[0] == B.stride(1)\n if is_transposed:\n B = B.contiguous()\n\n if (state.is_training and not has_grad) or state.CB is None or state.SCB is None:\n state.reset_grads()\n\n # 2. Quantize B\n state.CB, state.SCB, _ = F.int8_vectorwise_quant(B.to(torch.float16))\n\n # Handle sparse decomposition\n if state.threshold > 0.0:\n state.idx = outlier_cols\n\n # Mixed Int8 Matmul + Dequant + Bias\n output, subA = torch.ops.bitsandbytes.int8_mixed_scaled_mm(\n A,\n CA,\n state.CB,\n SCA,\n state.SCB,\n outlier_cols,\n bias,\n )\n\n else:\n # Int8 Matmul + Dequant + Bias\n output = torch.ops.bitsandbytes.int8_scaled_mm.default(\n CA, state.CB, SCA, state.SCB, bias=bias, dtype=A.dtype\n )\n subA = None\n\n # 5. 
Save state\n ctx.state = state\n\n ctx.grad_shape = input_shape\n ctx.dtype_A = A.dtype\n ctx.dtype_bias = None if bias is None else bias.dtype\n\n if any(ctx.needs_input_grad[:2]):\n ctx.tensors = (CAt, subA, A)\n ctx.tensor_states = (SCAt, state.idx)\n else:\n ctx.tensors = [None, None, None]\n ctx.tensor_states = (None, None)\n ctx.save_for_backward(None, None)\n\n output_shape = (*input_shape[:-1], state.CB.shape[0])\n\n if len(input_shape) == 3:\n return output.reshape(output_shape)\n\n return output\n\n @staticmethod\n def backward(ctx: torch.autograd.function.FunctionCtx, grad_output: torch.Tensor):\n if ctx.is_empty:\n bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)\n return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None\n\n req_gradA, req_gradB, _, req_gradBias, _ = ctx.needs_input_grad\n CAt, subA, A = ctx.tensors\n SCAt, idx = ctx.tensor_states\n state: MatmulLtState = ctx.state\n grad_A = grad_B = grad_bias = None\n\n if req_gradBias:\n # compute grad_bias first before changing grad_output dtype\n grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)\n\n # Cast grad_output to fp16\n if len(grad_output.shape) == 3:\n grad_output = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()\n\n if req_gradB:\n Cgrad, _, _, SCgradt, _ = F.int8_double_quant(grad_output.to(torch.float16))\n\n grad_B = torch.ops.bitsandbytes.int8_scaled_mm.default(\n Cgrad.t().contiguous(),\n CAt.t(),\n SCgradt,\n SCAt,\n dtype=torch.float16,\n )\n\n if state.threshold > 0.0 and subA is not None and subA.numel() > 0:\n grad_B[:, idx] += torch.matmul(grad_output.t(), subA)\n\n if req_gradA:\n if state.CB is not None:\n CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))\n grad_A = torch.matmul(grad_output.to(ctx.dtype_A), CB).view(ctx.grad_shape)\n else:\n raise Exception(\"State must contain CB matrix for backward\")\n\n return grad_A, grad_B, None, grad_bias, None\n\n\nclass MatMul8bitFp(torch.autograd.Function):\n # For Intel CPU and XPU MatMul8bitFp is much faster (~3x) than MatMul8bitLt in finetune.\n # Because the MatMul8bitLt has more mechanisms in computing grad.\n # We don't have fast kernel for quant/dequant 8bit in CPU/XPU, so it's very slow.\n # We'd like to use dequant + matmul to run finetune with good performance.\n\n @staticmethod\n def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState):\n if state.has_fp16_weights or state.CB is None:\n has_grad = getattr(B, \"grad\", None) is not None\n is_transposed = not B.is_contiguous() and B.shape[0] == B.stride(1)\n if is_transposed:\n B = B.contiguous()\n\n if (state.is_training and not has_grad) or state.CB is None or state.SCB is None:\n state.reset_grads()\n state.CB, state.SCB, _ = F.int8_vectorwise_quant(B.to(torch.float16))\n B = state.CB\n\n CB = state.CB.data.to(A.dtype).mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))\n output = torch.nn.functional.linear(A, CB, bias)\n ctx.state = state\n ctx.dtype_A = A.dtype\n ctx.grad_shape = A.shape\n ctx.A = A\n ctx.dtype_bias = None if bias is None else bias.dtype\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n req_gradA, req_gradB, _, req_gradBias, _ = ctx.needs_input_grad\n A = ctx.A\n state = ctx.state\n grad_A = grad_B = grad_bias = None\n if req_gradBias:\n # compute grad_bias first before changing grad_output dtype\n grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)\n\n # Cast grad_output to fp16\n if len(grad_output.shape) == 3:\n grad_output = grad_output.reshape(-1, 
grad_output.shape[-1]).contiguous()\n\n        if req_gradB:\n            grad_B = torch.matmul(A.t(), grad_output).t()\n\n        if req_gradA:\n            if state.CB is not None:\n                CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))\n                grad_A = torch.matmul(grad_output.to(ctx.dtype_A), CB).view(ctx.grad_shape)\n            else:\n                raise Exception(\"State must contain CB matrix for backward\")\n\n        return grad_A, grad_B, None, grad_bias, None\n\n\nclass MatMul4Bit(torch.autograd.Function):\n    # forward is the same, but we added the fallback for pre-turing GPUs\n    # backward is mostly the same, but adds one extra clause (see \"elif state.CxB is not None\")\n\n    @staticmethod\n    def forward(ctx, A, B, out=None, bias=None, quant_state: Optional[F.QuantState] = None):\n        # default of pytorch behavior if inputs are empty\n        ctx.is_empty = False\n        if prod(A.shape) == 0:\n            ctx.is_empty = True\n            ctx.A = A\n            ctx.B = B\n            ctx.bias = bias\n            B_shape = quant_state.shape\n            if A.shape[-1] == B_shape[0]:\n                return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)\n            else:\n                return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)\n\n        # 1. Dequantize\n        # 2. MatmulnN\n        output = torch.nn.functional.linear(A, F.dequantize_4bit(B, quant_state).to(A.dtype).t(), bias)\n\n        # 3. Save state\n        ctx.state = quant_state\n        ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype\n\n        if any(ctx.needs_input_grad[:2]):\n            ctx.tensors = (None, B)\n        else:\n            ctx.tensors = (None, None)\n\n        return output\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        if ctx.is_empty:\n            bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)\n            return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None\n\n        req_gradA, _, _, req_gradBias, _ = ctx.needs_input_grad\n        _, B = ctx.tensors\n\n        grad_A, grad_B, grad_bias = None, None, None\n\n        if req_gradBias:\n            # compute grad_bias first before changing grad_output dtype\n            grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)\n\n        # not supported by PyTorch. TODO: create work-around\n        # if req_gradB: grad_B = torch.matmul(grad_output.t(), A)\n        if req_gradA:\n            grad_A = torch.matmul(grad_output, F.dequantize_4bit(B, ctx.state).to(grad_output.dtype).t())\n\n        return grad_A, grad_B, None, grad_bias, None\n\n\ndef matmul(\n    A: torch.Tensor,\n    B: torch.Tensor,\n    out: Optional[torch.Tensor] = None,\n    state: Optional[MatmulLtState] = None,\n    threshold=0.0,\n    bias: Optional[torch.Tensor] = None,\n):\n    state = state or MatmulLtState()\n    if threshold > 0.0:\n        state.threshold = threshold\n    # MatMul8bitLt is slower because no fast kernel for quant/dequant 8bit in CPU/XPU\n    if state.is_training:\n        if A.device.type in (\"cpu\", \"xpu\"):\n            return MatMul8bitFp.apply(A, B, out, bias, state)\n    return MatMul8bitLt.apply(A, B, out, bias, state)\n\n\ndef matmul_4bit(\n    A: torch.Tensor,\n    B: torch.Tensor,\n    quant_state: F.QuantState,\n    out: Optional[torch.Tensor] = None,\n    bias: Optional[torch.Tensor] = None,\n):\n    assert quant_state is not None\n\n    if A.numel() == A.shape[-1] and A.requires_grad == False and A.device.type != \"hpu\":\n        if A.shape[-1] % quant_state.blocksize != 0:\n            warn(\n                f\"The hidden dimension of some matrices is not a multiple of {quant_state.blocksize} and efficient inference kernels are not supported for these (slow). 
Matrix input size found: {A.shape}\",\n )\n return MatMul4Bit.apply(A, B, out, bias, quant_state)\n else:\n out = F.gemv_4bit(A, B.t(), out, state=quant_state)\n if bias is not None:\n out += bias\n return out\n else:\n return MatMul4Bit.apply(A, B, out, bias, quant_state)","source_hash":"f827d38aa68d400d32ca420569d2003add15aa7dc1fcd08af6c959997829dfad","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.autograd._functions.GlobalOutlierPooler","uri":"program://bitsandbytes/class/bitsandbytes.autograd._functions.GlobalOutlierPooler#L23-L49","kind":"class","name":"GlobalOutlierPooler","path":"bitsandbytes/autograd/_functions.py","language":"python","start_line":23,"end_line":49,"context_start_line":3,"context_end_line":69,"code":"from typing import Callable, Optional\nimport warnings\nfrom warnings import warn\n\nimport torch\nfrom typing_extensions import deprecated\n\nimport bitsandbytes.functional as F\n\n# The inverse transformation for the colTuring and colAmpere format were contributed by Alex Borzunov:\n# https://github.com/bigscience-workshop/petals/blob/main/src/petals/utils/linear8bitlt_patch.py\n\n\n\"\"\"\n This class pools outlier dimensions across layers.\n This is particularly important for small models where outlier features\n are less systematic and occur with low frequency.\n\"\"\"\n\n\nclass GlobalOutlierPooler:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.outliers = set()\n self.model_dim = None\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def add_outliers(self, outlier_idx, feature_dim):\n if self.model_dim is None:\n self.model_dim = feature_dim\n if feature_dim != self.model_dim:\n return # we do not encode outliers for the 2nd FFN layer\n\n self.outliers.update(outlier_idx.tolist())\n\n def get_current_outlier_idx(self):\n return torch.Tensor(list(self.outliers)).to(torch.int64)\n\n\n@deprecated(\n \"This function is deprecated and will be removed in a future release.\",\n category=FutureWarning,\n)\ndef get_inverse_transform_indices(\n transform_tile: Callable[[torch.Tensor], torch.Tensor],\n tile_size: tuple[int, int],\n):\n \"\"\"\n Compute a permutation of indices that invert the specified (tiled) matrix transformation\n\n :param transform_tile: a function that applies forward transform to a tensor of shape [dim1, dim2]\n :param tile_size: higher-level tile dimensions, i.e. 
(8, 32) for Turing and (32, 32) for Ampere\n :note: we assume that tile_transform applies to a cpu-based int8 tensor of shape tile_size\n :example: transform_tile function for the turing layout (bitsandbytes.functional as F)\n :returns: indices\n \"\"\"\n d1, d2 = tile_size","source_hash":"f827d38aa68d400d32ca420569d2003add15aa7dc1fcd08af6c959997829dfad","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.autograd._functions.get_inverse_transform_indices","uri":"program://bitsandbytes/function/bitsandbytes.autograd._functions.get_inverse_transform_indices#L56-L84","kind":"function","name":"get_inverse_transform_indices","path":"bitsandbytes/autograd/_functions.py","language":"python","start_line":56,"end_line":84,"context_start_line":36,"context_end_line":104,"code":" cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def add_outliers(self, outlier_idx, feature_dim):\n if self.model_dim is None:\n self.model_dim = feature_dim\n if feature_dim != self.model_dim:\n return # we do not encode outliers for the 2nd FFN layer\n\n self.outliers.update(outlier_idx.tolist())\n\n def get_current_outlier_idx(self):\n return torch.Tensor(list(self.outliers)).to(torch.int64)\n\n\n@deprecated(\n \"This function is deprecated and will be removed in a future release.\",\n category=FutureWarning,\n)\ndef get_inverse_transform_indices(\n transform_tile: Callable[[torch.Tensor], torch.Tensor],\n tile_size: tuple[int, int],\n):\n \"\"\"\n Compute a permutation of indices that invert the specified (tiled) matrix transformation\n\n :param transform_tile: a function that applies forward transform to a tensor of shape [dim1, dim2]\n :param tile_size: higher-level tile dimensions, i.e. (8, 32) for Turing and (32, 32) for Ampere\n :note: we assume that tile_transform applies to a cpu-based int8 tensor of shape tile_size\n :example: transform_tile function for the turing layout (bitsandbytes.functional as F)\n :returns: indices\n \"\"\"\n d1, d2 = tile_size\n assert 0 < d1 * d2 < 2**64\n tile_indices = torch.arange(d1 * d2, dtype=torch.int64).view(d1, d2)\n # encode each position in tile as a tuple of <= 8 unique bytes\n permuted_tile_indices = torch.zeros_like(tile_indices)\n for i in range(8):\n # select i-th byte, apply transformation and trace where each index ended up\n ith_dim_indices = torch.div(tile_indices, 256**i, rounding_mode=\"trunc\") % 256\n sample_tile_i = (ith_dim_indices - 128).to(torch.int8).contiguous()\n assert torch.all(sample_tile_i.int() + 128 == ith_dim_indices), \"int overflow\"\n permuted_tile_i = transform_tile(sample_tile_i)\n ith_permuted_indices = permuted_tile_i.to(tile_indices.dtype) + 128\n permuted_tile_indices += ith_permuted_indices * (256**i)\n if d1 * d2 < 256**i:\n break # if all indices fit in i bytes, stop early\n return permuted_tile_indices\n\n\n_is_compiling = torch.compiler.is_compiling\n\n\n@deprecated(\n \"This function is deprecated and will be removed in a future release.\",\n category=FutureWarning,\n)\ndef undo_layout(permuted_tensor: torch.Tensor, tile_indices: torch.LongTensor) -> torch.Tensor:\n \"\"\"\n Undo a tiled permutation such as turing or ampere layout\n\n :param permuted_tensor: torch tensor in a permuted layout\n :param tile_indices: reverse transformation indices, from get_inverse_transform_indices\n :return: contiguous row-major tensor\n \"\"\"\n (rows, cols), (tile_rows, tile_cols) = permuted_tensor.shape, tile_indices.shape\n assert rows % tile_rows == cols % tile_cols == 0, \"tensor must 
contain a whole number of tiles\"\n tensor = permuted_tensor.reshape(-1, tile_indices.numel()).t()","source_hash":"f827d38aa68d400d32ca420569d2003add15aa7dc1fcd08af6c959997829dfad","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.autograd._functions.undo_layout","uri":"program://bitsandbytes/function/bitsandbytes.autograd._functions.undo_layout#L94-L109","kind":"function","name":"undo_layout","path":"bitsandbytes/autograd/_functions.py","language":"python","start_line":94,"end_line":109,"context_start_line":74,"context_end_line":129,"code":" for i in range(8):\n # select i-th byte, apply transformation and trace where each index ended up\n ith_dim_indices = torch.div(tile_indices, 256**i, rounding_mode=\"trunc\") % 256\n sample_tile_i = (ith_dim_indices - 128).to(torch.int8).contiguous()\n assert torch.all(sample_tile_i.int() + 128 == ith_dim_indices), \"int overflow\"\n permuted_tile_i = transform_tile(sample_tile_i)\n ith_permuted_indices = permuted_tile_i.to(tile_indices.dtype) + 128\n permuted_tile_indices += ith_permuted_indices * (256**i)\n if d1 * d2 < 256**i:\n break # if all indices fit in i bytes, stop early\n return permuted_tile_indices\n\n\n_is_compiling = torch.compiler.is_compiling\n\n\n@deprecated(\n \"This function is deprecated and will be removed in a future release.\",\n category=FutureWarning,\n)\ndef undo_layout(permuted_tensor: torch.Tensor, tile_indices: torch.LongTensor) -> torch.Tensor:\n \"\"\"\n Undo a tiled permutation such as turing or ampere layout\n\n :param permuted_tensor: torch tensor in a permuted layout\n :param tile_indices: reverse transformation indices, from get_inverse_transform_indices\n :return: contiguous row-major tensor\n \"\"\"\n (rows, cols), (tile_rows, tile_cols) = permuted_tensor.shape, tile_indices.shape\n assert rows % tile_rows == cols % tile_cols == 0, \"tensor must contain a whole number of tiles\"\n tensor = permuted_tensor.reshape(-1, tile_indices.numel()).t()\n outputs = torch.empty_like(tensor) # note: not using .index_copy because it was slower on cuda\n outputs[tile_indices.flatten()] = tensor\n outputs = outputs.reshape(tile_rows, tile_cols, cols // tile_cols, rows // tile_rows)\n outputs = outputs.permute(3, 0, 2, 1) # (rows // tile_rows, tile_rows), (cols // tile_cols, tile_cols)\n return outputs.reshape(rows, cols).contiguous()\n\n\n@dataclass\nclass MatmulLtState:\n _tile_indices: Optional[torch.Tensor] = None # TODO: remove\n\n force_no_igemmlt: bool = False\n\n CB: Optional[torch.Tensor] = None\n CxB: Optional[torch.Tensor] = None # TODO: Deprecate/remove\n SB: Optional[torch.Tensor] = None\n SCB: Optional[torch.Tensor] = None\n\n CxBt: Optional[torch.Tensor] = None # TODO: Deprecate/remove\n SBt: Optional[torch.Tensor] = None\n CBt: Optional[torch.Tensor] = None\n\n subB: Optional[torch.Tensor] = None\n\n outlier_pool: Optional[GlobalOutlierPooler] = None","source_hash":"f827d38aa68d400d32ca420569d2003add15aa7dc1fcd08af6c959997829dfad","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.autograd._functions.MatmulLtState","uri":"program://bitsandbytes/class/bitsandbytes.autograd._functions.MatmulLtState#L113-L150","kind":"class","name":"MatmulLtState","path":"bitsandbytes/autograd/_functions.py","language":"python","start_line":113,"end_line":150,"context_start_line":93,"context_end_line":170,"code":")\ndef undo_layout(permuted_tensor: torch.Tensor, tile_indices: torch.LongTensor) -> torch.Tensor:\n \"\"\"\n Undo a tiled permutation such as turing or ampere layout\n\n :param 
permuted_tensor: torch tensor in a permuted layout\n :param tile_indices: reverse transformation indices, from get_inverse_transform_indices\n :return: contiguous row-major tensor\n \"\"\"\n (rows, cols), (tile_rows, tile_cols) = permuted_tensor.shape, tile_indices.shape\n assert rows % tile_rows == cols % tile_cols == 0, \"tensor must contain a whole number of tiles\"\n tensor = permuted_tensor.reshape(-1, tile_indices.numel()).t()\n outputs = torch.empty_like(tensor) # note: not using .index_copy because it was slower on cuda\n outputs[tile_indices.flatten()] = tensor\n outputs = outputs.reshape(tile_rows, tile_cols, cols // tile_cols, rows // tile_rows)\n outputs = outputs.permute(3, 0, 2, 1) # (rows // tile_rows, tile_rows), (cols // tile_cols, tile_cols)\n return outputs.reshape(rows, cols).contiguous()\n\n\n@dataclass\nclass MatmulLtState:\n _tile_indices: Optional[torch.Tensor] = None # TODO: remove\n\n force_no_igemmlt: bool = False\n\n CB: Optional[torch.Tensor] = None\n CxB: Optional[torch.Tensor] = None # TODO: Deprecate/remove\n SB: Optional[torch.Tensor] = None\n SCB: Optional[torch.Tensor] = None\n\n CxBt: Optional[torch.Tensor] = None # TODO: Deprecate/remove\n SBt: Optional[torch.Tensor] = None\n CBt: Optional[torch.Tensor] = None\n\n subB: Optional[torch.Tensor] = None\n\n outlier_pool: Optional[GlobalOutlierPooler] = None\n has_accumulated_gradients = False\n threshold = 0.0\n idx: Optional[torch.Tensor] = None\n is_training = True\n has_fp16_weights = True\n use_pool = False\n formatB = \"row\" # TODO: Deprecate/remove\n\n def reset_grads(self):\n self.CB = None\n self.CxB = None\n self.SB = None\n self.SCB = None\n\n self.CxBt = None\n self.SBt = None\n self.CBt = None\n\n @property\n def tile_indices(self):\n raise ValueError(\"tile_indices is no longer supported.\")\n\n\nclass MatMul8bitLt(torch.autograd.Function):\n @staticmethod\n def forward(\n ctx: torch.autograd.function.FunctionCtx,\n A: torch.Tensor,\n B: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n bias: Optional[torch.Tensor] = None,\n state: Optional[MatmulLtState] = None,\n ):\n state = state or MatmulLtState()\n\n # default of pytorch behavior if inputs are empty\n ctx.is_empty = False\n if prod(A.shape) == 0:\n ctx.is_empty = True\n ctx.A = A\n ctx.B = B","source_hash":"f827d38aa68d400d32ca420569d2003add15aa7dc1fcd08af6c959997829dfad","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.autograd._functions.MatMul8bitLt","uri":"program://bitsandbytes/class/bitsandbytes.autograd._functions.MatMul8bitLt#L153-L294","kind":"class","name":"MatMul8bitLt","path":"bitsandbytes/autograd/_functions.py","language":"python","start_line":153,"end_line":294,"context_start_line":133,"context_end_line":314,"code":" is_training = True\n has_fp16_weights = True\n use_pool = False\n formatB = \"row\" # TODO: Deprecate/remove\n\n def reset_grads(self):\n self.CB = None\n self.CxB = None\n self.SB = None\n self.SCB = None\n\n self.CxBt = None\n self.SBt = None\n self.CBt = None\n\n @property\n def tile_indices(self):\n raise ValueError(\"tile_indices is no longer supported.\")\n\n\nclass MatMul8bitLt(torch.autograd.Function):\n @staticmethod\n def forward(\n ctx: torch.autograd.function.FunctionCtx,\n A: torch.Tensor,\n B: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n bias: Optional[torch.Tensor] = None,\n state: Optional[MatmulLtState] = None,\n ):\n state = state or MatmulLtState()\n\n # default of pytorch behavior if inputs are empty\n ctx.is_empty = False\n if prod(A.shape) == 0:\n 
ctx.is_empty = True\n ctx.A = A\n ctx.B = B\n ctx.bias = bias\n if A.shape[-1] == B.shape[0]:\n return torch.empty(A.shape[:-1] + B.shape[1:], dtype=A.dtype, device=A.device)\n else:\n return torch.empty(A.shape[:-1] + B.shape[:1], dtype=A.dtype, device=A.device)\n\n input_shape = A.shape\n\n # Cast A to fp16\n if A.dtype != torch.float16 and not _is_compiling():\n warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n\n if len(A.shape) == 3:\n A = A.reshape(-1, A.shape[-1])\n\n # 1. Quantize A. Note that as a side-effect, outliers are suppressed in CA/CAt.\n if ctx.needs_input_grad[1]:\n # Slower path\n CA, CAt, SCA, SCAt, outlier_cols = F.int8_double_quant(A.to(torch.float16), threshold=state.threshold)\n else:\n # Fast path\n CA, SCA, outlier_cols = F.int8_vectorwise_quant(A.to(torch.float16), threshold=state.threshold)\n CAt = SCAt = None\n\n has_grad = False\n\n if state.has_fp16_weights or state.CB is None:\n has_grad = getattr(B, \"grad\", None) is not None\n is_transposed = not B.is_contiguous() and B.shape[0] == B.stride(1)\n if is_transposed:\n B = B.contiguous()\n\n if (state.is_training and not has_grad) or state.CB is None or state.SCB is None:\n state.reset_grads()\n\n # 2. Quantize B\n state.CB, state.SCB, _ = F.int8_vectorwise_quant(B.to(torch.float16))\n\n # Handle sparse decomposition\n if state.threshold > 0.0:\n state.idx = outlier_cols\n\n # Mixed Int8 Matmul + Dequant + Bias\n output, subA = torch.ops.bitsandbytes.int8_mixed_scaled_mm(\n A,\n CA,\n state.CB,\n SCA,\n state.SCB,\n outlier_cols,\n bias,\n )\n\n else:\n # Int8 Matmul + Dequant + Bias\n output = torch.ops.bitsandbytes.int8_scaled_mm.default(\n CA, state.CB, SCA, state.SCB, bias=bias, dtype=A.dtype\n )\n subA = None\n\n # 5. 
Save state\n ctx.state = state\n\n ctx.grad_shape = input_shape\n ctx.dtype_A = A.dtype\n ctx.dtype_bias = None if bias is None else bias.dtype\n\n if any(ctx.needs_input_grad[:2]):\n ctx.tensors = (CAt, subA, A)\n ctx.tensor_states = (SCAt, state.idx)\n else:\n ctx.tensors = [None, None, None]\n ctx.tensor_states = (None, None)\n ctx.save_for_backward(None, None)\n\n output_shape = (*input_shape[:-1], state.CB.shape[0])\n\n if len(input_shape) == 3:\n return output.reshape(output_shape)\n\n return output\n\n @staticmethod\n def backward(ctx: torch.autograd.function.FunctionCtx, grad_output: torch.Tensor):\n if ctx.is_empty:\n bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)\n return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None\n\n req_gradA, req_gradB, _, req_gradBias, _ = ctx.needs_input_grad\n CAt, subA, A = ctx.tensors\n SCAt, idx = ctx.tensor_states\n state: MatmulLtState = ctx.state\n grad_A = grad_B = grad_bias = None\n\n if req_gradBias:\n # compute grad_bias first before changing grad_output dtype\n grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)\n\n # Cast grad_output to fp16\n if len(grad_output.shape) == 3:\n grad_output = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()\n\n if req_gradB:\n Cgrad, _, _, SCgradt, _ = F.int8_double_quant(grad_output.to(torch.float16))\n\n grad_B = torch.ops.bitsandbytes.int8_scaled_mm.default(\n Cgrad.t().contiguous(),\n CAt.t(),\n SCgradt,\n SCAt,\n dtype=torch.float16,\n )\n\n if state.threshold > 0.0 and subA is not None and subA.numel() > 0:\n grad_B[:, idx] += torch.matmul(grad_output.t(), subA)\n\n if req_gradA:\n if state.CB is not None:\n CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))\n grad_A = torch.matmul(grad_output.to(ctx.dtype_A), CB).view(ctx.grad_shape)\n else:\n raise Exception(\"State must contain CB matrix for backward\")\n\n return grad_A, grad_B, None, grad_bias, None\n\n\nclass MatMul8bitFp(torch.autograd.Function):\n # For Intel CPU and XPU MatMul8bitFp is much faster (~3x) than MatMul8bitLt in finetune.\n # Because the MatMul8bitLt has more mechanisms in computing grad.\n # We don't have fast kernel for quant/dequant 8bit in CPU/XPU, so it's very slow.\n # We'd like to use dequant + matmul to run finetune with good performance.\n\n @staticmethod\n def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState):\n if state.has_fp16_weights or state.CB is None:\n has_grad = getattr(B, \"grad\", None) is not None\n is_transposed = not B.is_contiguous() and B.shape[0] == B.stride(1)\n if is_transposed:\n B = B.contiguous()\n\n if (state.is_training and not has_grad) or state.CB is None or state.SCB is None:\n state.reset_grads()\n state.CB, state.SCB, _ = F.int8_vectorwise_quant(B.to(torch.float16))\n B = state.CB","source_hash":"f827d38aa68d400d32ca420569d2003add15aa7dc1fcd08af6c959997829dfad","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.autograd._functions.MatMul8bitFp","uri":"program://bitsandbytes/class/bitsandbytes.autograd._functions.MatMul8bitFp#L297-L349","kind":"class","name":"MatMul8bitFp","path":"bitsandbytes/autograd/_functions.py","language":"python","start_line":297,"end_line":349,"context_start_line":277,"context_end_line":369,"code":" Cgrad.t().contiguous(),\n CAt.t(),\n SCgradt,\n SCAt,\n dtype=torch.float16,\n )\n\n if state.threshold > 0.0 and subA is not None and subA.numel() > 0:\n grad_B[:, idx] += torch.matmul(grad_output.t(), subA)\n\n if req_gradA:\n if state.CB is not 
None:\n CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))\n grad_A = torch.matmul(grad_output.to(ctx.dtype_A), CB).view(ctx.grad_shape)\n else:\n raise Exception(\"State must contain CB matrix for backward\")\n\n return grad_A, grad_B, None, grad_bias, None\n\n\nclass MatMul8bitFp(torch.autograd.Function):\n # For Intel CPU and XPU MatMul8bitFp is much faster (~3x) than MatMul8bitLt in finetune.\n # Because the MatMul8bitLt has more mechanisms in computing grad.\n # We don't have fast kernel for quant/dequant 8bit in CPU/XPU, so it's very slow.\n # We'd like to use dequant + matmul to run finetune with good performance.\n\n @staticmethod\n def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState):\n if state.has_fp16_weights or state.CB is None:\n has_grad = getattr(B, \"grad\", None) is not None\n is_transposed = not B.is_contiguous() and B.shape[0] == B.stride(1)\n if is_transposed:\n B = B.contiguous()\n\n if (state.is_training and not has_grad) or state.CB is None or state.SCB is None:\n state.reset_grads()\n state.CB, state.SCB, _ = F.int8_vectorwise_quant(B.to(torch.float16))\n B = state.CB\n\n CB = state.CB.data.to(A.dtype).mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))\n output = torch.nn.functional.linear(A, CB, bias)\n ctx.state = state\n ctx.dtype_A = A.dtype\n ctx.grad_shape = A.shape\n ctx.A = A\n ctx.dtype_bias = None if bias is None else bias.dtype\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n req_gradA, req_gradB, _, req_gradBias, _ = ctx.needs_input_grad\n A = ctx.A\n state = ctx.state\n grad_A = grad_B = grad_bias = None\n if req_gradBias:\n # compute grad_bias first before changing grad_output dtype\n grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)\n\n # Cast grad_output to fp16\n if len(grad_output.shape) == 3:\n grad_output = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()\n\n if req_gradB:\n grad_B = torch.matmul(A.t(), grad_output).t()\n\n if req_gradA:\n if state.CB is not None:\n CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))\n grad_A = torch.matmul(grad_output.to(ctx.dtype_A), CB).view(ctx.grad_shape)\n else:\n raise Exception(\"State must contain CB matrix for backward\")\n\n return grad_A, grad_B, None, grad_bias, None\n\n\nclass MatMul4Bit(torch.autograd.Function):\n # forward is the same, but we added the fallback for pre-turing GPUs\n # backward is mostly the same, but adds one extra clause (see \"elif state.CxB is not None\")\n\n @staticmethod\n def forward(ctx, A, B, out=None, bias=None, quant_state: Optional[F.QuantState] = None):\n # default of pytorch behavior if inputs are empty\n ctx.is_empty = False\n if prod(A.shape) == 0:\n ctx.is_empty = True\n ctx.A = A\n ctx.B = B\n ctx.bias = bias\n B_shape = quant_state.shape\n if A.shape[-1] == B_shape[0]:\n return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)\n else:\n return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)","source_hash":"f827d38aa68d400d32ca420569d2003add15aa7dc1fcd08af6c959997829dfad","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.autograd._functions.MatMul4Bit","uri":"program://bitsandbytes/class/bitsandbytes.autograd._functions.MatMul4Bit#L352-L406","kind":"class","name":"MatMul4Bit","path":"bitsandbytes/autograd/_functions.py","language":"python","start_line":352,"end_line":406,"context_start_line":332,"context_end_line":426,"code":" # compute grad_bias first before changing grad_output 
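`MatMul8bitFp` is what the `matmul` dispatcher (further below) selects on CPU and XPU during training. A hedged usage sketch, assuming a bitsandbytes build with CPU kernels available; `MatmulLtState` is imported from this same module:

```python
import torch
import bitsandbytes as bnb
from bitsandbytes.autograd._functions import MatmulLtState

A = torch.randn(4, 64, requires_grad=True)   # CPU tensor -> MatMul8bitFp path
B = torch.randn(128, 64)                     # [out_features, in_features]
out = bnb.matmul(A, B, state=MatmulLtState())
out.sum().backward()                         # grads via dequant + fp matmul
```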
dtype\n grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)\n\n # Cast grad_output to fp16\n if len(grad_output.shape) == 3:\n grad_output = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()\n\n if req_gradB:\n grad_B = torch.matmul(A.t(), grad_output).t()\n\n if req_gradA:\n if state.CB is not None:\n CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))\n grad_A = torch.matmul(grad_output.to(ctx.dtype_A), CB).view(ctx.grad_shape)\n else:\n raise Exception(\"State must contain CB matrix for backward\")\n\n return grad_A, grad_B, None, grad_bias, None\n\n\nclass MatMul4Bit(torch.autograd.Function):\n # forward is the same, but we added the fallback for pre-turing GPUs\n # backward is mostly the same, but adds one extra clause (see \"elif state.CxB is not None\")\n\n @staticmethod\n def forward(ctx, A, B, out=None, bias=None, quant_state: Optional[F.QuantState] = None):\n # default of pytorch behavior if inputs are empty\n ctx.is_empty = False\n if prod(A.shape) == 0:\n ctx.is_empty = True\n ctx.A = A\n ctx.B = B\n ctx.bias = bias\n B_shape = quant_state.shape\n if A.shape[-1] == B_shape[0]:\n return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)\n else:\n return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)\n\n # 1. Dequantize\n # 2. MatmulnN\n output = torch.nn.functional.linear(A, F.dequantize_4bit(B, quant_state).to(A.dtype).t(), bias)\n\n # 3. Save state\n ctx.state = quant_state\n ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype\n\n if any(ctx.needs_input_grad[:2]):\n ctx.tensors = (None, B)\n else:\n ctx.tensors = (None, None)\n\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n if ctx.is_empty:\n bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)\n return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None\n\n req_gradA, _, _, req_gradBias, _ = ctx.needs_input_grad\n _, B = ctx.tensors\n\n grad_A, grad_B, grad_bias = None, None, None\n\n if req_gradBias:\n # compute grad_bias first before changing grad_output dtype\n grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)\n\n # not supported by PyTorch. 
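`MatMul4Bit` is normally reached through `bnb.matmul_4bit` (defined below). A usage sketch mirroring the benchmark code later in this dump, CUDA assumed:

```python
import torch
import bitsandbytes as bnb
from bitsandbytes import functional as F

W = torch.randn(256, 64, dtype=torch.float16, device="cuda")  # [out, in]
W4, quant_state = F.quantize_nf4(W)                           # pack to NF4
A = torch.randn(8, 1, 64, dtype=torch.float16, device="cuda")
out = bnb.matmul_4bit(A, W4.t(), quant_state=quant_state)     # -> (8, 1, 256)
```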
TODO: create work-around\n # if req_gradB: grad_B = torch.matmul(grad_output.t(), A)\n if req_gradA:\n grad_A = torch.matmul(grad_output, F.dequantize_4bit(B, ctx.state).to(grad_output.dtype).t())\n\n return grad_A, grad_B, None, grad_bias, None\n\n\ndef matmul(\n A: torch.Tensor,\n B: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n state: Optional[MatmulLtState] = None,\n threshold=0.0,\n bias: Optional[torch.Tensor] = None,\n):\n state = state or MatmulLtState()\n if threshold > 0.0:\n state.threshold = threshold\n # MatMul8bitLt is slower because no fast kernel for quant/dequant 8bit in CPU/XPU\n if state.is_training:\n if A.device.type in (\"cpu\", \"xpu\"):\n return MatMul8bitFp.apply(A, B, out, bias, state)\n return MatMul8bitLt.apply(A, B, out, bias, state)\n\n","source_hash":"f827d38aa68d400d32ca420569d2003add15aa7dc1fcd08af6c959997829dfad","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.autograd._functions.matmul","uri":"program://bitsandbytes/function/bitsandbytes.autograd._functions.matmul#L409-L424","kind":"function","name":"matmul","path":"bitsandbytes/autograd/_functions.py","language":"python","start_line":409,"end_line":424,"context_start_line":389,"context_end_line":444,"code":" bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)\n return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None\n\n req_gradA, _, _, req_gradBias, _ = ctx.needs_input_grad\n _, B = ctx.tensors\n\n grad_A, grad_B, grad_bias = None, None, None\n\n if req_gradBias:\n # compute grad_bias first before changing grad_output dtype\n grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)\n\n # not supported by PyTorch. TODO: create work-around\n # if req_gradB: grad_B = torch.matmul(grad_output.t(), A)\n if req_gradA:\n grad_A = torch.matmul(grad_output, F.dequantize_4bit(B, ctx.state).to(grad_output.dtype).t())\n\n return grad_A, grad_B, None, grad_bias, None\n\n\ndef matmul(\n A: torch.Tensor,\n B: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n state: Optional[MatmulLtState] = None,\n threshold=0.0,\n bias: Optional[torch.Tensor] = None,\n):\n state = state or MatmulLtState()\n if threshold > 0.0:\n state.threshold = threshold\n # MatMul8bitLt is slower because no fast kernel for quant/dequant 8bit in CPU/XPU\n if state.is_training:\n if A.device.type in (\"cpu\", \"xpu\"):\n return MatMul8bitFp.apply(A, B, out, bias, state)\n return MatMul8bitLt.apply(A, B, out, bias, state)\n\n\ndef matmul_4bit(\n A: torch.Tensor,\n B: torch.Tensor,\n quant_state: F.QuantState,\n out: Optional[torch.Tensor] = None,\n bias: Optional[torch.Tensor] = None,\n):\n assert quant_state is not None\n\n if A.numel() == A.shape[-1] and A.requires_grad == False and A.device.type != \"hpu\":\n if A.shape[-1] % quant_state.blocksize != 0:\n warn(\n f\"Some matrices hidden dimension is not a multiple of {quant_state.blocksize} and efficient inference kernels are not supported for these (slow). 
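`matmul` above is the public dispatcher: it folds `threshold` into the state and routes to `MatMul8bitFp` on CPU/XPU and `MatMul8bitLt` otherwise. Hedged CUDA usage, mirroring the benchmark further below:

```python
import torch
import bitsandbytes as bnb

A = torch.randn(4, 64, dtype=torch.float16, device="cuda", requires_grad=True)
B = torch.randn(128, 64, dtype=torch.float16, device="cuda")
out = bnb.matmul(A, B, threshold=6.0)  # threshold > 0 enables outlier decomposition
```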
Matrix input size found: {A.shape}\",\n )\n return MatMul4Bit.apply(A, B, out, bias, quant_state)\n else:\n out = F.gemv_4bit(A, B.t(), out, state=quant_state)\n if bias is not None:","source_hash":"f827d38aa68d400d32ca420569d2003add15aa7dc1fcd08af6c959997829dfad","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.autograd._functions.matmul_4bit","uri":"program://bitsandbytes/function/bitsandbytes.autograd._functions.matmul_4bit#L427-L448","kind":"function","name":"matmul_4bit","path":"bitsandbytes/autograd/_functions.py","language":"python","start_line":427,"end_line":448,"context_start_line":407,"context_end_line":448,"code":"\n\ndef matmul(\n A: torch.Tensor,\n B: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n state: Optional[MatmulLtState] = None,\n threshold=0.0,\n bias: Optional[torch.Tensor] = None,\n):\n state = state or MatmulLtState()\n if threshold > 0.0:\n state.threshold = threshold\n # MatMul8bitLt is slower because no fast kernel for quant/dequant 8bit in CPU/XPU\n if state.is_training:\n if A.device.type in (\"cpu\", \"xpu\"):\n return MatMul8bitFp.apply(A, B, out, bias, state)\n return MatMul8bitLt.apply(A, B, out, bias, state)\n\n\ndef matmul_4bit(\n A: torch.Tensor,\n B: torch.Tensor,\n quant_state: F.QuantState,\n out: Optional[torch.Tensor] = None,\n bias: Optional[torch.Tensor] = None,\n):\n assert quant_state is not None\n\n if A.numel() == A.shape[-1] and A.requires_grad == False and A.device.type != \"hpu\":\n if A.shape[-1] % quant_state.blocksize != 0:\n warn(\n f\"Some matrices hidden dimension is not a multiple of {quant_state.blocksize} and efficient inference kernels are not supported for these (slow). Matrix input size found: {A.shape}\",\n )\n return MatMul4Bit.apply(A, B, out, bias, quant_state)\n else:\n out = F.gemv_4bit(A, B.t(), out, state=quant_state)\n if bias is not None:\n out += bias\n return out\n else:\n return MatMul4Bit.apply(A, B, out, bias, quant_state)","source_hash":"f827d38aa68d400d32ca420569d2003add15aa7dc1fcd08af6c959997829dfad","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.autograd._functions.__init__","uri":"program://bitsandbytes/function/bitsandbytes.autograd._functions.__init__#L26-L27","kind":"function","name":"__init__","path":"bitsandbytes/autograd/_functions.py","language":"python","start_line":26,"end_line":27,"context_start_line":6,"context_end_line":47,"code":"\nimport torch\nfrom typing_extensions import deprecated\n\nimport bitsandbytes.functional as F\n\n# The inverse transformation for the colTuring and colAmpere format were contributed by Alex Borzunov:\n# https://github.com/bigscience-workshop/petals/blob/main/src/petals/utils/linear8bitlt_patch.py\n\n\n\"\"\"\n This class pools outlier dimensions across layers.\n This is particularly important for small models where outlier features\n are less systematic and occur with low frequency.\n\"\"\"\n\n\nclass GlobalOutlierPooler:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.outliers = set()\n self.model_dim = None\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def add_outliers(self, outlier_idx, feature_dim):\n if self.model_dim is None:\n self.model_dim = feature_dim\n if feature_dim != self.model_dim:\n return # we do not encode outliers for the 2nd FFN layer\n\n 
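The fast-path predicate in `matmul_4bit` deserves a gloss: `A.numel() == A.shape[-1]` holds exactly when every leading dimension is 1, i.e. single-token inference, which is when the fused `gemv_4bit` kernel applies. A tiny self-contained check:

```python
import torch

A = torch.randn(1, 1, 4096)
assert A.numel() == A.shape[-1]    # batch 1, seq 1 -> gemv_4bit fast path
A = torch.randn(1, 2, 4096)
assert A.numel() != A.shape[-1]    # any larger leading dim -> MatMul4Bit
```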
self.outliers.update(outlier_idx.tolist())\n","source_hash":"f827d38aa68d400d32ca420569d2003add15aa7dc1fcd08af6c959997829dfad","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.autograd._functions.initialize","uri":"program://bitsandbytes/function/bitsandbytes.autograd._functions.initialize#L29-L31","kind":"function","name":"initialize","path":"bitsandbytes/autograd/_functions.py","language":"python","start_line":29,"end_line":31,"context_start_line":9,"context_end_line":51,"code":"\nimport bitsandbytes.functional as F\n\n# The inverse transformation for the colTuring and colAmpere format were contributed by Alex Borzunov:\n# https://github.com/bigscience-workshop/petals/blob/main/src/petals/utils/linear8bitlt_patch.py\n\n\n\"\"\"\n This class pools outlier dimensions across layers.\n This is particularly important for small models where outlier features\n are less systematic and occur with low frequency.\n\"\"\"\n\n\nclass GlobalOutlierPooler:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.outliers = set()\n self.model_dim = None\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def add_outliers(self, outlier_idx, feature_dim):\n if self.model_dim is None:\n self.model_dim = feature_dim\n if feature_dim != self.model_dim:\n return # we do not encode outliers for the 2nd FFN layer\n\n self.outliers.update(outlier_idx.tolist())\n\n def get_current_outlier_idx(self):\n return torch.Tensor(list(self.outliers)).to(torch.int64)\n\n","source_hash":"f827d38aa68d400d32ca420569d2003add15aa7dc1fcd08af6c959997829dfad","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.autograd._functions.get_instance","uri":"program://bitsandbytes/function/bitsandbytes.autograd._functions.get_instance#L34-L38","kind":"function","name":"get_instance","path":"bitsandbytes/autograd/_functions.py","language":"python","start_line":34,"end_line":38,"context_start_line":14,"context_end_line":58,"code":"\n\n\"\"\"\n This class pools outlier dimensions across layers.\n This is particularly important for small models where outlier features\n are less systematic and occur with low frequency.\n\"\"\"\n\n\nclass GlobalOutlierPooler:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.outliers = set()\n self.model_dim = None\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def add_outliers(self, outlier_idx, feature_dim):\n if self.model_dim is None:\n self.model_dim = feature_dim\n if feature_dim != self.model_dim:\n return # we do not encode outliers for the 2nd FFN layer\n\n self.outliers.update(outlier_idx.tolist())\n\n def get_current_outlier_idx(self):\n return torch.Tensor(list(self.outliers)).to(torch.int64)\n\n\n@deprecated(\n \"This function is deprecated and will be removed in a future release.\",\n category=FutureWarning,\n)\ndef get_inverse_transform_indices(\n transform_tile: Callable[[torch.Tensor], torch.Tensor],\n tile_size: tuple[int, int],","source_hash":"f827d38aa68d400d32ca420569d2003add15aa7dc1fcd08af6c959997829dfad","truncated":false} 
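`GlobalOutlierPooler` is a process-wide singleton; `__init__` is blocked so all access goes through `get_instance()`. A short usage sketch:

```python
import torch
from bitsandbytes.autograd._functions import GlobalOutlierPooler

pooler = GlobalOutlierPooler.get_instance()
pooler.add_outliers(torch.tensor([3, 17]), feature_dim=4096)
pooler.add_outliers(torch.tensor([17, 42]), feature_dim=4096)   # 17 deduplicated
idx = pooler.get_current_outlier_idx()                          # int64 index tensor
```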
{"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.autograd._functions.add_outliers","uri":"program://bitsandbytes/function/bitsandbytes.autograd._functions.add_outliers#L40-L46","kind":"function","name":"add_outliers","path":"bitsandbytes/autograd/_functions.py","language":"python","start_line":40,"end_line":46,"context_start_line":20,"context_end_line":66,"code":"\"\"\"\n\n\nclass GlobalOutlierPooler:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.outliers = set()\n self.model_dim = None\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def add_outliers(self, outlier_idx, feature_dim):\n if self.model_dim is None:\n self.model_dim = feature_dim\n if feature_dim != self.model_dim:\n return # we do not encode outliers for the 2nd FFN layer\n\n self.outliers.update(outlier_idx.tolist())\n\n def get_current_outlier_idx(self):\n return torch.Tensor(list(self.outliers)).to(torch.int64)\n\n\n@deprecated(\n \"This function is deprecated and will be removed in a future release.\",\n category=FutureWarning,\n)\ndef get_inverse_transform_indices(\n transform_tile: Callable[[torch.Tensor], torch.Tensor],\n tile_size: tuple[int, int],\n):\n \"\"\"\n Compute a permutation of indices that invert the specified (tiled) matrix transformation\n\n :param transform_tile: a function that applies forward transform to a tensor of shape [dim1, dim2]\n :param tile_size: higher-level tile dimensions, i.e. (8, 32) for Turing and (32, 32) for Ampere\n :note: we assume that tile_transform applies to a cpu-based int8 tensor of shape tile_size\n :example: transform_tile function for the turing layout (bitsandbytes.functional as F)","source_hash":"f827d38aa68d400d32ca420569d2003add15aa7dc1fcd08af6c959997829dfad","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.autograd._functions.get_current_outlier_idx","uri":"program://bitsandbytes/function/bitsandbytes.autograd._functions.get_current_outlier_idx#L48-L49","kind":"function","name":"get_current_outlier_idx","path":"bitsandbytes/autograd/_functions.py","language":"python","start_line":48,"end_line":49,"context_start_line":28,"context_end_line":69,"code":"\n def initialize(self):\n self.outliers = set()\n self.model_dim = None\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance\n\n def add_outliers(self, outlier_idx, feature_dim):\n if self.model_dim is None:\n self.model_dim = feature_dim\n if feature_dim != self.model_dim:\n return # we do not encode outliers for the 2nd FFN layer\n\n self.outliers.update(outlier_idx.tolist())\n\n def get_current_outlier_idx(self):\n return torch.Tensor(list(self.outliers)).to(torch.int64)\n\n\n@deprecated(\n \"This function is deprecated and will be removed in a future release.\",\n category=FutureWarning,\n)\ndef get_inverse_transform_indices(\n transform_tile: Callable[[torch.Tensor], torch.Tensor],\n tile_size: tuple[int, int],\n):\n \"\"\"\n Compute a permutation of indices that invert the specified (tiled) matrix transformation\n\n :param transform_tile: a function that applies forward transform to a tensor of shape [dim1, dim2]\n :param tile_size: higher-level tile dimensions, i.e. 
(8, 32) for Turing and (32, 32) for Ampere\n :note: we assume that tile_transform applies to a cpu-based int8 tensor of shape tile_size\n :example: transform_tile function for the turing layout (bitsandbytes.functional as F)\n :returns: indices\n \"\"\"\n d1, d2 = tile_size","source_hash":"f827d38aa68d400d32ca420569d2003add15aa7dc1fcd08af6c959997829dfad","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.autograd._functions.reset_grads","uri":"program://bitsandbytes/function/bitsandbytes.autograd._functions.reset_grads#L138-L146","kind":"function","name":"reset_grads","path":"bitsandbytes/autograd/_functions.py","language":"python","start_line":138,"end_line":146,"context_start_line":118,"context_end_line":166,"code":" CB: Optional[torch.Tensor] = None\n CxB: Optional[torch.Tensor] = None # TODO: Deprecate/remove\n SB: Optional[torch.Tensor] = None\n SCB: Optional[torch.Tensor] = None\n\n CxBt: Optional[torch.Tensor] = None # TODO: Deprecate/remove\n SBt: Optional[torch.Tensor] = None\n CBt: Optional[torch.Tensor] = None\n\n subB: Optional[torch.Tensor] = None\n\n outlier_pool: Optional[GlobalOutlierPooler] = None\n has_accumulated_gradients = False\n threshold = 0.0\n idx: Optional[torch.Tensor] = None\n is_training = True\n has_fp16_weights = True\n use_pool = False\n formatB = \"row\" # TODO: Deprecate/remove\n\n def reset_grads(self):\n self.CB = None\n self.CxB = None\n self.SB = None\n self.SCB = None\n\n self.CxBt = None\n self.SBt = None\n self.CBt = None\n\n @property\n def tile_indices(self):\n raise ValueError(\"tile_indices is no longer supported.\")\n\n\nclass MatMul8bitLt(torch.autograd.Function):\n @staticmethod\n def forward(\n ctx: torch.autograd.function.FunctionCtx,\n A: torch.Tensor,\n B: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n bias: Optional[torch.Tensor] = None,\n state: Optional[MatmulLtState] = None,\n ):\n state = state or MatmulLtState()\n\n # default of pytorch behavior if inputs are empty\n ctx.is_empty = False","source_hash":"f827d38aa68d400d32ca420569d2003add15aa7dc1fcd08af6c959997829dfad","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.autograd._functions.tile_indices","uri":"program://bitsandbytes/function/bitsandbytes.autograd._functions.tile_indices#L149-L150","kind":"function","name":"tile_indices","path":"bitsandbytes/autograd/_functions.py","language":"python","start_line":149,"end_line":150,"context_start_line":129,"context_end_line":170,"code":" outlier_pool: Optional[GlobalOutlierPooler] = None\n has_accumulated_gradients = False\n threshold = 0.0\n idx: Optional[torch.Tensor] = None\n is_training = True\n has_fp16_weights = True\n use_pool = False\n formatB = \"row\" # TODO: Deprecate/remove\n\n def reset_grads(self):\n self.CB = None\n self.CxB = None\n self.SB = None\n self.SCB = None\n\n self.CxBt = None\n self.SBt = None\n self.CBt = None\n\n @property\n def tile_indices(self):\n raise ValueError(\"tile_indices is no longer supported.\")\n\n\nclass MatMul8bitLt(torch.autograd.Function):\n @staticmethod\n def forward(\n ctx: torch.autograd.function.FunctionCtx,\n A: torch.Tensor,\n B: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n bias: Optional[torch.Tensor] = None,\n state: Optional[MatmulLtState] = None,\n ):\n state = state or MatmulLtState()\n\n # default of pytorch behavior if inputs are empty\n ctx.is_empty = False\n if prod(A.shape) == 0:\n ctx.is_empty = True\n ctx.A = A\n ctx.B = 
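The `MatmulLtState` fields shown above act as a per-layer cache: after the first call, `CB`/`SCB` hold the int8 weight and its row scales, and `reset_grads()` clears them so the weight is re-quantized on the next call. A hedged configuration sketch:

```python
from bitsandbytes.autograd._functions import MatmulLtState

state = MatmulLtState()
state.threshold = 6.0            # enable mixed int8/fp16 decomposition
state.has_fp16_weights = False   # keep the quantized weight cached
# After a first bnb.matmul(A, B, state=state), state.CB / state.SCB are
# populated; calling state.reset_grads() drops the cache.
```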
B","source_hash":"f827d38aa68d400d32ca420569d2003add15aa7dc1fcd08af6c959997829dfad","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.autograd._functions.forward","uri":"program://bitsandbytes/function/bitsandbytes.autograd._functions.forward#L357-L384","kind":"function","name":"forward","path":"bitsandbytes/autograd/_functions.py","language":"python","start_line":357,"end_line":384,"context_start_line":337,"context_end_line":404,"code":" grad_output = grad_output.reshape(-1, grad_output.shape[-1]).contiguous()\n\n if req_gradB:\n grad_B = torch.matmul(A.t(), grad_output).t()\n\n if req_gradA:\n if state.CB is not None:\n CB = state.CB.to(ctx.dtype_A, copy=True).mul_(state.SCB.unsqueeze(1).mul(1.0 / 127.0))\n grad_A = torch.matmul(grad_output.to(ctx.dtype_A), CB).view(ctx.grad_shape)\n else:\n raise Exception(\"State must contain CB matrix for backward\")\n\n return grad_A, grad_B, None, grad_bias, None\n\n\nclass MatMul4Bit(torch.autograd.Function):\n # forward is the same, but we added the fallback for pre-turing GPUs\n # backward is mostly the same, but adds one extra clause (see \"elif state.CxB is not None\")\n\n @staticmethod\n def forward(ctx, A, B, out=None, bias=None, quant_state: Optional[F.QuantState] = None):\n # default of pytorch behavior if inputs are empty\n ctx.is_empty = False\n if prod(A.shape) == 0:\n ctx.is_empty = True\n ctx.A = A\n ctx.B = B\n ctx.bias = bias\n B_shape = quant_state.shape\n if A.shape[-1] == B_shape[0]:\n return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)\n else:\n return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)\n\n # 1. Dequantize\n # 2. MatmulnN\n output = torch.nn.functional.linear(A, F.dequantize_4bit(B, quant_state).to(A.dtype).t(), bias)\n\n # 3. Save state\n ctx.state = quant_state\n ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype\n\n if any(ctx.needs_input_grad[:2]):\n ctx.tensors = (None, B)\n else:\n ctx.tensors = (None, None)\n\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n if ctx.is_empty:\n bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)\n return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None\n\n req_gradA, _, _, req_gradBias, _ = ctx.needs_input_grad\n _, B = ctx.tensors\n\n grad_A, grad_B, grad_bias = None, None, None\n\n if req_gradBias:\n # compute grad_bias first before changing grad_output dtype\n grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)\n\n # not supported by PyTorch. TODO: create work-around\n # if req_gradB: grad_B = torch.matmul(grad_output.t(), A)\n if req_gradA:\n grad_A = torch.matmul(grad_output, F.dequantize_4bit(B, ctx.state).to(grad_output.dtype).t())","source_hash":"f827d38aa68d400d32ca420569d2003add15aa7dc1fcd08af6c959997829dfad","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:bitsandbytes.autograd._functions.backward","uri":"program://bitsandbytes/function/bitsandbytes.autograd._functions.backward#L387-L406","kind":"function","name":"backward","path":"bitsandbytes/autograd/_functions.py","language":"python","start_line":387,"end_line":406,"context_start_line":367,"context_end_line":426,"code":" return torch.empty(A.shape[:-1] + B_shape[1:], dtype=A.dtype, device=A.device)\n else:\n return torch.empty(A.shape[:-1] + B_shape[:1], dtype=A.dtype, device=A.device)\n\n # 1. Dequantize\n # 2. 
MatmulnN\n output = torch.nn.functional.linear(A, F.dequantize_4bit(B, quant_state).to(A.dtype).t(), bias)\n\n # 3. Save state\n ctx.state = quant_state\n ctx.dtype_A, ctx.dtype_B, ctx.dtype_bias = A.dtype, B.dtype, None if bias is None else bias.dtype\n\n if any(ctx.needs_input_grad[:2]):\n ctx.tensors = (None, B)\n else:\n ctx.tensors = (None, None)\n\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n if ctx.is_empty:\n bias_grad = None if ctx.bias is None else torch.zeros_like(ctx.bias)\n return torch.zeros_like(ctx.A), torch.zeros_like(ctx.B), None, bias_grad, None\n\n req_gradA, _, _, req_gradBias, _ = ctx.needs_input_grad\n _, B = ctx.tensors\n\n grad_A, grad_B, grad_bias = None, None, None\n\n if req_gradBias:\n # compute grad_bias first before changing grad_output dtype\n grad_bias = grad_output.sum(0, dtype=ctx.dtype_bias)\n\n # not supported by PyTorch. TODO: create work-around\n # if req_gradB: grad_B = torch.matmul(grad_output.t(), A)\n if req_gradA:\n grad_A = torch.matmul(grad_output, F.dequantize_4bit(B, ctx.state).to(grad_output.dtype).t())\n\n return grad_A, grad_B, None, grad_bias, None\n\n\ndef matmul(\n A: torch.Tensor,\n B: torch.Tensor,\n out: Optional[torch.Tensor] = None,\n state: Optional[MatmulLtState] = None,\n threshold=0.0,\n bias: Optional[torch.Tensor] = None,\n):\n state = state or MatmulLtState()\n if threshold > 0.0:\n state.threshold = threshold\n # MatMul8bitLt is slower because no fast kernel for quant/dequant 8bit in CPU/XPU\n if state.is_training:\n if A.device.type in (\"cpu\", \"xpu\"):\n return MatMul8bitFp.apply(A, B, out, bias, state)\n return MatMul8bitLt.apply(A, B, out, bias, state)\n\n","source_hash":"f827d38aa68d400d32ca420569d2003add15aa7dc1fcd08af6c959997829dfad","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:scripts.stale","uri":"program://bitsandbytes/module/scripts.stale#L1-L59","kind":"module","name":"scripts.stale","path":"scripts/stale.py","language":"python","start_line":1,"end_line":59,"context_start_line":1,"context_end_line":59,"code":"# Copyright 2023 The HuggingFace Team, the AllenNLP library authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nScript to close stale issue. 
Taken in part from the AllenNLP repository.\nhttps://github.com/allenai/allennlp.\n\"\"\"\n\nfrom datetime import datetime as dt, timezone\nimport os\n\nfrom github import Github\n\n# All labels that we don't want to touch\nLABELS_TO_EXEMPT = [\n \"feature-request\",\n]\n\n\ndef main():\n g = Github(os.environ[\"GITHUB_TOKEN\"])\n repo = g.get_repo(\"TimDettmers/bitsandbytes\")\n open_issues = repo.get_issues(state=\"open\")\n\n for issue in open_issues:\n comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)\n last_comment = comments[0] if len(comments) > 0 else None\n if (\n last_comment is not None\n and last_comment.user.login == \"github-actions[bot]\"\n and (dt.now(timezone.utc) - issue.updated_at).days > 7\n and (dt.now(timezone.utc) - issue.created_at).days >= 30\n and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())\n ):\n issue.edit(state=\"closed\")\n elif (\n (dt.now(timezone.utc) - issue.updated_at).days > 23\n and (dt.now(timezone.utc) - issue.created_at).days >= 30\n and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())\n ):\n issue.create_comment(\n \"This issue has been automatically marked as stale because it has not had \"\n \"recent activity. If you think this still needs to be addressed \"\n \"please comment on this thread.\\n\\n\",\n )\n\n\nif __name__ == \"__main__\":\n main()","source_hash":"e7ca72087ad971d1869fdd73a05313d17d47da46551cb713ed90de8c1b36a4d6","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:scripts.stale.main","uri":"program://bitsandbytes/function/scripts.stale.main#L30-L55","kind":"function","name":"main","path":"scripts/stale.py","language":"python","start_line":30,"end_line":55,"context_start_line":10,"context_end_line":59,"code":"# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nScript to close stale issue. Taken in part from the AllenNLP repository.\nhttps://github.com/allenai/allennlp.\n\"\"\"\n\nfrom datetime import datetime as dt, timezone\nimport os\n\nfrom github import Github\n\n# All labels that we don't want to touch\nLABELS_TO_EXEMPT = [\n \"feature-request\",\n]\n\n\ndef main():\n g = Github(os.environ[\"GITHUB_TOKEN\"])\n repo = g.get_repo(\"TimDettmers/bitsandbytes\")\n open_issues = repo.get_issues(state=\"open\")\n\n for issue in open_issues:\n comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)\n last_comment = comments[0] if len(comments) > 0 else None\n if (\n last_comment is not None\n and last_comment.user.login == \"github-actions[bot]\"\n and (dt.now(timezone.utc) - issue.updated_at).days > 7\n and (dt.now(timezone.utc) - issue.created_at).days >= 30\n and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())\n ):\n issue.edit(state=\"closed\")\n elif (\n (dt.now(timezone.utc) - issue.updated_at).days > 23\n and (dt.now(timezone.utc) - issue.created_at).days >= 30\n and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())\n ):\n issue.create_comment(\n \"This issue has been automatically marked as stale because it has not had \"\n \"recent activity. 
If you think this still needs to be addressed \"\n \"please comment on this thread.\\n\\n\",\n )\n\n\nif __name__ == \"__main__\":\n main()","source_hash":"e7ca72087ad971d1869fdd73a05313d17d47da46551cb713ed90de8c1b36a4d6","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:benchmarking.matmul_benchmark","uri":"program://bitsandbytes/module/benchmarking.matmul_benchmark#L1-L215","kind":"module","name":"benchmarking.matmul_benchmark","path":"benchmarking/matmul_benchmark.py","language":"python","start_line":1,"end_line":215,"context_start_line":1,"context_end_line":215,"code":"\"\"\"\nExtracted from tests/test_functional.py\n\nUsage: pytest benchmarking/matmul_benchmark.py\n\"\"\"\n\nimport time\n\nimport pytest\nimport torch\n\nimport bitsandbytes as bnb\nfrom bitsandbytes import functional as F\n\nk = 20\n\ntorch.set_printoptions(precision=5, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000)\n\n\n@pytest.mark.parametrize(\n (\"batch\", \"seq\", \"model\", \"hidden\"),\n [\n # pytest.param(1, 128, 6656, 4 * 6656, id=\"batch=1, seq=128, model=6656, hidden=26k\"),\n pytest.param(1, 1, 3584, 512, id=\"batch=1, seq=128, model=3584, hidden=19k\"),\n # pytest.param(4, 128, 6656, 4 * 6656, id=\"batch=4, seq=128, model=6656, hidden=26k\"),\n # pytest.param(16, 256, 6656, 4 * 6656, id=\"batch=16, seq=256, model=6656, hidden=26k\")\n ],\n)\n@pytest.mark.benchmark\ndef test_bench_matmul(batch, seq, model, hidden):\n iters = 1000\n formatB = F.get_special_format_str()\n\n A = torch.randn(batch, seq, model, device=\"cuda\").half()\n B = torch.empty(hidden, model, dtype=torch.float16, device=\"cuda\")\n torch.nn.init.xavier_uniform_(B)\n\n B_fp4, state = F.quantize_fp4(B)\n B_fp4_c, state_c = F.quantize_fp4(B, compress_statistics=True)\n\n B_nf4, state_nf4 = F.quantize_nf4(B)\n B_nf4_c, state_nf4_c = F.quantize_nf4(B, compress_statistics=True)\n\n linear8bit = bnb.nn.Linear8bitLt(model, hidden, False, False).cuda().half()\n linear8bit.eval()\n\n outliers = torch.randint(0, model, size=(5,)).cuda()\n A[:, :, outliers] = 8.0\n\n linearMixedBit = bnb.nn.Linear8bitLt(model, hidden, False, False, threshold=6.0).cuda().half()\n # linearMixedBit.eval()\n\n linear8bit_train = bnb.nn.Linear8bitLt(model, hidden, False).cuda().half()\n linear8bit_train_thresh = bnb.nn.Linear8bitLt(model, hidden, False, threshold=6.0).cuda().half()\n bnb.matmul_4bit(A, B_nf4.t(), quant_state=state_nf4)\n\n # warmup\n for i in range(iters):\n torch.matmul(A, B.t())\n torch.cuda.synchronize()\n print(\"\")\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(iters):\n torch.matmul(A, B.t())\n torch.cuda.synchronize()\n print(\n f\"pytorch fp16: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time() - t0:.4f}s\",\n )\n\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in range(iters):\n # bnb.matmul_4bit(A, B_fp4.t(), quant_state=state)\n # torch.cuda.synchronize()\n # print( f\"bnb fp4: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s\" )\n\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in range(iters):\n # bnb.matmul_4bit(A, B_fp4.t(), quant_state=state_c)\n # torch.cuda.synchronize()\n # print( f\"bnb fp4 + compressed stats: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s\" )\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(iters):\n bnb.matmul_4bit(A, B_nf4.t(), quant_state=state_nf4)\n torch.cuda.synchronize()\n print(f\"bnb nf4: 
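The stale-bot thresholds in `scripts/stale.py` are easiest to read distilled into one predicate. A hedged re-statement (the function name is illustrative, not part of the script):

```python
from datetime import datetime, timezone

def stale_action(updated_at, created_at, last_comment_by_bot, now=None):
    # Mirrors scripts/stale.py: close if the bot already warned and the issue
    # idled > 7 days; otherwise warn after > 23 idle days. Both require the
    # issue to be at least 30 days old (exempt labels are handled separately).
    now = now or datetime.now(timezone.utc)
    idle, age = (now - updated_at).days, (now - created_at).days
    if last_comment_by_bot and idle > 7 and age >= 30:
        return "close"
    if idle > 23 and age >= 30:
        return "comment"
    return None
```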
[{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time() - t0:.4f}s\")\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(iters):\n bnb.matmul_4bit(A, B_nf4_c.t(), quant_state=state_nf4_c)\n torch.cuda.synchronize()\n print(\n f\"bnb nf4+DQ: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time() - t0:.4f}s\"\n )\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(iters):\n bnb.matmul(A, B)\n torch.cuda.synchronize()\n print(\n f\"B -> CB (each iteration): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time() - t0:.4f}s\"\n )\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(iters):\n bnb.matmul(A, B, threshold=6.0)\n torch.cuda.synchronize()\n print(\n f\"B -> CB + threshold: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time() - t0:.4f}s\"\n )\n\n CA, SCA, _ = F.int8_vectorwise_quant(A, threshold=0.0)\n CB, SCB, _ = F.int8_vectorwise_quant(B)\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(iters):\n # CA, CAt, SCA, SCAt, coo_tensorA = F.double_quant(A, threshold=0.0)\n out32 = F.int8_linear_matmul(CA, CB)\n torch.cuda.synchronize()\n print(\n f\"no overhead int8 [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time() - t0:.4f}s\"\n )\n\n # C32A, SA = F.transform(CA, \"col32\")\n\n # CxB, SB = F.transform(CB, to_order=formatB)\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in range(iters):\n # out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)\n # torch.cuda.synchronize()\n # print(f\"no overhead matmul-lt: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s\")\n\n # CA, CAt, SCA, SCAt, coo_tensorA = F.double_quant(A, threshold=0.0)\n # C32A, SA = F.transform(CA, \"col32\")\n # CB, CBt, SCB, SCBt, coo_tensorB = F.double_quant(B)\n # CxB, SB = F.transform(CB, to_order=formatB)\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in range(iters):\n # out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)\n # torch.cuda.synchronize()\n # print(f\"no overhead matmul-lt: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s\")\n\n # BA, statsB = F.vectorwise_quant(B, dim=1)\n # CxB, SB = F.nvidia_transform(CB, to_order=formatB)\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in range(iters):\n # A2 = A.view(-1, A.shape[-1]).contiguous()\n # CA, statsA = F.vectorwise_quant(A2, dim=1)\n # C32A, SA = F.nvidia_transform(CA, \"col32\")\n # out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)\n # Cout, Sout = F.nvidia_transform(out32, \"row\", state=Sout32)\n # F.vectorwise_mm_dequant(Cout, statsA, statsB.t())\n # torch.cuda.synchronize()\n # print(f\"vector pytorch + nvidia: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s\")\n\n # BA, statsB = F.vectorwise_quant(B, dim=1, quant_type=\"linear\")\n # CxB, SB = F.nvidia_transform(CB, to_order=formatB)\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in range(iters):\n # A2 = A.view(-1, A.shape[-1]).contiguous()\n # CA, statsA = F.vectorwise_quant(A2, dim=1, quant_type=\"linear\")\n # C32A, SA = F.nvidia_transform(CA, \"col32\")\n # out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)\n # Cout, Sout = F.nvidia_transform(out32, \"row\", state=Sout32)\n # out = Cout * statsB * statsA * (1.0 / (127 * 127))\n # torch.cuda.synchronize()\n # print(f\"linear pytorch + nvidia: [{batch},{seq},{model}], 
[{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s\")\n\n linear8bit(A)\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(iters):\n linear8bit(A)\n torch.cuda.synchronize()\n print(\n f\"bnb linear8bitlt (eval): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time() - t0:.4f}s\"\n )\n\n linearMixedBit(A)\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(iters):\n linearMixedBit(A)\n torch.cuda.synchronize()\n print(\n f\"bnb linear8bitlt with threshold (eval): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time() - t0:.4f}s\"\n )\n\n # linear8bit_train(A)\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in range(iters):\n # linear8bit_train(A)\n # torch.cuda.synchronize()\n # print( f\"bnb linear8bitlt (training): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s\")\n\n # linear8bit_train_thresh(A)\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in range(iters):\n # linear8bit_train(A)\n # torch.cuda.synchronize()\n # print( f\"bnb linear8bitlt with threshold (training): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s\")","source_hash":"1f22434d022a6e12736ab7cf38d579bcc7127ea324482581ec5fc1506637e5e7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:benchmarking.matmul_benchmark.test_bench_matmul","uri":"program://bitsandbytes/function/benchmarking.matmul_benchmark.test_bench_matmul#L30-L199","kind":"function","name":"test_bench_matmul","path":"benchmarking/matmul_benchmark.py","language":"python","start_line":30,"end_line":199,"context_start_line":10,"context_end_line":215,"code":"import torch\n\nimport bitsandbytes as bnb\nfrom bitsandbytes import functional as F\n\nk = 20\n\ntorch.set_printoptions(precision=5, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000)\n\n\n@pytest.mark.parametrize(\n (\"batch\", \"seq\", \"model\", \"hidden\"),\n [\n # pytest.param(1, 128, 6656, 4 * 6656, id=\"batch=1, seq=128, model=6656, hidden=26k\"),\n pytest.param(1, 1, 3584, 512, id=\"batch=1, seq=128, model=3584, hidden=19k\"),\n # pytest.param(4, 128, 6656, 4 * 6656, id=\"batch=4, seq=128, model=6656, hidden=26k\"),\n # pytest.param(16, 256, 6656, 4 * 6656, id=\"batch=16, seq=256, model=6656, hidden=26k\")\n ],\n)\n@pytest.mark.benchmark\ndef test_bench_matmul(batch, seq, model, hidden):\n iters = 1000\n formatB = F.get_special_format_str()\n\n A = torch.randn(batch, seq, model, device=\"cuda\").half()\n B = torch.empty(hidden, model, dtype=torch.float16, device=\"cuda\")\n torch.nn.init.xavier_uniform_(B)\n\n B_fp4, state = F.quantize_fp4(B)\n B_fp4_c, state_c = F.quantize_fp4(B, compress_statistics=True)\n\n B_nf4, state_nf4 = F.quantize_nf4(B)\n B_nf4_c, state_nf4_c = F.quantize_nf4(B, compress_statistics=True)\n\n linear8bit = bnb.nn.Linear8bitLt(model, hidden, False, False).cuda().half()\n linear8bit.eval()\n\n outliers = torch.randint(0, model, size=(5,)).cuda()\n A[:, :, outliers] = 8.0\n\n linearMixedBit = bnb.nn.Linear8bitLt(model, hidden, False, False, threshold=6.0).cuda().half()\n # linearMixedBit.eval()\n\n linear8bit_train = bnb.nn.Linear8bitLt(model, hidden, False).cuda().half()\n linear8bit_train_thresh = bnb.nn.Linear8bitLt(model, hidden, False, threshold=6.0).cuda().half()\n bnb.matmul_4bit(A, B_nf4.t(), quant_state=state_nf4)\n\n # warmup\n for i in range(iters):\n torch.matmul(A, B.t())\n torch.cuda.synchronize()\n print(\"\")\n\n 
torch.cuda.synchronize()\n t0 = time.time()\n for i in range(iters):\n torch.matmul(A, B.t())\n torch.cuda.synchronize()\n print(\n f\"pytorch fp16: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time() - t0:.4f}s\",\n )\n\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in range(iters):\n # bnb.matmul_4bit(A, B_fp4.t(), quant_state=state)\n # torch.cuda.synchronize()\n # print( f\"bnb fp4: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s\" )\n\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in range(iters):\n # bnb.matmul_4bit(A, B_fp4.t(), quant_state=state_c)\n # torch.cuda.synchronize()\n # print( f\"bnb fp4 + compressed stats: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s\" )\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(iters):\n bnb.matmul_4bit(A, B_nf4.t(), quant_state=state_nf4)\n torch.cuda.synchronize()\n print(f\"bnb nf4: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time() - t0:.4f}s\")\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(iters):\n bnb.matmul_4bit(A, B_nf4_c.t(), quant_state=state_nf4_c)\n torch.cuda.synchronize()\n print(\n f\"bnb nf4+DQ: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time() - t0:.4f}s\"\n )\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(iters):\n bnb.matmul(A, B)\n torch.cuda.synchronize()\n print(\n f\"B -> CB (each iteration): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time() - t0:.4f}s\"\n )\n\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(iters):\n bnb.matmul(A, B, threshold=6.0)\n torch.cuda.synchronize()\n print(\n f\"B -> CB + threshold: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time() - t0:.4f}s\"\n )\n\n CA, SCA, _ = F.int8_vectorwise_quant(A, threshold=0.0)\n CB, SCB, _ = F.int8_vectorwise_quant(B)\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(iters):\n # CA, CAt, SCA, SCAt, coo_tensorA = F.double_quant(A, threshold=0.0)\n out32 = F.int8_linear_matmul(CA, CB)\n torch.cuda.synchronize()\n print(\n f\"no overhead int8 [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time() - t0:.4f}s\"\n )\n\n # C32A, SA = F.transform(CA, \"col32\")\n\n # CxB, SB = F.transform(CB, to_order=formatB)\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in range(iters):\n # out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)\n # torch.cuda.synchronize()\n # print(f\"no overhead matmul-lt: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s\")\n\n # CA, CAt, SCA, SCAt, coo_tensorA = F.double_quant(A, threshold=0.0)\n # C32A, SA = F.transform(CA, \"col32\")\n # CB, CBt, SCB, SCBt, coo_tensorB = F.double_quant(B)\n # CxB, SB = F.transform(CB, to_order=formatB)\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in range(iters):\n # out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)\n # torch.cuda.synchronize()\n # print(f\"no overhead matmul-lt: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s\")\n\n # BA, statsB = F.vectorwise_quant(B, dim=1)\n # CxB, SB = F.nvidia_transform(CB, to_order=formatB)\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in range(iters):\n # A2 = A.view(-1, A.shape[-1]).contiguous()\n # CA, statsA = F.vectorwise_quant(A2, dim=1)\n # C32A, SA = 
F.nvidia_transform(CA, \"col32\")\n # out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)\n # Cout, Sout = F.nvidia_transform(out32, \"row\", state=Sout32)\n # F.vectorwise_mm_dequant(Cout, statsA, statsB.t())\n # torch.cuda.synchronize()\n # print(f\"vector pytorch + nvidia: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s\")\n\n # BA, statsB = F.vectorwise_quant(B, dim=1, quant_type=\"linear\")\n # CxB, SB = F.nvidia_transform(CB, to_order=formatB)\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in range(iters):\n # A2 = A.view(-1, A.shape[-1]).contiguous()\n # CA, statsA = F.vectorwise_quant(A2, dim=1, quant_type=\"linear\")\n # C32A, SA = F.nvidia_transform(CA, \"col32\")\n # out32, Sout32 = F.igemmlt(C32A, CxB, SA, SB)\n # Cout, Sout = F.nvidia_transform(out32, \"row\", state=Sout32)\n # out = Cout * statsB * statsA * (1.0 / (127 * 127))\n # torch.cuda.synchronize()\n # print(f\"linear pytorch + nvidia: [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s\")\n\n linear8bit(A)\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(iters):\n linear8bit(A)\n torch.cuda.synchronize()\n print(\n f\"bnb linear8bitlt (eval): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time() - t0:.4f}s\"\n )\n\n linearMixedBit(A)\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(iters):\n linearMixedBit(A)\n torch.cuda.synchronize()\n print(\n f\"bnb linear8bitlt with threshold (eval): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time() - t0:.4f}s\"\n )\n\n # linear8bit_train(A)\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in range(iters):\n # linear8bit_train(A)\n # torch.cuda.synchronize()\n # print( f\"bnb linear8bitlt (training): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s\")\n\n # linear8bit_train_thresh(A)\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in range(iters):\n # linear8bit_train(A)\n # torch.cuda.synchronize()\n # print( f\"bnb linear8bitlt with threshold (training): [{batch},{seq},{model}], [{model},{hidden}]->[{batch},{seq},{hidden}]: {time.time()-t0:.4f}s\")","source_hash":"1f22434d022a6e12736ab7cf38d579bcc7127ea324482581ec5fc1506637e5e7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:benchmarking.inference_benchmark","uri":"program://bitsandbytes/module/benchmarking.inference_benchmark#L1-L175","kind":"module","name":"benchmarking.inference_benchmark","path":"benchmarking/inference_benchmark.py","language":"python","start_line":1,"end_line":175,"context_start_line":1,"context_end_line":175,"code":"\"\"\"\nInference benchmarking tool.\n\nRequirements:\n transformers\n accelerate\n bitsandbytes\n optimum-benchmark\n\nUsage: python inference_benchmark.py model_id\n\noptions:\n -h, --help show this help message and exit\n --configs {bf16,fp16,nf4,nf4-dq,int8,int8-decomp} [{bf16,fp16,nf4,nf4-dq,int8,int8-decomp} ...]\n --bf16\n --fp16\n --nf4\n --nf4-dq\n --int8\n --int8-decomp\n --batches BATCHES [BATCHES ...]\n --input-length INPUT_LENGTH\n --out-dir OUT_DIR\n --iterations ITERATIONS\n --warmup-runs WARMUP_RUNS\n --output-length OUTPUT_LENGTH\n\"\"\"\n\nimport argparse\nfrom pathlib import Path\n\nfrom optimum_benchmark import Benchmark, BenchmarkConfig, InferenceConfig, ProcessConfig, PyTorchConfig\nfrom optimum_benchmark.logging_utils import setup_logging\nimport torch\n\ntorch.backends.cudnn.benchmark = 
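All the timings in `matmul_benchmark.py` follow the same discipline: warm up, synchronize, start the clock, loop, synchronize, read the clock. Factored into a hedged helper (not part of the benchmark itself):

```python
import time
import torch

def bench_cuda(fn, iters=1000):
    fn()                          # warmup so lazy init is excluded
    torch.cuda.synchronize()      # drain queued kernels before timing
    t0 = time.time()
    for _ in range(iters):
        fn()
    torch.cuda.synchronize()      # include all launched work in the window
    return time.time() - t0
```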
False\ntorch.backends.cudnn.deterministic = True\n\nBFLOAT16_SUPPORT = torch.cuda.get_device_capability()[0] >= 8\n\nWEIGHTS_CONFIGS = {\n \"fp16\": {\"torch_dtype\": \"float16\", \"quantization_scheme\": None, \"quantization_config\": {}},\n \"bf16\": {\"torch_dtype\": \"bfloat16\", \"quantization_scheme\": None, \"quantization_config\": {}},\n \"nf4\": {\n \"torch_dtype\": \"bfloat16\" if BFLOAT16_SUPPORT else \"float16\",\n \"quantization_scheme\": \"bnb\",\n \"quantization_config\": {\n \"load_in_4bit\": True,\n \"bnb_4bit_quant_type\": \"nf4\",\n \"bnb_4bit_use_double_quant\": False,\n \"bnb_4bit_compute_dtype\": torch.bfloat16 if BFLOAT16_SUPPORT else \"float16\",\n },\n },\n \"nf4-dq\": {\n \"torch_dtype\": \"bfloat16\" if BFLOAT16_SUPPORT else \"float16\",\n \"quantization_scheme\": \"bnb\",\n \"quantization_config\": {\n \"load_in_4bit\": True,\n \"bnb_4bit_quant_type\": \"nf4\",\n \"bnb_4bit_use_double_quant\": True,\n \"bnb_4bit_compute_dtype\": torch.bfloat16 if BFLOAT16_SUPPORT else \"float16\",\n },\n },\n \"int8-decomp\": {\n \"torch_dtype\": \"float16\",\n \"quantization_scheme\": \"bnb\",\n \"quantization_config\": {\n \"load_in_8bit\": True,\n \"llm_int8_threshold\": 6.0,\n },\n },\n \"int8\": {\n \"torch_dtype\": \"float16\",\n \"quantization_scheme\": \"bnb\",\n \"quantization_config\": {\n \"load_in_8bit\": True,\n \"llm_int8_threshold\": 0.0,\n },\n },\n}\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"bitsandbytes inference benchmark tool\")\n\n parser.add_argument(\"model_id\", type=str, help=\"The model checkpoint to use.\")\n\n parser.add_argument(\n \"--configs\",\n nargs=\"+\",\n choices=[\"bf16\", \"fp16\", \"nf4\", \"nf4-dq\", \"int8\", \"int8-decomp\"],\n default=[\"nf4\", \"int8\", \"int8-decomp\"],\n )\n parser.add_argument(\"--bf16\", dest=\"configs\", action=\"append_const\", const=\"bf16\")\n parser.add_argument(\"--fp16\", dest=\"configs\", action=\"append_const\", const=\"fp16\")\n parser.add_argument(\"--nf4\", dest=\"configs\", action=\"append_const\", const=\"nf4\")\n parser.add_argument(\"--nf4-dq\", dest=\"configs\", action=\"append_const\", const=\"nf4-dq\")\n parser.add_argument(\"--int8\", dest=\"configs\", action=\"append_const\", const=\"int8\")\n parser.add_argument(\"--int8-decomp\", dest=\"configs\", action=\"append_const\", const=\"int8-decomp\")\n\n parser.add_argument(\"--batches\", nargs=\"+\", type=int, default=[1, 8, 16, 32])\n parser.add_argument(\"--input-length\", type=int, default=64)\n\n parser.add_argument(\"--out-dir\", type=str, default=\"reports\")\n\n parser.add_argument(\"--iterations\", type=int, default=10, help=\"Number of iterations for each benchmark run\")\n parser.add_argument(\n \"--warmup-runs\", type=int, default=10, help=\"Number of warmup runs to discard before measurement\"\n )\n parser.add_argument(\n \"--output-length\",\n type=int,\n default=64,\n help=\"If set, `max_new_tokens` and `min_new_tokens` will be set to this value.\",\n )\n\n return parser.parse_args()\n\n\ndef run_benchmark(args, config, batch_size):\n launcher_config = ProcessConfig(device_isolation=True, device_isolation_action=\"warn\", start_method=\"spawn\")\n scenario_config = InferenceConfig(\n latency=True,\n memory=True,\n input_shapes={\"batch_size\": batch_size, \"sequence_length\": args.input_length},\n iterations=args.iterations,\n warmup_runs=args.warmup_runs,\n # set duration to 0 to disable the duration-based stopping criterion\n # this is IMPORTANT to ensure that all benchmarks run the same number of 
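The `WEIGHTS_CONFIGS` table above is shaped to feed optimum-benchmark's PyTorch backend, and its `quantization_config` keys line up with transformers' `BitsAndBytesConfig`. For instance, the `nf4-dq` entry corresponds roughly to the following (the model id is a placeholder):

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,     # the "-dq" part
    bnb_4bit_compute_dtype=torch.bfloat16,
)
model = AutoModelForCausalLM.from_pretrained("<model_id>", quantization_config=bnb_config)
```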
operations, regardless of hardware speed/bottlenecks\n duration=0,\n # for consistent results, set a fixed min and max for output tokens\n generate_kwargs={\"min_new_tokens\": args.output_length, \"max_new_tokens\": args.output_length},\n forward_kwargs={\"min_new_tokens\": args.output_length, \"max_new_tokens\": args.output_length},\n )\n\n backend_config = PyTorchConfig(\n device=\"cuda\",\n device_ids=\"0\",\n device_map=\"auto\",\n no_weights=False,\n model=args.model_id,\n **WEIGHTS_CONFIGS[config],\n )\n\n test_name = (\n f\"benchmark-{config}\"\n f\"-bsz-{batch_size}\"\n f\"-isz-{args.input_length}\"\n f\"-osz-{args.output_length}\"\n f\"-iter-{args.iterations}\"\n f\"-wrmup-{args.warmup_runs}\"\n )\n benchmark_config = BenchmarkConfig(\n name=test_name,\n scenario=scenario_config,\n launcher=launcher_config,\n backend=backend_config,\n )\n\n out_path = out_dir / (test_name + \".json\")\n print(f\"[{test_name}] Starting:\")\n benchmark_report = Benchmark.launch(benchmark_config)\n benchmark_report.save_json(out_path)\n\n\nif __name__ == \"__main__\":\n setup_logging(level=\"INFO\")\n args = parse_args()\n\n out_dir = Path(args.out_dir)\n out_dir.mkdir(parents=True, exist_ok=True)\n\n for batch_size in args.batches:\n for config in args.configs:\n run_benchmark(args, config, batch_size)","source_hash":"1ad11cf63293620045523c25941b31f7241e130fecbf13be0431ca3bfaf41654","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:benchmarking.inference_benchmark.parse_args","uri":"program://bitsandbytes/function/benchmarking.inference_benchmark.parse_args#L83-L117","kind":"function","name":"parse_args","path":"benchmarking/inference_benchmark.py","language":"python","start_line":83,"end_line":117,"context_start_line":63,"context_end_line":137,"code":" },\n \"int8-decomp\": {\n \"torch_dtype\": \"float16\",\n \"quantization_scheme\": \"bnb\",\n \"quantization_config\": {\n \"load_in_8bit\": True,\n \"llm_int8_threshold\": 6.0,\n },\n },\n \"int8\": {\n \"torch_dtype\": \"float16\",\n \"quantization_scheme\": \"bnb\",\n \"quantization_config\": {\n \"load_in_8bit\": True,\n \"llm_int8_threshold\": 0.0,\n },\n },\n}\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"bitsandbytes inference benchmark tool\")\n\n parser.add_argument(\"model_id\", type=str, help=\"The model checkpoint to use.\")\n\n parser.add_argument(\n \"--configs\",\n nargs=\"+\",\n choices=[\"bf16\", \"fp16\", \"nf4\", \"nf4-dq\", \"int8\", \"int8-decomp\"],\n default=[\"nf4\", \"int8\", \"int8-decomp\"],\n )\n parser.add_argument(\"--bf16\", dest=\"configs\", action=\"append_const\", const=\"bf16\")\n parser.add_argument(\"--fp16\", dest=\"configs\", action=\"append_const\", const=\"fp16\")\n parser.add_argument(\"--nf4\", dest=\"configs\", action=\"append_const\", const=\"nf4\")\n parser.add_argument(\"--nf4-dq\", dest=\"configs\", action=\"append_const\", const=\"nf4-dq\")\n parser.add_argument(\"--int8\", dest=\"configs\", action=\"append_const\", const=\"int8\")\n parser.add_argument(\"--int8-decomp\", dest=\"configs\", action=\"append_const\", const=\"int8-decomp\")\n\n parser.add_argument(\"--batches\", nargs=\"+\", type=int, default=[1, 8, 16, 32])\n parser.add_argument(\"--input-length\", type=int, default=64)\n\n parser.add_argument(\"--out-dir\", type=str, default=\"reports\")\n\n parser.add_argument(\"--iterations\", type=int, default=10, help=\"Number of iterations for each benchmark run\")\n parser.add_argument(\n \"--warmup-runs\", type=int, default=10, help=\"Number of warmup runs to 
discard before measurement\"\n )\n parser.add_argument(\n \"--output-length\",\n type=int,\n default=64,\n help=\"If set, `max_new_tokens` and `min_new_tokens` will be set to this value.\",\n )\n\n return parser.parse_args()\n\n\ndef run_benchmark(args, config, batch_size):\n launcher_config = ProcessConfig(device_isolation=True, device_isolation_action=\"warn\", start_method=\"spawn\")\n scenario_config = InferenceConfig(\n latency=True,\n memory=True,\n input_shapes={\"batch_size\": batch_size, \"sequence_length\": args.input_length},\n iterations=args.iterations,\n warmup_runs=args.warmup_runs,\n # set duration to 0 to disable the duration-based stopping criterion\n # this is IMPORTANT to ensure that all benchmarks run the same number of operations, regardless of hardware speed/bottlenecks\n duration=0,\n # for consistent results, set a fixed min and max for output tokens\n generate_kwargs={\"min_new_tokens\": args.output_length, \"max_new_tokens\": args.output_length},\n forward_kwargs={\"min_new_tokens\": args.output_length, \"max_new_tokens\": args.output_length},\n )\n\n backend_config = PyTorchConfig(\n device=\"cuda\",","source_hash":"1ad11cf63293620045523c25941b31f7241e130fecbf13be0431ca3bfaf41654","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:benchmarking.inference_benchmark.run_benchmark","uri":"program://bitsandbytes/function/benchmarking.inference_benchmark.run_benchmark#L120-L163","kind":"function","name":"run_benchmark","path":"benchmarking/inference_benchmark.py","language":"python","start_line":120,"end_line":163,"context_start_line":100,"context_end_line":175,"code":"\n parser.add_argument(\"--batches\", nargs=\"+\", type=int, default=[1, 8, 16, 32])\n parser.add_argument(\"--input-length\", type=int, default=64)\n\n parser.add_argument(\"--out-dir\", type=str, default=\"reports\")\n\n parser.add_argument(\"--iterations\", type=int, default=10, help=\"Number of iterations for each benchmark run\")\n parser.add_argument(\n \"--warmup-runs\", type=int, default=10, help=\"Number of warmup runs to discard before measurement\"\n )\n parser.add_argument(\n \"--output-length\",\n type=int,\n default=64,\n help=\"If set, `max_new_tokens` and `min_new_tokens` will be set to this value.\",\n )\n\n return parser.parse_args()\n\n\ndef run_benchmark(args, config, batch_size):\n launcher_config = ProcessConfig(device_isolation=True, device_isolation_action=\"warn\", start_method=\"spawn\")\n scenario_config = InferenceConfig(\n latency=True,\n memory=True,\n input_shapes={\"batch_size\": batch_size, \"sequence_length\": args.input_length},\n iterations=args.iterations,\n warmup_runs=args.warmup_runs,\n # set duration to 0 to disable the duration-based stopping criterion\n # this is IMPORTANT to ensure that all benchmarks run the same number of operations, regardless of hardware speed/bottlenecks\n duration=0,\n # for consistent results, set a fixed min and max for output tokens\n generate_kwargs={\"min_new_tokens\": args.output_length, \"max_new_tokens\": args.output_length},\n forward_kwargs={\"min_new_tokens\": args.output_length, \"max_new_tokens\": args.output_length},\n )\n\n backend_config = PyTorchConfig(\n device=\"cuda\",\n device_ids=\"0\",\n device_map=\"auto\",\n no_weights=False,\n model=args.model_id,\n **WEIGHTS_CONFIGS[config],\n )\n\n test_name = (\n f\"benchmark-{config}\"\n f\"-bsz-{batch_size}\"\n f\"-isz-{args.input_length}\"\n f\"-osz-{args.output_length}\"\n f\"-iter-{args.iterations}\"\n f\"-wrmup-{args.warmup_runs}\"\n )\n benchmark_config = 
BenchmarkConfig(\n name=test_name,\n scenario=scenario_config,\n launcher=launcher_config,\n backend=backend_config,\n )\n\n out_path = out_dir / (test_name + \".json\")\n print(f\"[{test_name}] Starting:\")\n benchmark_report = Benchmark.launch(benchmark_config)\n benchmark_report.save_json(out_path)\n\n\nif __name__ == \"__main__\":\n setup_logging(level=\"INFO\")\n args = parse_args()\n\n out_dir = Path(args.out_dir)\n out_dir.mkdir(parents=True, exist_ok=True)\n\n for batch_size in args.batches:\n for config in args.configs:\n run_benchmark(args, config, batch_size)","source_hash":"1ad11cf63293620045523c25941b31f7241e130fecbf13be0431ca3bfaf41654","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:benchmarking.optimizer_benchmark","uri":"program://bitsandbytes/module/benchmarking.optimizer_benchmark#L1-L56","kind":"module","name":"benchmarking.optimizer_benchmark","path":"benchmarking/optimizer_benchmark.py","language":"python","start_line":1,"end_line":56,"context_start_line":1,"context_end_line":56,"code":"\"\"\"\nExtracted from tests/test_optim.py\n\nUsage: pytest benchmarking/optimizer_benchmark.py\n\"\"\"\n\nimport time\n\nimport pytest\nfrom tests.helpers import describe_dtype, id_formatter\nimport torch\n\nimport bitsandbytes as bnb\n\nstr2optimizers = {\"paged_adamw\": (torch.optim.AdamW, bnb.optim.PagedAdamW)}\n\n\n@pytest.mark.parametrize(\"dim1\", [2 * 1024], ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"gtype\", [torch.float16], ids=describe_dtype)\n@pytest.mark.parametrize(\"optim_name\", [\"paged_adamw\"], ids=id_formatter(\"optim_name\"))\n@pytest.mark.parametrize(\"mode\", [\"bnb\"], ids=id_formatter(\"mode\"))\n@pytest.mark.benchmark\ndef test_stream_optimizer_bench(dim1, gtype, optim_name, mode):\n layers1 = torch.nn.Sequential(*torch.nn.ModuleList([torch.nn.Linear(dim1, dim1) for i in range(10)]))\n layers1 = layers1.to(gtype)\n layers1 = layers1.cuda()\n\n large_tensor = None\n if mode == \"torch\":\n optim = str2optimizers[optim_name][0](layers1.parameters())\n else:\n optim = str2optimizers[optim_name][1](layers1.parameters())\n # ~18 GB of float32 (4.5e9 elements) to create GPU memory pressure for the paged optimizer\n large_tensor = torch.empty((int(4.5e9),), device=\"cuda\")\n\n torch.cuda.synchronize()\n time.sleep(5)\n\n num_batches = 5\n batches = torch.randn(num_batches, 128, dim1, device=\"cuda\").to(gtype)\n lbls = torch.randint(0, 10, size=(num_batches, 128)).cuda()\n\n for i in range(num_batches):\n print(i)\n b = batches[i]\n if i == 2:\n torch.cuda.synchronize()\n t0 = time.time()\n\n out1 = layers1(b)\n\n loss1 = torch.nn.functional.cross_entropy(out1, lbls[i]).mean()\n loss1.backward()\n optim.step()\n torch.cuda.synchronize()\n print(mode, time.time() - t0)","source_hash":"975399726e1aa92fc294ed8ca4c8030ce6fb15ba063ac87c0b08459baebc715d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:benchmarking.optimizer_benchmark.test_stream_optimizer_bench","uri":"program://bitsandbytes/function/benchmarking.optimizer_benchmark.test_stream_optimizer_bench#L23-L56","kind":"function","name":"test_stream_optimizer_bench","path":"benchmarking/optimizer_benchmark.py","language":"python","start_line":23,"end_line":56,"context_start_line":3,"context_end_line":56,"code":"\nUsage: pytest benchmarking/optimizer_benchmark.py\n\"\"\"\n\nimport time\n\nimport pytest\nfrom tests.helpers import describe_dtype, id_formatter\nimport torch\n\nimport bitsandbytes as bnb\n\nstr2optimizers = {\"paged_adamw\": (torch.optim.AdamW, bnb.optim.PagedAdamW)}\n\n\n@pytest.mark.parametrize(\"dim1\", [2 * 1024], 
ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"gtype\", [torch.float16], ids=describe_dtype)\n@pytest.mark.parametrize(\"optim_name\", [\"paged_adamw\"], ids=id_formatter(\"optim_name\"))\n@pytest.mark.parametrize(\"mode\", [\"bnb\"], ids=id_formatter(\"mode\"))\n@pytest.mark.benchmark\ndef test_stream_optimizer_bench(dim1, gtype, optim_name, mode):\n layers1 = torch.nn.Sequential(*torch.nn.ModuleList([torch.nn.Linear(dim1, dim1) for i in range(10)]))\n layers1 = layers1.to(gtype)\n layers1 = layers1.cuda()\n\n large_tensor = None\n if mode == \"torch\":\n optim = str2optimizers[optim_name][0](layers1.parameters())\n else:\n optim = str2optimizers[optim_name][1](layers1.parameters())\n # 12 GB\n large_tensor = torch.empty((int(4.5e9),), device=\"cuda\")\n\n torch.cuda.synchronize()\n time.sleep(5)\n\n num_batches = 5\n batches = torch.randn(num_batches, 128, dim1, device=\"cuda\").to(gtype)\n lbls = torch.randint(0, 10, size=(num_batches, 128)).cuda()\n\n for i in range(num_batches):\n print(i)\n b = batches[i]\n if i == 2:\n torch.cuda.synchronize()\n t0 = time.time()\n\n out1 = layers1(b)\n\n loss1 = torch.nn.functional.cross_entropy(out1, lbls[i]).mean()\n loss1.backward()\n optim.step()\n torch.cuda.synchronize()\n print(mode, time.time() - t0)","source_hash":"975399726e1aa92fc294ed8ca4c8030ce6fb15ba063ac87c0b08459baebc715d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:benchmarking.int8.training_benchmark","uri":"program://bitsandbytes/module/benchmarking.int8.training_benchmark#L1-L173","kind":"module","name":"benchmarking.int8.training_benchmark","path":"benchmarking/int8/training_benchmark.py","language":"python","start_line":1,"end_line":173,"context_start_line":1,"context_end_line":173,"code":"\"\"\"\nExtracted from tests/test_functional.py\n\nUsage: pytest benchmarking/int8/training_benchmark.py\n\"\"\"\n\nimport time\n\nimport pytest\nimport torch\n\nfrom bitsandbytes import functional as F\n\nk = 20\n\ntorch.set_printoptions(precision=5, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000)\n\n\n@pytest.mark.parametrize(\n (\"batch\", \"seq\", \"model\", \"hidden\"),\n [\n pytest.param(2, 512, 4 * 1024, 3 * 4 * 1024, id=\"batch=2, seq=512, model=4k, hidden=12k\"),\n pytest.param(2, 512, 5120, 3 * 5120, id=\"batch=2, seq=512, model=5k, hidden=15k\"),\n pytest.param(2, 512, 12 * 1024, 4 * 12 * 1024, id=\"batch=2, seq=512, model=12k, hidden=48k\"),\n ],\n)\n@pytest.mark.benchmark\ndef test_bench_8bit_training(batch, seq, model, hidden):\n formatB = F.get_special_format_str()\n A = torch.randn(batch, seq, model, device=\"cuda\").half()\n grad = torch.randn(batch, seq, model, device=\"cuda\").half()\n w1 = torch.randint(-128, 127, size=(hidden, model), device=\"cuda\").half()\n w2 = torch.randint(-128, 127, size=(model, hidden), device=\"cuda\").half()\n print(\"\")\n\n # torch.cuda.synchronize()\n ## warmup\n # for i in range(100):\n # torch.matmul(A, w1.t())\n # torch.cuda.synchronize()\n\n dtype = torch.int8\n A = A.view(-1, A.shape[-1]).contiguous()\n grad = grad.view(-1, grad.shape[-1]).contiguous()\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(k):\n out1 = torch.matmul(A, w1.t()) # fc1\n # out2 = torch.matmul(out1, w2.t())# fc2\n\n # d1 = torch.matmul(grad, w2) # delta1\n # d2 = torch.matmul(d1, w1) # delta2\n\n # grad1 = torch.einsum('bo,bh->oh', out1, grad) # grad w2\n # grad2 = torch.einsum('bh,bo->ho', A, d2) # grad w1\n\n torch.cuda.synchronize()\n t16 = time.time() - t0\n print(t16)\n\n # torch.cuda.empty_cache()\n\n # 
Cw1, Cw1t, statsw1, statsw1t, coo_tensor = F.double_quant(w1)\n # Cw2, Cw2t, statsw2, statsw2t, coo_tensor = F.double_quant(w2)\n\n # CTw1, Sw1 = F.transform2(Cw1, formatB)\n # CTw2, Sw2 = F.transform2(Cw2, formatB)\n # CTw2t, Sw2t = F.transform2(Cw2t, formatB, transpose=True)\n # CTw1t, Sw1t = F.transform2(Cw1t, formatB, transpose=True)\n\n # CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)\n # C32A, SA = F.transform2(CA, 'col32')\n ## fc1\n # out1_32, Sout1_32 = F.igemmlt(C32A, CTw1, SA, Sw1, dtype=dtype)\n ##out1 = F.mm_dequant(out1_32, Sout1_32, statsAt, statsw1t)\n\n ## fc2\n # Cout1, Cout1t, statsout1, statsout1t, coo_tensor = F.double_quant(out1)\n # C32out1, Sout1 = F.transform2(Cout1, 'col32')\n # out2_32, Sout2_32 = F.igemmlt(C32out1, CTw2, Sout1, Sw2, dtype=dtype)\n ##out2 = F.mm_dequant(out2_32, Sout2_32, statsout1t, statsw2t)\n\n ## delta1\n # Cgrad, Cgradt, statsgrad, statsgradt, coo_tensor = F.double_quant(grad)\n # C32grad, Sgrad = F.transform2(Cgrad, 'col32')\n ##d1_32, Sd1_32 = F.igemmlt(C32grad, CTw2t, Sgrad, Sw2t, dtype=dtype)\n ##d1 = F.mm_dequant(d1_32, Sd1_32, statsgradt, statsw2)\n\n ## delta2\n # Cd1, Cd1t, statsd1, statsd1t, coo_tensor = F.double_quant(d1)\n # C32d1, Sd1 = F.transform2(Cd1, 'col32')\n ##d2_32, Sd2_32 = F.igemmlt(C32d1, CTw1t, Sd1, Sw1t, dtype=dtype)\n ##d2 = F.mm_dequant(d2_32, Sd2_32, statsd1t, statsw1)\n\n ## grad1\n # C32out1t, Sout1t = F.transform2(Cout1t, 'col32', transpose=True)\n # CTgradt, Sgradt = F.transform2(Cgradt, formatB, transpose=True)\n ##grad1_32, Sgrad1_32 = F.igemmlt(C32out1t, CTgradt, Sout1t, Sgradt, dtype=dtype)\n ##grad1 = F.mm_dequant(grad1_32, Sgrad1_32, statsout1, statsgrad)\n\n ## grad2\n # C32At, SAt = F.transform2(CAt, 'col32', transpose=True)\n # CTd1t, Sd1t = F.transform2(Cd1t, formatB, transpose=True)\n ##grad2_32, Sgrad2_32 = F.igemmlt(C32At, CTd1t, SAt, Sd1t, dtype=dtype)\n ##grad2 = F.mm_dequant(grad2_32, Sgrad2_32, statsA, statsd1)\n\n # Cw2, Cw2t, statsw2, statsw2t, coo_tensor = F.double_quant(w2)\n\n # Cw1, Cw1t, statsw1, statsw1t, coo_tensor = F.double_quant(w1)\n # Cw2, Cw2t, statsw2, statsw2t, coo_tensor = F.double_quant(w2)\n\n # CTw1, Sw1 = F.transform2(Cw1, formatB)\n # CTw1t, Sw1t = F.transform2(Cw1t, formatB, transpose=True)\n # CTw2, Sw2 = F.transform2(Cw2, formatB)\n # CTw2t, Sw2t = F.transform2(Cw2t, formatB, transpose=True)\n # torch.cuda.synchronize()\n # t0 = time.time()\n # for i in range(k):\n # #Cw1, Cw1t, statsw1, statsw1t, coo_tensor = F.double_quant(w1)\n # #CTw1, Sw1 = F.transform2(Cw1, formatB)\n # #Cw1, Cw1t, statsw1, statsw1t, coo_tensor = F.double_quant(w1)\n # #CTw1, Sw1 = F.transform2(Cw1, formatB)\n\n # #CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A, threshold=3.5)\n # CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)\n # #CTw1t, Sw1t = F.transform2(Cw1t, formatB, transpose=True)\n # #CTw2, Sw2 = F.transform2(Cw2, formatB)\n # #CTw2t, Sw2t = F.transform2(Cw2t, formatB, transpose=True)\n\n # C32A, SA = F.transform2(CA, 'col32')\n\n # # fc1\n # out1_32, Sout1_32 = F.igemmlt(C32A, CTw1, SA, Sw1, dtype=dtype)\n # #out1dn = F.mm_dequant(out1_32, Sout1_32, statsA, statsw1)\n\n # #print(coo_tensor.nnz)\n # #out1sp = F.spmm_coo(coo_tensor, w1.t())\n # #print(w1.t().shape)\n # #out1 = out1dn + out1sp\n\n # # fc2\n # Cout1, Cout1t, statsout1, statsout1t, coo_tensor = F.double_quant(out1)\n # C32out1, Sout1 = F.transform2(Cout1, 'col32')\n # out2_32, Sout2_32 = F.igemmlt(C32out1, CTw2, Sout1, Sw2, dtype=dtype)\n # #out2 = F.mm_dequant(out2_32, Sout2_32, statsout1, 
statsw2)\n\n # # delta1\n # Cgrad, Cgradt, statsgrad, statsgradt, coo_tensor = F.double_quant(grad)\n # C32grad, Sgrad = F.transform2(Cgrad, 'col32')\n # d1_32, Sd1_32 = F.igemmlt(C32grad, CTw2t, Sgrad, Sw2t, dtype=dtype)\n # #d1 = F.mm_dequant(d1_32, Sd1_32, statsgrad, statsw2t)\n\n # # delta2\n # Cd1, Cd1t, statsd1, statsd1t, coo_tensor = F.double_quant(d1)\n # C32d1, Sd1 = F.transform2(Cd1, 'col32')\n # d2_32, Sd2_32 = F.igemmlt(C32d1, CTw1t, Sd1, Sw1t, dtype=dtype)\n # #d2 = F.mm_dequant(d2_32, Sd2_32, statsd1, statsw1t)\n\n # # grad1\n # #C32out1t, Sout1t = F.transform2(Cout1t, 'col32', transpose=True)\n # #CTgradt, Sgradt = F.transform2(Cgradt, formatB, transpose=True)\n # #grad1_32, Sgrad1_32 = F.igemmlt(C32out1t, CTgradt, Sout1t, Sgradt, dtype=dtype)\n # #grad1 = F.mm_dequant(grad1_32, Sgrad1_32, statsout1t, statsgradt)\n\n # ## grad2\n # #C32At, SAt = F.transform2(CAt, 'col32', transpose=True)\n # #CTd1t, Sd1t = F.transform2(Cd1t, formatB, transpose=True)\n # #grad2_32, Sgrad2_32 = F.igemmlt(C32At, CTd1t, SAt, Sd1t, dtype=dtype)\n # #grad2 = F.mm_dequant(grad2_32, Sgrad2_32, statsAt, statsd1t)\n\n # torch.cuda.synchronize()\n # t8 = time.time() - t0\n # print(t8)","source_hash":"f600bf11bff09c2d74a050d019925c9c21b02de9fad7170225bcf4c3294e0f52","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:benchmarking.int8.training_benchmark.test_bench_8bit_training","uri":"program://bitsandbytes/function/benchmarking.int8.training_benchmark.test_bench_8bit_training#L28-L59","kind":"function","name":"test_bench_8bit_training","path":"benchmarking/int8/training_benchmark.py","language":"python","start_line":28,"end_line":59,"context_start_line":8,"context_end_line":79,"code":"\nimport pytest\nimport torch\n\nfrom bitsandbytes import functional as F\n\nk = 20\n\ntorch.set_printoptions(precision=5, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000)\n\n\n@pytest.mark.parametrize(\n (\"batch\", \"seq\", \"model\", \"hidden\"),\n [\n pytest.param(2, 512, 4 * 1024, 3 * 4 * 1024, id=\"batch=2, seq=512, model=4k, hidden=12k\"),\n pytest.param(2, 512, 5120, 3 * 5120, id=\"batch=2, seq=512, model=5k, hidden=15k\"),\n pytest.param(2, 512, 12 * 1024, 4 * 12 * 1024, id=\"batch=2, seq=512, model=12k, hidden=48k\"),\n ],\n)\n@pytest.mark.benchmark\ndef test_bench_8bit_training(batch, seq, model, hidden):\n formatB = F.get_special_format_str()\n A = torch.randn(batch, seq, model, device=\"cuda\").half()\n grad = torch.randn(batch, seq, model, device=\"cuda\").half()\n w1 = torch.randint(-128, 127, size=(hidden, model), device=\"cuda\").half()\n w2 = torch.randint(-128, 127, size=(model, hidden), device=\"cuda\").half()\n print(\"\")\n\n # torch.cuda.synchronize()\n ## warmup\n # for i in range(100):\n # torch.matmul(A, w1.t())\n # torch.cuda.synchronize()\n\n dtype = torch.int8\n A = A.view(-1, A.shape[-1]).contiguous()\n grad = grad.view(-1, grad.shape[-1]).contiguous()\n torch.cuda.synchronize()\n t0 = time.time()\n for i in range(k):\n out1 = torch.matmul(A, w1.t()) # fc1\n # out2 = torch.matmul(out1, w2.t())# fc2\n\n # d1 = torch.matmul(grad, w2) # delta1\n # d2 = torch.matmul(d1, w1) # delta2\n\n # grad1 = torch.einsum('bo,bh->oh', out1, grad) # grad w2\n # grad2 = torch.einsum('bh,bo->ho', A, d2) # grad w1\n\n torch.cuda.synchronize()\n t16 = time.time() - t0\n print(t16)\n\n # torch.cuda.empty_cache()\n\n # Cw1, Cw1t, statsw1, statsw1t, coo_tensor = F.double_quant(w1)\n # Cw2, Cw2t, statsw2, statsw2t, coo_tensor = F.double_quant(w2)\n\n # CTw1, Sw1 = F.transform2(Cw1, formatB)\n # 
CTw2, Sw2 = F.transform2(Cw2, formatB)\n # CTw2t, Sw2t = F.transform2(Cw2t, formatB, transpose=True)\n # CTw1t, Sw1t = F.transform2(Cw1t, formatB, transpose=True)\n\n # CA, CAt, statsA, statsAt, coo_tensor = F.double_quant(A)\n # C32A, SA = F.transform2(CA, 'col32')\n ## fc1\n # out1_32, Sout1_32 = F.igemmlt(C32A, CTw1, SA, Sw1, dtype=dtype)\n ##out1 = F.mm_dequant(out1_32, Sout1_32, statsAt, statsw1t)\n\n ## fc2\n # Cout1, Cout1t, statsout1, statsout1t, coo_tensor = F.double_quant(out1)\n # C32out1, Sout1 = F.transform2(Cout1, 'col32')","source_hash":"f600bf11bff09c2d74a050d019925c9c21b02de9fad7170225bcf4c3294e0f52","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:benchmarking.int8.int8_benchmark","uri":"program://bitsandbytes/module/benchmarking.int8.int8_benchmark#L1-L68","kind":"module","name":"benchmarking.int8.int8_benchmark","path":"benchmarking/int8/int8_benchmark.py","language":"python","start_line":1,"end_line":68,"context_start_line":1,"context_end_line":68,"code":"\"\"\"\nBasic benchmark for text generation.\n\nUsage: python benchmarking/int8/int8_benchmark.py\n\"\"\"\n\nimport time\n\nimport torch\nfrom torch.profiler import ProfilerActivity, profile\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig\n\nMAX_NEW_TOKENS = 128\nmodel_name = \"meta-llama/Llama-3.1-8B\"\n\ntext = \"Below is a question. I need an answer.\\n\\nExplain machine learning: \"\ntokenizer = AutoTokenizer.from_pretrained(model_name)\ninput_ids = tokenizer([text] * 8, return_tensors=\"pt\").input_ids.to(0)\n\nmodel = AutoModelForCausalLM.from_pretrained(\n model_name,\n device_map=\"auto\",\n quantization_config=BitsAndBytesConfig(\n load_in_8bit=True,\n llm_int8_threshold=6.0,\n ),\n attn_implementation=\"sdpa\",\n torch_dtype=torch.float16,\n)\n\nprint(model)\n\n# warmup\nprint(\"Warmup...\")\nfor i in range(3):\n generated_ids = model.generate(input_ids, max_new_tokens=MAX_NEW_TOKENS)\n\nprint(\"Profiler starting...\")\nwith profile(\n activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],\n with_modules=True,\n with_stack=True,\n) as prof:\n model.generate(input_ids, max_new_tokens=1)\n\nprint(\n prof.key_averages().table(\n sort_by=\"cpu_time_total\",\n max_name_column_width=50,\n top_level_events_only=True,\n row_limit=50,\n )\n)\n\ntorch.cuda.synchronize()\n\n\nprint(\"Generating...\")\nnum = 0\ntime_1 = time.time()\nfor i in range(5):\n generated_ids = model.generate(input_ids, max_new_tokens=MAX_NEW_TOKENS)\n num += len(generated_ids[0])\n\nprint(\"=\" * 40)\nprint(f\"Example:\\n{tokenizer.decode(generated_ids[0])}\")\nprint(\"=\" * 40)\nprint(f\"Speed: {num / (time.time() - time_1)}token/s\")","source_hash":"4d333fc8e5a3de3e2baea077473bb1b8b2553f0a6ec4a19f4120cf86e278c555","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:benchmarking.switchback.make_plot_with_jsonl","uri":"program://bitsandbytes/module/benchmarking.switchback.make_plot_with_jsonl#L1-L151","kind":"module","name":"benchmarking.switchback.make_plot_with_jsonl","path":"benchmarking/switchback/make_plot_with_jsonl.py","language":"python","start_line":1,"end_line":151,"context_start_line":1,"context_end_line":151,"code":"import matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ncmap = plt.get_cmap(\"cool\")\n\nif __name__ == \"__main__\":\n fig = plt.figure(tight_layout=True, figsize=(12, 3.5))\n gs = gridspec.GridSpec(1, 2)\n\n dims_to_consider = [1024, 1280, 1408, 1664, 2048, 4096]\n batch_size_for_plot1 = 32768\n batch_sizes_for_plot2 = 
[2**14, 2**15, 2**16, 2**17]\n dims_to_xtick = [1024, 2048, 4096]\n logscale_plot1 = True\n\n ax = fig.add_subplot(gs[0, 0])\n\n # TODO: change this to what you want.\n rdf = pd.read_json(\"speed_benchmark/info_a100_py2.jsonl\", lines=True)\n df = rdf[rdf.batch_size == batch_size_for_plot1]\n\n # first plot the time occupied by different operations\n for k, marker, ls, color, name in [\n (\"standard_gx+standard_gw+standard_fwd\", \"s\", \"-\", \"C2\", \"Standard fp16 (sum of parts)\"),\n (\n \"x_quantize_rowwise+g_quantize_rowwise+w_quantize_global+w_quantize_global_transpose+standard_gw+global_fwd+global_bwd\",\n \"o\",\n \"-\",\n \"C4\",\n \"SwitchBack int8 (sum of parts)\",\n ),\n (\"standard_fwd\", \"^\", \"--\", \"C2\", \"Matmul XW (standard)\"),\n (\"standard_gw\", \"^\", \"-.\", \"C2\", \"Matmul GW (standard)\"),\n (\"standard_gx\", \"^\", \":\", \"gray\", \"Matmul GX (both)\"),\n (\"global_fwd\", \"^\", \"--\", \"C4\", \"Int8 Matmul XW (switchback)\"),\n (\"global_bwd\", \"^\", \"-.\", \"C4\", \"Int8 Matmul GW (switchback)\"),\n (\"x_quantize_rowwise\", \"P\", \"--\", \"C4\", \"Quantize rowwise X (switchback)\"),\n (\"g_quantize_rowwise\", \"P\", \"-.\", \"C4\", \"Quantize rowwise G (switchback)\"),\n (\"w_quantize_global\", \".\", \"--\", \"C4\", \"Quantize global W (switchback)\"),\n (\"w_quantize_global_transpose\", \".\", \"-.\", \"C4\", \"Quantize global and\\ntranspose W (switchback)\"),\n ]:\n xs = []\n ys = []\n for embed_dim in dims_to_consider:\n # average over dim -> 4*dim and 4*dim -> dim\n df_ = df[df.dim_in == embed_dim]\n df_ = df_[df_.dim_out == embed_dim * 4]\n xs.append(embed_dim)\n y_ = 0\n for k_ in k.split(\"+\"):\n y_ += df_[k_].values[0]\n df_ = df[df.dim_in == embed_dim * 4]\n df_ = df_[df_.dim_out == embed_dim]\n for k_ in k.split(\"+\"):\n y_ += df_[k_].values[0]\n ys.append(y_ * 0.5)\n\n ax.plot(\n xs,\n ys,\n color=color,\n label=name,\n marker=marker,\n markersize=5 if marker == \"s\" else 5,\n linestyle=ls,\n linewidth=2 if \"+\" in k else 1.0,\n )\n\n ax.set_xlabel(\"dim\", fontsize=13)\n ax.set_ylabel(\"time (ms)\", fontsize=13)\n\n ax.grid()\n\n ax.set_xscale(\"log\")\n if logscale_plot1:\n ax.set_yscale(\"log\")\n\n ax.tick_params(axis=\"x\", labelsize=11)\n ax.tick_params(axis=\"y\", labelsize=11)\n\n ax.set_xticks(dims_to_xtick)\n ax.set_xticklabels(dims_to_xtick)\n ax.set_xticks([], minor=True)\n\n leg = ax.legend(loc=\"upper center\", bbox_to_anchor=(-0.64, 1.0), ncol=1, fontsize=10)\n leg.get_texts()[0].set_fontweight(\"bold\")\n leg.get_texts()[1].set_fontweight(\"bold\")\n plt.subplots_adjust(left=0.1)\n ax.set_title(\" Linear layer, batch * sequence length = 32k\", fontsize=10, loc=\"left\", y=1.05, pad=-20)\n\n ax = fig.add_subplot(gs[0, 1])\n\n # now plot the % speedup for different batch sizes\n for j, batch_size in enumerate(batch_sizes_for_plot2):\n all_xs, all_ys = [], []\n for k, marker, ls, color, name in [\n (\"standard_gx+standard_gw+standard_fwd\", \"s\", \"-\", \"C2\", \"Standard fp16 (total time)\"),\n (\n \"x_quantize_rowwise+g_quantize_rowwise+w_quantize_global+w_quantize_global_transpose+standard_gw+global_fwd+global_bwd\",\n \"o\",\n \"-\",\n \"C4\",\n \"SwitchBack int8 (total time)\",\n ),\n ]:\n xs, ys = [], []\n df = rdf[rdf.batch_size == batch_size]\n for embed_dim in dims_to_consider:\n df_ = df[df.dim_in == embed_dim]\n df_ = df_[df_.dim_out == embed_dim * 4]\n xs.append(embed_dim)\n y_ = 0\n for k_ in k.split(\"+\"):\n y_ += df_[k_].values[0]\n df_ = df[df.dim_in == embed_dim * 4]\n df_ = df_[df_.dim_out == 
embed_dim]\n for k_ in k.split(\"+\"):\n y_ += df_[k_].values[0]\n ys.append(y_ * 0.5)\n all_xs.append(xs)\n all_ys.append(ys)\n\n color = cmap(j * 0.25)\n real_ys = [-((all_ys[1][i] - all_ys[0][i]) / all_ys[0][i]) * 100 for i in range(len(all_ys[0]))]\n markers = [\"^\", \"v\", \"P\", \"o\"]\n ax.plot(\n all_xs[0],\n real_ys,\n color=color,\n label=f\"batch * sequence length = {batch_size}\",\n marker=markers[j],\n markersize=5 if marker == \"s\" else 5,\n )\n\n ax.legend()\n ax.set_xlabel(\"dim\", fontsize=13)\n ax.set_xscale(\"log\")\n ax.grid()\n ax.set_ylabel(r\"% speedup\", fontsize=13)\n\n ax.tick_params(axis=\"x\", labelsize=11)\n ax.tick_params(axis=\"y\", labelsize=11)\n\n ax.set_xticks(dims_to_xtick)\n ax.set_xticklabels(dims_to_xtick)\n ax.set_xticks([], minor=True)\n\n ax.set_title(\" Linear layer summary, varying dimensions\", fontsize=10, loc=\"left\", y=1.05, pad=-20)\n\n plt.savefig(\"speed_benchmark/plot_with_info.pdf\", bbox_inches=\"tight\")","source_hash":"028048b8f65a244012688a9f6c7dbe8e6e334c7740180a58c8a9eb7d99f13313","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:benchmarking.switchback.speed_benchmark","uri":"program://bitsandbytes/module/benchmarking.switchback.speed_benchmark#L1-L160","kind":"module","name":"benchmarking.switchback.speed_benchmark","path":"benchmarking/switchback/speed_benchmark.py","language":"python","start_line":1,"end_line":160,"context_start_line":1,"context_end_line":160,"code":"import json\nimport time\n\nimport torch\n\nfrom bitsandbytes.triton.int8_matmul_mixed_dequantize import (\n int8_matmul_mixed_dequantize,\n)\nfrom bitsandbytes.triton.int8_matmul_rowwise_dequantize import (\n int8_matmul_rowwise_dequantize,\n)\nfrom bitsandbytes.triton.quantize_columnwise_and_transpose import (\n quantize_columnwise_and_transpose,\n)\nfrom bitsandbytes.triton.quantize_global import (\n quantize_global,\n quantize_global_transpose,\n)\nfrom bitsandbytes.triton.quantize_rowwise import quantize_rowwise\n\n# KNOWN ISSUE: need to optimize \"w_quantize_colwise_transpose\" when the embedding dim is too large.\n\n\ndef get_time(k, fn, info_dict):\n for _ in range(repeat // 2):\n fn()\n\n torch.cuda.synchronize()\n start = time.time()\n for _ in range(repeat):\n fn()\n\n torch.cuda.synchronize()\n end = time.time()\n ms = (end - start) / repeat * 1000\n print(f\"time {k}: {ms:.3f} ms\")\n info_dict[k] = ms\n\n\nif __name__ == \"__main__\":\n torch.manual_seed(0)\n wm = 4\n for dim in [1024, 1280, 1408, 1664, 2048, 4096]:\n # note \"batch_size\" is actually \"batch_size * embed_dim\", which is why it's large\n for batch_size in [256 * 32, 256 * 64, 256 * 128, 256 * 256, 256 * 512]:\n # switch switches dim_in and dim_out\n for switch in [False, True]:\n # hparams\n repeat = 64\n batch_size = batch_size\n dim_out = dim * wm\n dim_in = dim\n if switch:\n dim_out = dim\n dim_in = wm * dim\n\n dim_in = round(dim_in)\n dim_out = round(dim_out)\n\n # simulate forward pass\n x = torch.randn(batch_size, dim_in, dtype=torch.float16).cuda()\n g = torch.randn(batch_size, dim_out, dtype=torch.float16).cuda()\n w = torch.randn(dim_out, dim_in, dtype=torch.float16).cuda()\n\n x_int8 = x.clone().to(torch.int8)\n g_int8 = g.clone().to(torch.int8)\n w_int8 = w.clone().to(torch.int8)\n wt_int8 = w.t().contiguous().clone().to(torch.int8)\n state_x_rowwise = x.max(dim=1)[0]\n state_g_rowwise = g.max(dim=1)[0]\n state_w_columnwise = w.max(dim=0)[0]\n state_w_rowwise = w.max(dim=1)[0]\n state_w_global = w.max()\n\n info = {\n \"repeat\": repeat,\n \"batch_size\": 
batch_size,\n \"dim_out\": dim_out,\n \"dim_in\": dim_in,\n \"wm\": wm,\n \"switch\": switch,\n }\n\n get_time(\"standard_fwd\", lambda: x.matmul(w.t()), info)\n get_time(\"standard_gw\", lambda: g.t().matmul(x), info)\n get_time(\"standard_gx\", lambda: g.matmul(w), info)\n get_time(\n \"rowwise_fwd\",\n lambda: int8_matmul_rowwise_dequantize(\n x_int8,\n w_int8.t(),\n state_x_rowwise,\n state_w_columnwise,\n None,\n ),\n info,\n )\n get_time(\n \"rowwise_bwd\",\n lambda: int8_matmul_rowwise_dequantize(\n g_int8,\n wt_int8.t(),\n state_x_rowwise,\n state_w_rowwise,\n None,\n ),\n info,\n )\n get_time(\n \"global_fwd\",\n lambda: int8_matmul_mixed_dequantize(x_int8, w_int8.t(), state_x_rowwise, state_w_global, None),\n info,\n )\n get_time(\n \"global_bwd\",\n lambda: int8_matmul_mixed_dequantize(g_int8, wt_int8.t(), state_x_rowwise, state_w_global, None),\n info,\n )\n get_time(\"x_quantize_rowwise\", lambda: quantize_rowwise(x), info)\n get_time(\"g_quantize_rowwise\", lambda: quantize_rowwise(g), info)\n get_time(\"w_quantize_rowwise\", lambda: quantize_rowwise(w), info)\n get_time(\"w_quantize_colwise_transpose\", lambda: quantize_columnwise_and_transpose(w), info)\n get_time(\"w_quantize_global\", lambda: quantize_global(w), info)\n get_time(\"w_quantize_global_transpose\", lambda: quantize_global_transpose(w), info)\n\n time_standard = info[\"standard_fwd\"] + info[\"standard_gx\"] + info[\"standard_gw\"]\n time_rowwise = (\n info[\"x_quantize_rowwise\"]\n + info[\"g_quantize_rowwise\"]\n + info[\"w_quantize_colwise_transpose\"]\n + info[\"w_quantize_rowwise\"]\n + info[\"standard_gw\"]\n + info[\"rowwise_fwd\"]\n + info[\"rowwise_bwd\"]\n )\n time_global = (\n info[\"x_quantize_rowwise\"]\n + info[\"g_quantize_rowwise\"]\n + info[\"w_quantize_global\"]\n + info[\"w_quantize_global_transpose\"]\n + info[\"standard_gw\"]\n + info[\"global_fwd\"]\n + info[\"global_bwd\"]\n )\n\n print(\"TOTAL STANDARD\", time_standard)\n print(\"TOTAL ROWWISE\", time_rowwise)\n print(\"TOTAL GLOBAL\", time_global)\n\n print(\"speedup\", -100 * (time_global - time_standard) / time_standard)\n\n info[\"time_standard\"] = time_standard\n info[\"time_rowwise\"] = time_rowwise\n info[\"time_global\"] = time_global\n\n info_json = json.dumps(info)\n\n # TODO: change this to what you want.\n with open(\"speed_benchmark/info.jsonl\", \"a\") as file:\n file.write(info_json + \"\\n\")","source_hash":"bfd34f8bab2afebcd911eea7c33f6e36f5fac2afefd39e649409071417cf7f91","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:benchmarking.switchback.speed_benchmark.get_time","uri":"program://bitsandbytes/function/benchmarking.switchback.speed_benchmark.get_time#L24-L37","kind":"function","name":"get_time","path":"benchmarking/switchback/speed_benchmark.py","language":"python","start_line":24,"end_line":37,"context_start_line":4,"context_end_line":57,"code":"import torch\n\nfrom bitsandbytes.triton.int8_matmul_mixed_dequantize import (\n int8_matmul_mixed_dequantize,\n)\nfrom bitsandbytes.triton.int8_matmul_rowwise_dequantize import (\n int8_matmul_rowwise_dequantize,\n)\nfrom bitsandbytes.triton.quantize_columnwise_and_transpose import (\n quantize_columnwise_and_transpose,\n)\nfrom bitsandbytes.triton.quantize_global import (\n quantize_global,\n quantize_global_transpose,\n)\nfrom bitsandbytes.triton.quantize_rowwise import quantize_rowwise\n\n# KNOWN ISSUE: need to optimize \"w_quantize_colwise_transpose\" when the embedding dim is too large.\n\n\ndef get_time(k, fn, info_dict):\n for _ in range(repeat // 2):\n fn()\n\n 
torch.cuda.synchronize()\n start = time.time()\n for _ in range(repeat):\n fn()\n\n torch.cuda.synchronize()\n end = time.time()\n ms = (end - start) / repeat * 1000\n print(f\"time {k}: {ms:.3f} ms\")\n info_dict[k] = ms\n\n\nif __name__ == \"__main__\":\n torch.manual_seed(0)\n wm = 4\n for dim in [1024, 1280, 1408, 1664, 2048, 4096]:\n # note \"batch_size\" is actually \"batch_size * embed_dim\", which is why it's large\n for batch_size in [256 * 32, 256 * 64, 256 * 128, 256 * 256, 256 * 512]:\n # switch switches dim_in and dim_out\n for switch in [False, True]:\n # hparams\n repeat = 64\n batch_size = batch_size\n dim_out = dim * wm\n dim_in = dim\n if switch:\n dim_out = dim\n dim_in = wm * dim\n\n dim_in = round(dim_in)","source_hash":"bfd34f8bab2afebcd911eea7c33f6e36f5fac2afefd39e649409071417cf7f91","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:benchmarking.xpu.inference_benchmark","uri":"program://bitsandbytes/module/benchmarking.xpu.inference_benchmark#L1-L147","kind":"module","name":"benchmarking.xpu.inference_benchmark","path":"benchmarking/xpu/inference_benchmark.py","language":"python","start_line":1,"end_line":147,"context_start_line":1,"context_end_line":147,"code":"import argparse\nimport time\n\n# import intel_extension_for_pytorch as ipex\nimport numpy as np\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig\n\nMAX_NEW_TOKENS = 256\n\nget_time = time.time\n\nsystem_prompt = \"You are a helpful assistant\"\nuser_prompt = \"\"\"Summarize this text please:\n\n```Tell me, O muse, of that ingenious hero who travelled far and wide after he had sacked the famous town of Troy. Many cities did he visit, and many were the nations with whose manners and customs he was acquainted; moreover he suffered much by sea while trying to save his own life and bring his men safely home; but do what he might he could not save his men, for they perished through their own sheer folly in eating the cattle of the Sun-god Hyperion; so the god prevented them from ever reaching home. Tell me, too, about all these things, O daughter of Jove, from whatsoever source you may know them.\n\nSo now all who escaped death in battle or by shipwreck had got safely home except Ulysses, and he, though he was longing to return to his wife and country, was detained by the goddess Calypso, who had got him into a large cave and wanted to marry him. But as years went by, there came a time when the gods settled that he should go back to Ithaca; even then, however, when he was among his own people, his troubles were not yet over; nevertheless all the gods had now begun to pity him except Neptune, who still persecuted him without ceasing and would not let him get home.\n\nNow Neptune had gone off to the Ethiopians, who are at the world's end, and lie in two halves, the one looking West and the other East. He had gone there to accept a hecatomb of sheep and oxen, and was enjoying himself at his festival; but the other gods met in the house of Olympian Jove, and the sire of gods and men spoke first. At that moment he was thinking of Aegisthus, who had been killed by Agamemnon's son Orestes; so he said to the other gods:\n\n\"See now, how men lay blame upon us gods for what is after all nothing but their own folly. 
Look at Aegisthus; he must needs make love to Agamemnon's wife unrighteously and then kill Agamemnon, though he knew it would be the death of him; for I sent Mercury to warn him not to do either of these things, inasmuch as Orestes would be sure to take his revenge when he grew up and wanted to return home. Mercury told him this in all good will but he would not listen, and now he has paid for everything in full.\"\n\nThen Minerva said, \"Father, son of Saturn, King of kings, it served Aegisthus right, and so it would any one else who does as he did; but Aegisthus is neither here nor there; it is for Ulysses that my heart bleeds, when I think of his sufferings in that lonely sea-girt island, far away, poor man, from all his friends. It is an island covered with forest, in the very middle of the sea, and a goddess lives there, daughter of the magician Atlas, who looks after the bottom of the ocean, and carries the great columns that keep heaven and earth asunder. This daughter of Atlas has got hold of poor unhappy Ulysses, and keeps trying by every kind of blandishment to make him forget his home, so that he is tired of life, and thinks of nothing but how he may once more see the smoke of his own chimneys. You, sir, take no heed of this, and yet when Ulysses was before Troy did he not propitiate you with many a burnt sacrifice? Why then should you keep on being so angry with him?\"\n\nAnd Jove said, \"My child, what are you talking about? How can I forget Ulysses than whom there is no more capable man on earth, nor more liberal in his offerings to the immortal gods that live in heaven? Bear in mind, however, that Neptune is still furious with Ulysses for having blinded an eye of Polyphemus king of the Cyclopes. Polyphemus is son to Neptune by the nymph Thoosa, daughter to the sea-king Phorcys; therefore though he will not kill Ulysses outright, he torments him by preventing him from getting home. 
Still, let us lay our heads together and see how we can help him to return; Neptune will then be pacified, for if we are all of a mind he can hardly stand out against us.\"```\"\"\"\n\nprompt = [\n {\"role\": \"system\", \"content\": system_prompt},\n {\"role\": \"user\", \"content\": user_prompt},\n]\n\n\ndef get_inputs(tokenizer):\n inputs = tokenizer.apply_chat_template(\n prompt,\n tokenize=True,\n add_generation_prompt=True,\n return_tensors=\"pt\",\n return_dict=True,\n )\n return inputs\n\n\ndef get_streamer(tokenizer):\n streamer = Streamer(tokenizer)\n # streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)\n return streamer\n\n\nclass Streamer:\n def __init__(self, tokenizer, print_median=False):\n self.times = []\n self.print_median = print_median\n self.tokenizer = tokenizer\n\n def put(self, t):\n self.times.append(get_time())\n if len(self.times) > 1:\n print(f\"Token latency: {1000 * (self.times[-1] - self.times[-2]):.1f} ms\")\n\n if len(self.times) % 10 == 3 and self.print_median:\n ts = np.array(self.times)\n diff = ts[1:] - ts[:-1]\n # print(\"Token latency:\", 1000 * diff, \"ms\")\n print(\"Token latency median:\", np.median(1000 * diff), \"ms\")\n\n def print_report(self):\n times = np.array(self.times)\n diff = times[1:] - times[:-1]\n print(f\"Median latency: {round(np.median(diff) * 1000, 2)}ms\")\n percentiles = [10, 25, 50, 75, 90]\n print(\n \"Latency percentiles\",\n {p: round(1000 * float(np.percentile(diff, p)), 1) for p in percentiles},\n )\n\n def end(self, *args):\n pass\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description=\"Run inference benchmark for LLM models\")\n parser.add_argument(\n \"--device\",\n type=str,\n default=\"xpu\",\n help=\"Device to run inference on (e.g., xpu, cuda, cpu)\",\n )\n parser.add_argument(\n \"--model-id\",\n type=str,\n default=\"unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit\",\n help=\"Model ID from Hugging Face or local path\",\n )\n parser.add_argument(\n \"--attn\",\n type=str,\n default=\"eager\",\n choices=[\"eager\", \"flash_attention\", \"sdpa\"],\n help=\"Attention implementation to use\",\n )\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_arguments()\n\n device = args.device\n model_id = args.model_id\n\n print(f\"Running inference on {device} with model {model_id}\")\n print(f\"Using attention implementation: {args.attn}\")\n\n tokenizer = AutoTokenizer.from_pretrained(model_id)\n model = AutoModelForCausalLM.from_pretrained(model_id, attn_implementation=args.attn)\n\n inputs = get_inputs(tokenizer)\n streamer = get_streamer(tokenizer)\n\n inputs = inputs.to(device)\n model = model.to(device)\n\n generation_config = GenerationConfig(\n use_cache=True,\n forced_eos_token_id=1,\n eos_token_id=1,\n max_new_tokens=MAX_NEW_TOKENS,\n do_sample=False,\n )\n\n outputs = model.generate(\n **inputs,\n streamer=streamer,\n generation_config=generation_config,\n )\n\n # Print the final outputs (including the input prompt)\n output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)\n\n print(\"\\nOutput (including prompt):\")\n print(\"-\" * 40)\n print(output_text)\n print(\"-\" * 40)\n print(f\"Peak memory usage: {torch.xpu.max_memory_allocated() / 1024**2:.0f}MB\")\n\n streamer.print_report()","source_hash":"d97f8a6958c8be03759f3a6d868cc2f0438cd160d59d19f5bc000a5a62a4c290","truncated":false} 
{"repo_id":"bitsandbytes","entity_id":"py:benchmarking.xpu.inference_benchmark.get_inputs","uri":"program://bitsandbytes/function/benchmarking.xpu.inference_benchmark.get_inputs#L34-L42","kind":"function","name":"get_inputs","path":"benchmarking/xpu/inference_benchmark.py","language":"python","start_line":34,"end_line":42,"context_start_line":14,"context_end_line":62,"code":"user_prompt = \"\"\"Summarize this text please:\n\n```Tell me, O muse, of that ingenious hero who travelled far and wide after he had sacked the famous town of Troy. Many cities did he visit, and many were the nations with whose manners and customs he was acquainted; moreover he suffered much by sea while trying to save his own life and bring his men safely home; but do what he might he could not save his men, for they perished through their own sheer folly in eating the cattle of the Sun-god Hyperion; so the god prevented them from ever reaching home. Tell me, too, about all these things, O daughter of Jove, from whatsoever source you may know them.\n\nSo now all who escaped death in battle or by shipwreck had got safely home except Ulysses, and he, though he was longing to return to his wife and country, was detained by the goddess Calypso, who had got him into a large cave and wanted to marry him. But as years went by, there came a time when the gods settled that he should go back to Ithaca; even then, however, when he was among his own people, his troubles were not yet over; nevertheless all the gods had now begun to pity him except Neptune, who still persecuted him without ceasing and would not let him get home.\n\nNow Neptune had gone off to the Ethiopians, who are at the world's end, and lie in two halves, the one looking West and the other East. He had gone there to accept a hecatomb of sheep and oxen, and was enjoying himself at his festival; but the other gods met in the house of Olympian Jove, and the sire of gods and men spoke first. At that moment he was thinking of Aegisthus, who had been killed by Agamemnon's son Orestes; so he said to the other gods:\n\n\"See now, how men lay blame upon us gods for what is after all nothing but their own folly. Look at Aegisthus; he must needs make love to Agamemnon's wife unrighteously and then kill Agamemnon, though he knew it would be the death of him; for I sent Mercury to warn him not to do either of these things, inasmuch as Orestes would be sure to take his revenge when he grew up and wanted to return home. Mercury told him this in all good will but he would not listen, and now he has paid for everything in full.\"\n\nThen Minerva said, \"Father, son of Saturn, King of kings, it served Aegisthus right, and so it would any one else who does as he did; but Aegisthus is neither here nor there; it is for Ulysses that my heart bleeds, when I think of his sufferings in that lonely sea-girt island, far away, poor man, from all his friends. It is an island covered with forest, in the very middle of the sea, and a goddess lives there, daughter of the magician Atlas, who looks after the bottom of the ocean, and carries the great columns that keep heaven and earth asunder. This daughter of Atlas has got hold of poor unhappy Ulysses, and keeps trying by every kind of blandishment to make him forget his home, so that he is tired of life, and thinks of nothing but how he may once more see the smoke of his own chimneys. You, sir, take no heed of this, and yet when Ulysses was before Troy did he not propitiate you with many a burnt sacrifice? 
Why then should you keep on being so angry with him?\"\n\nAnd Jove said, \"My child, what are you talking about? How can I forget Ulysses than whom there is no more capable man on earth, nor more liberal in his offerings to the immortal gods that live in heaven? Bear in mind, however, that Neptune is still furious with Ulysses for having blinded an eye of Polyphemus king of the Cyclopes. Polyphemus is son to Neptune by the nymph Thoosa, daughter to the sea-king Phorcys; therefore though he will not kill Ulysses outright, he torments him by preventing him from getting home. Still, let us lay our heads together and see how we can help him to return; Neptune will then be pacified, for if we are all of a mind he can hardly stand out against us.\"```\"\"\"\n\nprompt = [\n {\"role\": \"system\", \"content\": system_prompt},\n {\"role\": \"user\", \"content\": user_prompt},\n]\n\n\ndef get_inputs(tokenizer):\n inputs = tokenizer.apply_chat_template(\n prompt,\n tokenize=True,\n add_generation_prompt=True,\n return_tensors=\"pt\",\n return_dict=True,\n )\n return inputs\n\n\ndef get_streamer(tokenizer):\n streamer = Streamer(tokenizer)\n # streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)\n return streamer\n\n\nclass Streamer:\n def __init__(self, tokenizer, print_median=False):\n self.times = []\n self.print_median = print_median\n self.tokenizer = tokenizer\n\n def put(self, t):\n self.times.append(get_time())\n if len(self.times) > 1:\n print(f\"Token latency: {1000 * (self.times[-1] - self.times[-2]):.1f} ms\")\n\n if len(self.times) % 10 == 3 and self.print_median:","source_hash":"d97f8a6958c8be03759f3a6d868cc2f0438cd160d59d19f5bc000a5a62a4c290","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:benchmarking.xpu.inference_benchmark.get_streamer","uri":"program://bitsandbytes/function/benchmarking.xpu.inference_benchmark.get_streamer#L45-L48","kind":"function","name":"get_streamer","path":"benchmarking/xpu/inference_benchmark.py","language":"python","start_line":45,"end_line":48,"context_start_line":25,"context_end_line":68,"code":"\nAnd Jove said, \"My child, what are you talking about? How can I forget Ulysses than whom there is no more capable man on earth, nor more liberal in his offerings to the immortal gods that live in heaven? Bear in mind, however, that Neptune is still furious with Ulysses for having blinded an eye of Polyphemus king of the Cyclopes. Polyphemus is son to Neptune by the nymph Thoosa, daughter to the sea-king Phorcys; therefore though he will not kill Ulysses outright, he torments him by preventing him from getting home. 
Still, let us lay our heads together and see how we can help him to return; Neptune will then be pacified, for if we are all of a mind he can hardly stand out against us.\"```\"\"\"\n\nprompt = [\n {\"role\": \"system\", \"content\": system_prompt},\n {\"role\": \"user\", \"content\": user_prompt},\n]\n\n\ndef get_inputs(tokenizer):\n inputs = tokenizer.apply_chat_template(\n prompt,\n tokenize=True,\n add_generation_prompt=True,\n return_tensors=\"pt\",\n return_dict=True,\n )\n return inputs\n\n\ndef get_streamer(tokenizer):\n streamer = Streamer(tokenizer)\n # streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)\n return streamer\n\n\nclass Streamer:\n def __init__(self, tokenizer, print_median=False):\n self.times = []\n self.print_median = print_median\n self.tokenizer = tokenizer\n\n def put(self, t):\n self.times.append(get_time())\n if len(self.times) > 1:\n print(f\"Token latency: {1000 * (self.times[-1] - self.times[-2]):.1f} ms\")\n\n if len(self.times) % 10 == 3 and self.print_median:\n ts = np.array(self.times)\n diff = ts[1:] - ts[:-1]\n # print(\"Token latency:\", 1000 * diff, \"ms\")\n print(\"Token latency median:\", np.median(1000 * diff), \"ms\")\n\n def print_report(self):","source_hash":"d97f8a6958c8be03759f3a6d868cc2f0438cd160d59d19f5bc000a5a62a4c290","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:benchmarking.xpu.inference_benchmark.Streamer","uri":"program://bitsandbytes/class/benchmarking.xpu.inference_benchmark.Streamer#L51-L79","kind":"class","name":"Streamer","path":"benchmarking/xpu/inference_benchmark.py","language":"python","start_line":51,"end_line":79,"context_start_line":31,"context_end_line":99,"code":"]\n\n\ndef get_inputs(tokenizer):\n inputs = tokenizer.apply_chat_template(\n prompt,\n tokenize=True,\n add_generation_prompt=True,\n return_tensors=\"pt\",\n return_dict=True,\n )\n return inputs\n\n\ndef get_streamer(tokenizer):\n streamer = Streamer(tokenizer)\n # streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)\n return streamer\n\n\nclass Streamer:\n def __init__(self, tokenizer, print_median=False):\n self.times = []\n self.print_median = print_median\n self.tokenizer = tokenizer\n\n def put(self, t):\n self.times.append(get_time())\n if len(self.times) > 1:\n print(f\"Token latency: {1000 * (self.times[-1] - self.times[-2]):.1f} ms\")\n\n if len(self.times) % 10 == 3 and self.print_median:\n ts = np.array(self.times)\n diff = ts[1:] - ts[:-1]\n # print(\"Token latency:\", 1000 * diff, \"ms\")\n print(\"Token latency median:\", np.median(1000 * diff), \"ms\")\n\n def print_report(self):\n times = np.array(self.times)\n diff = times[1:] - times[:-1]\n print(f\"Median latency: {round(np.median(diff) * 1000, 2)}ms\")\n percentiles = [10, 25, 50, 75, 90]\n print(\n \"Latency percentiles\",\n {p: round(1000 * float(np.percentile(diff, p)), 1) for p in percentiles},\n )\n\n def end(self, *args):\n pass\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description=\"Run inference benchmark for LLM models\")\n parser.add_argument(\n \"--device\",\n type=str,\n default=\"xpu\",\n help=\"Device to run inference on (e.g., xpu, cuda, cpu)\",\n )\n parser.add_argument(\n \"--model-id\",\n type=str,\n default=\"unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit\",\n help=\"Model ID from Hugging Face or local path\",\n )\n parser.add_argument(\n \"--attn\",\n type=str,\n default=\"eager\",","source_hash":"d97f8a6958c8be03759f3a6d868cc2f0438cd160d59d19f5bc000a5a62a4c290","truncated":false} 
{"repo_id":"bitsandbytes","entity_id":"py:benchmarking.xpu.inference_benchmark.parse_arguments","uri":"program://bitsandbytes/function/benchmarking.xpu.inference_benchmark.parse_arguments#L82-L103","kind":"function","name":"parse_arguments","path":"benchmarking/xpu/inference_benchmark.py","language":"python","start_line":82,"end_line":103,"context_start_line":62,"context_end_line":123,"code":" if len(self.times) % 10 == 3 and self.print_median:\n ts = np.array(self.times)\n diff = ts[1:] - ts[:-1]\n # print(\"Token latency:\", 1000 * diff, \"ms\")\n print(\"Token latency median:\", np.median(1000 * diff), \"ms\")\n\n def print_report(self):\n times = np.array(self.times)\n diff = times[1:] - times[:-1]\n print(f\"Median latency: {round(np.median(diff) * 1000, 2)}ms\")\n percentiles = [10, 25, 50, 75, 90]\n print(\n \"Latency percentiles\",\n {p: round(1000 * float(np.percentile(diff, p)), 1) for p in percentiles},\n )\n\n def end(self, *args):\n pass\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description=\"Run inference benchmark for LLM models\")\n parser.add_argument(\n \"--device\",\n type=str,\n default=\"xpu\",\n help=\"Device to run inference on (e.g., xpu, cuda, cpu)\",\n )\n parser.add_argument(\n \"--model-id\",\n type=str,\n default=\"unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit\",\n help=\"Model ID from Hugging Face or local path\",\n )\n parser.add_argument(\n \"--attn\",\n type=str,\n default=\"eager\",\n choices=[\"eager\", \"flash_attention\", \"sdpa\"],\n help=\"Attention implementation to use\",\n )\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_arguments()\n\n device = args.device\n model_id = args.model_id\n\n print(f\"Running inference on {device} with model {model_id}\")\n print(f\"Using attention implementation: {args.attn}\")\n\n tokenizer = AutoTokenizer.from_pretrained(model_id)\n model = AutoModelForCausalLM.from_pretrained(model_id, attn_implementation=args.attn)\n\n inputs = get_inputs(tokenizer)\n streamer = get_streamer(tokenizer)\n\n inputs = inputs.to(device)\n model = model.to(device)\n","source_hash":"d97f8a6958c8be03759f3a6d868cc2f0438cd160d59d19f5bc000a5a62a4c290","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:benchmarking.xpu.inference_benchmark.__init__","uri":"program://bitsandbytes/function/benchmarking.xpu.inference_benchmark.__init__#L52-L55","kind":"function","name":"__init__","path":"benchmarking/xpu/inference_benchmark.py","language":"python","start_line":52,"end_line":55,"context_start_line":32,"context_end_line":75,"code":"\n\ndef get_inputs(tokenizer):\n inputs = tokenizer.apply_chat_template(\n prompt,\n tokenize=True,\n add_generation_prompt=True,\n return_tensors=\"pt\",\n return_dict=True,\n )\n return inputs\n\n\ndef get_streamer(tokenizer):\n streamer = Streamer(tokenizer)\n # streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)\n return streamer\n\n\nclass Streamer:\n def __init__(self, tokenizer, print_median=False):\n self.times = []\n self.print_median = print_median\n self.tokenizer = tokenizer\n\n def put(self, t):\n self.times.append(get_time())\n if len(self.times) > 1:\n print(f\"Token latency: {1000 * (self.times[-1] - self.times[-2]):.1f} ms\")\n\n if len(self.times) % 10 == 3 and self.print_median:\n ts = np.array(self.times)\n diff = ts[1:] - ts[:-1]\n # print(\"Token latency:\", 1000 * diff, \"ms\")\n print(\"Token latency median:\", np.median(1000 * diff), \"ms\")\n\n def print_report(self):\n times = np.array(self.times)\n diff = 
times[1:] - times[:-1]\n print(f\"Median latency: {round(np.median(diff) * 1000, 2)}ms\")\n percentiles = [10, 25, 50, 75, 90]\n print(\n \"Latency percentiles\",\n {p: round(1000 * float(np.percentile(diff, p)), 1) for p in percentiles},","source_hash":"d97f8a6958c8be03759f3a6d868cc2f0438cd160d59d19f5bc000a5a62a4c290","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:benchmarking.xpu.inference_benchmark.put","uri":"program://bitsandbytes/function/benchmarking.xpu.inference_benchmark.put#L57-L66","kind":"function","name":"put","path":"benchmarking/xpu/inference_benchmark.py","language":"python","start_line":57,"end_line":66,"context_start_line":37,"context_end_line":86,"code":" tokenize=True,\n add_generation_prompt=True,\n return_tensors=\"pt\",\n return_dict=True,\n )\n return inputs\n\n\ndef get_streamer(tokenizer):\n streamer = Streamer(tokenizer)\n # streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)\n return streamer\n\n\nclass Streamer:\n def __init__(self, tokenizer, print_median=False):\n self.times = []\n self.print_median = print_median\n self.tokenizer = tokenizer\n\n def put(self, t):\n self.times.append(get_time())\n if len(self.times) > 1:\n print(f\"Token latency: {1000 * (self.times[-1] - self.times[-2]):.1f} ms\")\n\n if len(self.times) % 10 == 3 and self.print_median:\n ts = np.array(self.times)\n diff = ts[1:] - ts[:-1]\n # print(\"Token latency:\", 1000 * diff, \"ms\")\n print(\"Token latency median:\", np.median(1000 * diff), \"ms\")\n\n def print_report(self):\n times = np.array(self.times)\n diff = times[1:] - times[:-1]\n print(f\"Median latency: {round(np.median(diff) * 1000, 2)}ms\")\n percentiles = [10, 25, 50, 75, 90]\n print(\n \"Latency percentiles\",\n {p: round(1000 * float(np.percentile(diff, p)), 1) for p in percentiles},\n )\n\n def end(self, *args):\n pass\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description=\"Run inference benchmark for LLM models\")\n parser.add_argument(\n \"--device\",\n type=str,","source_hash":"d97f8a6958c8be03759f3a6d868cc2f0438cd160d59d19f5bc000a5a62a4c290","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:benchmarking.xpu.inference_benchmark.print_report","uri":"program://bitsandbytes/function/benchmarking.xpu.inference_benchmark.print_report#L68-L76","kind":"function","name":"print_report","path":"benchmarking/xpu/inference_benchmark.py","language":"python","start_line":68,"end_line":76,"context_start_line":48,"context_end_line":96,"code":" return streamer\n\n\nclass Streamer:\n def __init__(self, tokenizer, print_median=False):\n self.times = []\n self.print_median = print_median\n self.tokenizer = tokenizer\n\n def put(self, t):\n self.times.append(get_time())\n if len(self.times) > 1:\n print(f\"Token latency: {1000 * (self.times[-1] - self.times[-2]):.1f} ms\")\n\n if len(self.times) % 10 == 3 and self.print_median:\n ts = np.array(self.times)\n diff = ts[1:] - ts[:-1]\n # print(\"Token latency:\", 1000 * diff, \"ms\")\n print(\"Token latency median:\", np.median(1000 * diff), \"ms\")\n\n def print_report(self):\n times = np.array(self.times)\n diff = times[1:] - times[:-1]\n print(f\"Median latency: {round(np.median(diff) * 1000, 2)}ms\")\n percentiles = [10, 25, 50, 75, 90]\n print(\n \"Latency percentiles\",\n {p: round(1000 * float(np.percentile(diff, p)), 1) for p in percentiles},\n )\n\n def end(self, *args):\n pass\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description=\"Run inference benchmark for LLM models\")\n 
parser.add_argument(\n \"--device\",\n type=str,\n default=\"xpu\",\n help=\"Device to run inference on (e.g., xpu, cuda, cpu)\",\n )\n parser.add_argument(\n \"--model-id\",\n type=str,\n default=\"unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit\",\n help=\"Model ID from Hugging Face or local path\",\n )\n parser.add_argument(","source_hash":"d97f8a6958c8be03759f3a6d868cc2f0438cd160d59d19f5bc000a5a62a4c290","truncated":false} {"repo_id":"bitsandbytes","entity_id":"py:benchmarking.xpu.inference_benchmark.end","uri":"program://bitsandbytes/function/benchmarking.xpu.inference_benchmark.end#L78-L79","kind":"function","name":"end","path":"benchmarking/xpu/inference_benchmark.py","language":"python","start_line":78,"end_line":79,"context_start_line":58,"context_end_line":99,"code":" self.times.append(get_time())\n if len(self.times) > 1:\n print(f\"Token latency: {1000 * (self.times[-1] - self.times[-2]):.1f} ms\")\n\n if len(self.times) % 10 == 3 and self.print_median:\n ts = np.array(self.times)\n diff = ts[1:] - ts[:-1]\n # print(\"Token latency:\", 1000 * diff, \"ms\")\n print(\"Token latency median:\", np.median(1000 * diff), \"ms\")\n\n def print_report(self):\n times = np.array(self.times)\n diff = times[1:] - times[:-1]\n print(f\"Median latency: {round(np.median(diff) * 1000, 2)}ms\")\n percentiles = [10, 25, 50, 75, 90]\n print(\n \"Latency percentiles\",\n {p: round(1000 * float(np.percentile(diff, p)), 1) for p in percentiles},\n )\n\n def end(self, *args):\n pass\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description=\"Run inference benchmark for LLM models\")\n parser.add_argument(\n \"--device\",\n type=str,\n default=\"xpu\",\n help=\"Device to run inference on (e.g., xpu, cuda, cpu)\",\n )\n parser.add_argument(\n \"--model-id\",\n type=str,\n default=\"unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit\",\n help=\"Model ID from Hugging Face or local path\",\n )\n parser.add_argument(\n \"--attn\",\n type=str,\n default=\"eager\",","source_hash":"d97f8a6958c8be03759f3a6d868cc2f0438cd160d59d19f5bc000a5a62a4c290","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:setup.py","uri":"program://bitsandbytes/file/setup.py","kind":"file","name":"setup.py","path":"setup.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom distutils.errors import DistutilsModuleError\nfrom warnings import warn\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.build_py import build_py\nfrom setuptools.dist import Distribution\n\n\n# Tested with wheel v0.29.0\nclass BinaryDistribution(Distribution):\n def has_ext_modules(self):\n return True\n\n\nclass ExtBuildPy(build_py):\n def run(self):\n # build_cmake needs to be called prior to build_py, as the latter","source_hash":"79736ea7b3eb21628c1e84644a8e088b05a6131123232eceef27ee9a0235337f","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:check_bnb_install.py","uri":"program://bitsandbytes/file/check_bnb_install.py","kind":"file","name":"check_bnb_install.py","path":"check_bnb_install.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import torch\n\nimport bitsandbytes as bnb\n\np = torch.nn.Parameter(torch.rand(10, 10).cuda())\na = torch.rand(10, 10).cuda()\n\np1 = p.data.sum().item()\n\nadam = bnb.optim.Adam([p])\n\nout = a * p\nloss = out.sum()\nloss.backward()\nadam.step()\n\np2 = p.data.sum().item()\n\nassert p1 != p2\nprint(\"SUCCESS!\")\nprint(\"Installation was successful!\")","source_hash":"382e8c4c433206303bdb02b54b69b611d3e0f2301fd8636e9ed262cd299b2cf8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:install_cuda.py","uri":"program://bitsandbytes/file/install_cuda.py","kind":"file","name":"install_cuda.py","path":"install_cuda.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import os\nimport subprocess\nimport sys\nfrom urllib.request import urlretrieve\n\ncuda_versions = {\n \"118\": 
\"https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run\",\n \"120\": \"https://developer.download.nvidia.com/compute/cuda/12.0.1/local_installers/cuda_12.0.1_525.85.12_linux.run\",\n \"121\": \"https://developer.download.nvidia.com/compute/cuda/12.1.1/local_installers/cuda_12.1.1_530.30.02_linux.run\",\n \"122\": \"https://developer.download.nvidia.com/compute/cuda/12.2.2/local_installers/cuda_12.2.2_535.104.05_linux.run\",\n \"123\": \"https://developer.download.nvidia.com/compute/cuda/12.3.2/local_installers/cuda_12.3.2_545.23.08_linux.run\",\n \"124\": \"https://developer.download.nvidia.com/compute/cuda/12.4.1/local_installers/cuda_12.4.1_550.54.15_linux.run\",\n \"125\": \"https://developer.download.nvidia.com/compute/cuda/12.5.1/local_installers/cuda_12.5.1_555.42.06_linux.run\",\n \"126\": \"https://developer.download.nvidia.com/compute/cuda/12.6.2/local_installers/cuda_12.6.2_560.35.03_linux.run\",\n}\n\n\ndef install_cuda(version, base_path, download_path):\n formatted_version = f\"{version[:-1]}.{version[-1]}\"\n folder = f\"cuda-{formatted_version}\"\n install_path = os.path.join(base_path, folder)","source_hash":"095ec03a4ea2b7e323eddcb098024f3530560ea1ea8bfb677e246d50f72a7df3","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:tests/helpers.py","uri":"program://bitsandbytes/file/tests/helpers.py","kind":"file","name":"tests/helpers.py","path":"tests/helpers.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import functools\nfrom io import BytesIO\nfrom itertools import product\nimport os\nimport random\nfrom typing import Any\n\nimport torch\n\nfrom bitsandbytes.cextension import HIP_ENVIRONMENT\n\ntest_dims_rng = random.Random(42)\n\n\nTRUE_FALSE = (True, False)\nBOOLEAN_TRIPLES = list(product(TRUE_FALSE, repeat=3)) # all combinations of (bool, bool, bool)\nBOOLEAN_TUPLES = list(product(TRUE_FALSE, repeat=2)) # all combinations of (bool, bool)\n\n\n@functools.cache\ndef get_available_devices(no_cpu=False):","source_hash":"cffe87870c189c67cc409e26429e3bbb774a0554188f62e402a15dbc393f5fa1","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:tests/conftest.py","uri":"program://bitsandbytes/file/tests/conftest.py","kind":"file","name":"tests/conftest.py","path":"tests/conftest.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import gc\nimport random\n\nimport numpy as np\nimport pytest\nimport torch\n\n\ndef _set_seed():\n torch.manual_seed(0)\n torch.cuda.manual_seed_all(0)\n torch.mps.manual_seed(0)\n np.random.seed(0)\n random.seed(0)\n\n\ndef pytest_runtest_call(item):\n try:\n _set_seed()\n item.runtest()\n except AssertionError as ae:","source_hash":"4efdf5952b7fb2b9a0fa16963063486f79bcb77c6f36de9f9048d366987009e7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:tests/test_linear8bitlt.py","uri":"program://bitsandbytes/file/tests/test_linear8bitlt.py","kind":"file","name":"tests/test_linear8bitlt.py","path":"tests/test_linear8bitlt.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from contextlib import nullcontext\nimport copy\nimport os\nimport pickle\nimport platform\nfrom tempfile import TemporaryDirectory\n\nimport pytest\nimport torch\n\nimport bitsandbytes as bnb\nfrom bitsandbytes.nn.modules import Linear8bitLt\nfrom tests.helpers import (\n TRUE_FALSE,\n get_available_devices,\n id_formatter,\n 
torch_load_from_buffer,\n torch_save_to_buffer,\n)\n\n","source_hash":"6782023e39803826c3932870757cbd57dadcb79783bc689e6b2496a441c45bb1","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:tests/test_parametrize.py","uri":"program://bitsandbytes/file/tests/test_parametrize.py","kind":"file","name":"tests/test_parametrize.py","path":"tests/test_parametrize.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import pytest\nimport torch\nimport torch.nn as nn\n\nfrom bitsandbytes import functional as F\nfrom bitsandbytes.cextension import HIP_ENVIRONMENT\nfrom bitsandbytes.nn.parametrize import (\n Bnb4bitParametrization,\n replace_parameter_4bit,\n replace_parameter_4bit_prequantized,\n)\nfrom tests.helpers import (\n TRUE_FALSE,\n describe_dtype,\n get_available_devices,\n id_formatter,\n is_supported_on_hpu,\n)\n\n\nclass ParametrizeTestModule(nn.Module):","source_hash":"d9c2ec16607ed41fb16dd5fdf9491951435e9507af6cb4ff4171d736c086e5ed","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:tests/test_modules.py","uri":"program://bitsandbytes/file/tests/test_modules.py","kind":"file","name":"tests/test_modules.py","path":"tests/test_modules.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import inspect\n\nimport pytest\nimport torch\nfrom torch import nn\n\nimport bitsandbytes as bnb\nfrom tests.helpers import get_available_devices, id_formatter, is_supported_on_hpu\n\n\nclass MockArgs:\n def __init__(self, initial_data):\n for key in initial_data:\n setattr(self, key, initial_data[key])\n\n\nclass MLP8bit(torch.nn.Module):\n def __init__(self, dim1, dim2, has_fp16_weights=True, threshold=0.0):\n super().__init__()\n self.fc1 = bnb.nn.Linear8bitLt(\n dim1,","source_hash":"3d6ec09b5a60bb5ac1a84e28c30d7dc58ee0ed8a6d9069d61b1c866feb6100b7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:tests/test_ops.py","uri":"program://bitsandbytes/file/tests/test_ops.py","kind":"file","name":"tests/test_ops.py","path":"tests/test_ops.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from math import prod\n\nimport pytest\nimport torch\n\nimport bitsandbytes\nfrom bitsandbytes.cextension import HIP_ENVIRONMENT\nfrom tests.helpers import TRUE_FALSE, get_available_devices, id_formatter, is_supported_on_hpu\n\n# torch.library.opcheck is only available in torch 2.4 and later.\n# When testing with older versions, we will skip it as a no-op.\nif torch.__version__ >= (2, 4):\n opcheck = torch.library.opcheck\nelse:\n opcheck = lambda *args, **kwargs: None\n\n\nclass TestLLMInt8Ops:\n @pytest.mark.parametrize(\"device\", get_available_devices())\n def test_int8_linear_matmul(self, device):\n A = torch.randint(-128, 127, (10, 20), dtype=torch.int8, device=device)","source_hash":"e8c6491c46599880959e5626bd0877e0b9427e63f53e514dedd0ceb866d8fe1d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:tests/test_autograd.py","uri":"program://bitsandbytes/file/tests/test_autograd.py","kind":"file","name":"tests/test_autograd.py","path":"tests/test_autograd.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import pytest\nimport torch\n\nimport bitsandbytes as bnb\nfrom tests.helpers import (\n BOOLEAN_TRIPLES,\n TRUE_FALSE,\n describe_dtype,\n get_available_devices,\n id_formatter,\n is_supported_on_hpu,\n)\n\nTRANSPOSE_VALS = [(False, True), (False, 
False)]\n\n\n@pytest.mark.parametrize(\"device\", get_available_devices())\n@pytest.mark.parametrize(\"dim1\", [40], ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"dim2\", [64, 0], ids=id_formatter(\"dim2\"))\n@pytest.mark.parametrize(\"dim3\", [32], ids=id_formatter(\"dim3\"))\n@pytest.mark.parametrize(\"dim4\", [48], ids=id_formatter(\"dim4\"))","source_hash":"f2957e3b46365e7265c162ce92a8d1ec149cc1c78e43e55cab7625afb5c767c1","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:tests/test_cuda_setup_evaluator.py","uri":"program://bitsandbytes/file/tests/test_cuda_setup_evaluator.py","kind":"file","name":"tests/test_cuda_setup_evaluator.py","path":"tests/test_cuda_setup_evaluator.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import pytest\n\nfrom bitsandbytes.cextension import HIP_ENVIRONMENT, get_cuda_bnb_library_path\nfrom bitsandbytes.cuda_specs import CUDASpecs\n\n\n@pytest.fixture\ndef cuda120_spec() -> CUDASpecs:\n return CUDASpecs(\n cuda_version_string=\"120\",\n highest_compute_capability=(8, 6),\n cuda_version_tuple=(12, 0),\n )\n\n\n@pytest.mark.skipif(HIP_ENVIRONMENT, reason=\"this test is not supported on ROCm\")\ndef test_get_cuda_bnb_library_path(monkeypatch, cuda120_spec):\n monkeypatch.delenv(\"BNB_CUDA_VERSION\", raising=False)\n assert get_cuda_bnb_library_path(cuda120_spec).stem == \"libbitsandbytes_cuda120\"\n\n","source_hash":"7ae884114f29cbfc9a69a6b7509832c75cf4093a069a6ed5f6f0cf141e807535","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:tests/test_optim.py","uri":"program://bitsandbytes/file/tests/test_optim.py","kind":"file","name":"tests/test_optim.py","path":"tests/test_optim.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import os\nfrom os.path import join\nimport shutil\nimport sys\nimport time\nimport uuid\n\nfrom lion_pytorch import Lion\nimport pytest\nimport torch\n\nimport bitsandbytes as bnb\nimport bitsandbytes.functional as F\nfrom bitsandbytes.utils import sync_gpu\nfrom tests.helpers import describe_dtype, get_available_devices, id_formatter\n\n# import apex\n\nk = 20\n\n","source_hash":"1685a1d8bbbaa8e5bad37417270e4335ad562a308d71beda23054ef9379cc808","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:tests/test_deprecated.py","uri":"program://bitsandbytes/file/tests/test_deprecated.py","kind":"file","name":"tests/test_deprecated.py","path":"tests/test_deprecated.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import pytest\nimport torch\n\nimport bitsandbytes as bnb\nfrom bitsandbytes import functional as F\nfrom tests.helpers import BOOLEAN_TRIPLES, describe_dtype, get_test_dims, id_formatter\nfrom tests.test_autograd import TRANSPOSE_VALS\n\n\n@pytest.mark.deprecated\ndef test_dynamic_quantization():\n diffs = []\n reldiffs = []\n for i in range(100):\n A1 = torch.randn(1024, 1024, device=\"cuda\")\n C, S = F.quantize(A1)\n A2 = F.dequantize(C, S)\n diff = torch.abs(A1 - A2)\n reldiff = diff / torch.abs(A1 + 1e-8)\n diffs.append(diff.mean().item())\n reldiffs.append(reldiff.mean().item())","source_hash":"0af044ebedb84e54edb6e503c4debe8156e01eff3c2fee7958fae251a44a193b","truncated":false} 
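Note: the tests/test_deprecated.py record above exercises the legacy F.quantize/F.dequantize round trip. Below is a minimal sketch of the same error measurement against the current blockwise API; it assumes a CUDA device is present and that quantize_blockwise/dequantize_blockwise keep their public (codes, quant_state) signatures.

import torch
import bitsandbytes.functional as F

# Round-trip error of 8-bit blockwise quantization (sketch, assumes CUDA).
A1 = torch.randn(1024, 1024, device="cuda")
C, state = F.quantize_blockwise(A1)   # returns (uint8 codes, quantization state)
A2 = F.dequantize_blockwise(C, state)

diff = (A1 - A2).abs()
print("mean abs err:", diff.mean().item())
print("mean rel err:", (diff / (A1.abs() + 1e-8)).mean().item())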
{"repo_id":"bitsandbytes","entity_id":"file:tests/test_generation.py","uri":"program://bitsandbytes/file/tests/test_generation.py","kind":"file","name":"tests/test_generation.py","path":"tests/test_generation.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from itertools import product\nimport math\n\nimport pytest\nimport torch\n\nfrom tests.helpers import TRUE_FALSE, describe_dtype, id_formatter\n\ntransformers = pytest.importorskip(\"transformers\")\n\n\ndef get_4bit_config():\n return transformers.BitsAndBytesConfig(\n load_in_4bit=True,\n load_in_8bit=False,\n llm_int8_threshold=6.0,\n llm_int8_has_fp16_weight=False,\n bnb_4bit_compute_dtype=torch.float16,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n )","source_hash":"75833380ed4dfd2bf6992aeb8337cd978ac12ea1e73ce2436527958ec3cf152e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:tests/test_triton.py","uri":"program://bitsandbytes/file/tests/test_triton.py","kind":"file","name":"tests/test_triton.py","path":"tests/test_triton.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import pytest\nimport torch\n\nfrom bitsandbytes.nn import Linear8bitLt\nfrom bitsandbytes.nn.triton_based_modules import SwitchBackLinear\nfrom bitsandbytes.triton.triton_utils import is_triton_available\nfrom tests.helpers import TRUE_FALSE\n\n\n@pytest.mark.skipif(\n not is_triton_available() or not torch.cuda.is_available() or not torch.cuda.get_device_capability()[0] >= 8,\n reason=\"This test requires triton and a GPU with compute capability 8.0 or higher.\",\n)\n@pytest.mark.deprecated\n@pytest.mark.parametrize(\"vector_wise_quantization\", TRUE_FALSE)\ndef test_switchback(vector_wise_quantization):\n for dim in [83]:\n for batch in [13]:\n standard = torch.nn.Linear(dim, 4 * dim).cuda().half()\n switchback = (\n SwitchBackLinear(dim, 4 * dim, vector_wise_quantization=vector_wise_quantization).cuda().half()","source_hash":"e062201eb80ecb8b33be857dd321227f5ed900e07f751fda0b3f0baf21428e94","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:tests/test_functional.py","uri":"program://bitsandbytes/file/tests/test_functional.py","kind":"file","name":"tests/test_functional.py","path":"tests/test_functional.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import math\nimport platform\nimport random\nimport time\n\nimport einops\nfrom packaging import version\nimport pytest\nimport torch\n\nimport bitsandbytes as bnb\nfrom bitsandbytes import functional as F\nfrom bitsandbytes.cextension import HIP_ENVIRONMENT, ROCM_GPU_ARCH\nfrom tests.helpers import (\n BOOLEAN_TUPLES,\n TRUE_FALSE,\n describe_dtype,\n get_available_devices,\n get_test_dims,\n id_formatter,\n is_supported_on_hpu,","source_hash":"f5394bdba2d8221a3106ebe7614fb17181b58bbc52dbeb4d354aadc6223c3bc5","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:tests/test_linear4bit.py","uri":"program://bitsandbytes/file/tests/test_linear4bit.py","kind":"file","name":"tests/test_linear4bit.py","path":"tests/test_linear4bit.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import copy\nimport os\nimport pickle\nimport platform\nfrom tempfile import TemporaryDirectory\n\nimport pytest\nimport torch\n\nimport bitsandbytes as bnb\nfrom bitsandbytes.cextension import HIP_ENVIRONMENT\nfrom tests.helpers import (\n TRUE_FALSE,\n 
describe_dtype,\n get_available_devices,\n id_formatter,\n is_supported_on_hpu,\n torch_load_from_buffer,\n torch_save_to_buffer,\n)\n","source_hash":"8d572fe895ecade4570cfeec5d70db31d27cc260bf9ee0324095dd5af4da61cf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:examples/compile_inference.py","uri":"program://bitsandbytes/file/examples/compile_inference.py","kind":"file","name":"examples/compile_inference.py","path":"examples/compile_inference.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import torch\nimport torch._dynamo\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig\n\n# torch._dynamo.config.suppress_errors = True\n\ntorch.set_float32_matmul_precision(\"high\")\n\nquantization_config = BitsAndBytesConfig(load_in_8bit=True)\n\n# torch._dynamo.config.capture_dynamic_output_shape_ops = True\n\nmodel_id = \"google/gemma-2-2b-it\"\n# model_id = \"Qwen/Qwen2.5-7B\"\n\ntokenizer = AutoTokenizer.from_pretrained(model_id)\nmodel = AutoModelForCausalLM.from_pretrained(\n model_id,\n quantization_config=quantization_config,\n device_map=\"auto\",\n torch_dtype=torch.bfloat16,","source_hash":"c6c2f00316839af200111d05ba467bcf6a8db180b7522cd7299b914474876020","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:examples/int8_inference_huggingface.py","uri":"program://bitsandbytes/file/examples/int8_inference_huggingface.py","kind":"file","name":"examples/int8_inference_huggingface.py","path":"examples/int8_inference_huggingface.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":19,"code":"import torch\nfrom transformers import LlamaForCausalLM, LlamaTokenizer\n\nMAX_NEW_TOKENS = 128\nmodel_name = \"meta-llama/Llama-2-7b-hf\"\n\ntext = \"Hamburg is in which country?\\n\"\ntokenizer = LlamaTokenizer.from_pretrained(model_name)\ninput_ids = tokenizer(text, return_tensors=\"pt\").input_ids\n\nmax_memory = f\"{int(torch.cuda.mem_get_info()[0] / 1024**3) - 2}GB\"\n\nn_gpus = torch.cuda.device_count()\nmax_memory = {i: max_memory for i in range(n_gpus)}\n\nmodel = LlamaForCausalLM.from_pretrained(model_name, device_map=\"auto\", load_in_8bit=True, max_memory=max_memory)\n\ngenerated_ids = model.generate(input_ids, max_length=MAX_NEW_TOKENS)\nprint(tokenizer.decode(generated_ids[0], skip_special_tokens=True))","source_hash":"719ba7d86b5082cfd9e137ced3f117544f8cac4fd65ea17bb797bf509d52b401","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/cuda_specs.py","uri":"program://bitsandbytes/file/bitsandbytes/cuda_specs.py","kind":"file","name":"bitsandbytes/cuda_specs.py","path":"bitsandbytes/cuda_specs.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import dataclasses\nfrom functools import lru_cache\nimport logging\nimport re\nimport subprocess\nfrom typing import Optional\n\nimport torch\n\n\n@dataclasses.dataclass(frozen=True)\nclass CUDASpecs:\n highest_compute_capability: tuple[int, int]\n cuda_version_string: str\n cuda_version_tuple: tuple[int, int]\n\n @property\n def has_imma(self) -> bool:\n return torch.version.hip or self.highest_compute_capability >= (7, 5)\n\n","source_hash":"dccd31d07340ecf25ce63ea5ec5cf51ecc6415175b2a5a57b09765c8c7aa0c5d","truncated":false} 
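The bitsandbytes/cuda_specs.py record above defines the frozen CUDASpecs dataclass, and the cextension.py record further down imports get_cuda_specs from the same module. A hedged sketch of probing the detected setup follows; it assumes get_cuda_specs() returns None when no usable CUDA/ROCm device is found.

from bitsandbytes.cuda_specs import get_cuda_specs

# Sketch: report what bitsandbytes detected (assumes get_cuda_specs()
# returns None without a usable device).
specs = get_cuda_specs()
if specs is None:
    print("No CUDA device detected.")
else:
    print("compute capability:", specs.highest_compute_capability)
    print("CUDA version:", specs.cuda_version_string)
    print("IMMA int8 support:", bool(specs.has_imma))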
{"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/_ops.py","uri":"program://bitsandbytes/file/bitsandbytes/_ops.py","kind":"file","name":"bitsandbytes/_ops.py","path":"bitsandbytes/_ops.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from collections.abc import Sequence\nfrom math import prod\nfrom typing import Optional\n\nimport torch\n\n_IS_TORCH_GTE_24 = False\n\nif hasattr(torch.library, \"register_fake\"):\n _IS_TORCH_GTE_24 = True\n register_fake = torch.library.register_fake\n register_kernel = torch.library.register_kernel\nelse:\n # PyTorch <= 2.3\n register_fake = torch.library.impl_abstract\n register_kernel = torch.library.impl\n\n# Int8 mixed precision matmul + dequant + bias\ntorch.library.define(\n \"bitsandbytes::int8_mixed_scaled_mm\",\n \"(Tensor A, Tensor CA, Tensor CB, Tensor SCA, Tensor SCB, Tensor? outlier_cols=None, Tensor? bias=None) -> (Tensor, Tensor?)\",","source_hash":"343fe18d26c8a3e6df8e482e5bcc5e77d1d3137ad4f0f523440a2064db1dbcf4","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/functional.py","uri":"program://bitsandbytes/file/bitsandbytes/functional.py","kind":"file","name":"bitsandbytes/functional.py","path":"bitsandbytes/functional.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom collections.abc import Iterable\nimport ctypes as ct\nimport itertools\nfrom math import prod\nfrom typing import Any, Optional, Union\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import deprecated\n\nfrom bitsandbytes.utils import pack_dict_to_tensor, unpack_tensor_to_dict\n\nfrom .cextension import HIP_ENVIRONMENT, lib\n\nname2qmap = {}\n","source_hash":"5605cff3b8ffc5a7671830053a131c15967b398a0bde19a0fbf1c13e7cbe3faf","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/__init__.py","uri":"program://bitsandbytes/file/bitsandbytes/__init__.py","kind":"file","name":"bitsandbytes/__init__.py","path":"bitsandbytes/__init__.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport importlib\nimport sys\n\nimport torch\n\nfrom . 
import _ops, research, utils\nfrom .autograd._functions import (\n MatmulLtState,\n matmul,\n matmul_4bit,\n)\nfrom .backends.cpu import ops as cpu_ops\nfrom .backends.default import ops as default_ops\nfrom .nn import modules\nfrom .optim import adam","source_hash":"ac47d0d61226a5411092c59ababba2505e599467f3f8f92eddabd995b7ef7ff1","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/__main__.py","uri":"program://bitsandbytes/file/bitsandbytes/__main__.py","kind":"file","name":"bitsandbytes/__main__.py","path":"bitsandbytes/__main__.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":4,"code":"if __name__ == \"__main__\":\n from bitsandbytes.diagnostics.main import main\n\n main()","source_hash":"037880f501fcc9f9feeb4b8deb0ffb5948ae44833befbad9fe97ff1ec66db1ee","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/utils.py","uri":"program://bitsandbytes/file/bitsandbytes/utils.py","kind":"file","name":"bitsandbytes/utils.py","path":"bitsandbytes/utils.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import json\nimport shlex\nimport subprocess\n\nimport torch\n\n\ndef outlier_hook(module, input):\n assert isinstance(module, torch.nn.Linear)\n tracer = OutlierTracer.get_instance()\n hvalue = tracer.get_hvalue(module.weight)\n if hvalue not in tracer.hvalue2outlier_idx:\n outlier_idx = find_outlier_dims(module.weight)\n tracer.outliers.append(outlier_idx)\n tracer.hvalues.append(hvalue)\n if len(tracer.outliers) > 1:\n # assign the current layer the outlier idx found from the weight\n # of the previous linear layer\n if tracer.outliers[-1].numel() > 0:\n assert tracer.outliers[-1].max() < module.weight.shape[1]\n tracer.hvalue2outlier_idx[hvalue] = tracer.outliers[-1]","source_hash":"32f5b1b67b5863eff7753df611fdd08575cbafcf70a7c241fd1c8fee2bd2fdb8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/consts.py","uri":"program://bitsandbytes/file/bitsandbytes/consts.py","kind":"file","name":"bitsandbytes/consts.py","path":"bitsandbytes/consts.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":12,"code":"from pathlib import Path\nimport platform\n\nDYNAMIC_LIBRARY_SUFFIX = {\n \"Darwin\": \".dylib\",\n \"Linux\": \".so\",\n \"Windows\": \".dll\",\n}.get(platform.system(), \".so\")\n\nPACKAGE_DIR = Path(__file__).parent\nPACKAGE_GITHUB_URL = \"https://github.com/TimDettmers/bitsandbytes\"\nNONPYTORCH_DOC_URL = \"https://github.com/TimDettmers/bitsandbytes/blob/main/docs/source/nonpytorchcuda.mdx\"","source_hash":"c5f0da75001ac7e99ac6a907ad535b67a5084c9a12d1781c51fa299a32e08a53","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/cextension.py","uri":"program://bitsandbytes/file/bitsandbytes/cextension.py","kind":"file","name":"bitsandbytes/cextension.py","path":"bitsandbytes/cextension.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import ctypes as ct\nimport functools\nimport logging\nimport os\nfrom pathlib import Path\nimport re\nfrom typing import Optional\n\nimport torch\n\nfrom bitsandbytes.consts import DYNAMIC_LIBRARY_SUFFIX, PACKAGE_DIR\nfrom bitsandbytes.cuda_specs import CUDASpecs, get_cuda_specs, get_cuda_version_tuple, get_rocm_gpu_arch\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_cuda_bnb_library_path(cuda_specs: CUDASpecs) -> Path:\n \"\"\"\n Get the disk path to the CUDA BNB native 
library specified by the\n given CUDA specs, taking into account the `BNB_CUDA_VERSION` override environment variable.\n","source_hash":"77ea667c12a169f484de54d6e13d7fc4411c79844c0a9bdb6a99d3d5c8f8de7d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/backends/utils.py","uri":"program://bitsandbytes/file/bitsandbytes/backends/utils.py","kind":"file","name":"bitsandbytes/backends/utils.py","path":"bitsandbytes/backends/utils.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import subprocess\n\nfrom packaging import version\nimport torch\n\ntry:\n import triton # noqa: F401\n import triton.language as tl # noqa: F401\n\n triton_available = True\nexcept ImportError:\n triton_available = False\n\n\n_NF4_QUANT_TABLE = torch.tensor(\n [\n -1.0,\n -0.6961928009986877,\n -0.5250730514526367,\n -0.39491748809814453,\n -0.28444138169288635,","source_hash":"242b8fed686cc3a2d52b0b2e86b321d5540ee3932ae02dbb739c5c23dca5edf3","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/backends/default/ops.py","uri":"program://bitsandbytes/file/bitsandbytes/backends/default/ops.py","kind":"file","name":"bitsandbytes/backends/default/ops.py","path":"bitsandbytes/backends/default/ops.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from collections.abc import Sequence\nfrom math import prod, sqrt\nfrom typing import Optional\n\nimport torch\n\nfrom ..._ops import register_kernel\nfrom ..utils import CODE\n\n\n@register_kernel(\"bitsandbytes::int8_mm_dequant\", \"default\")\ndef _(\n A: torch.Tensor,\n row_stats: torch.Tensor,\n col_stats: torch.Tensor,\n dtype: Optional[torch.dtype] = None,\n bias: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n torch._check(A.dtype == torch.int32, lambda: f\"A must be int32, got {A.dtype}\")\n torch._check(row_stats.dtype == torch.float32, lambda: f\"row_stats must be float32, got {row_stats.dtype}\")\n torch._check(col_stats.dtype == torch.float32, lambda: f\"col_stats must be float32, got {col_stats.dtype}\")","source_hash":"8955f88af54dada2f560f1c4fb88d659605be37ed6be2091583c9422521298c8","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/backends/hpu/ops.py","uri":"program://bitsandbytes/file/bitsandbytes/backends/hpu/ops.py","kind":"file","name":"bitsandbytes/backends/hpu/ops.py","path":"bitsandbytes/backends/hpu/ops.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from collections.abc import Sequence\nimport math\n\nimport torch\n\nfrom ..._ops import register_kernel\nfrom ..utils import GAUDI_SW_VER\n\n\n# convert btw standard 4-bit compression format and ipex compression format\n# needed for backward compatibility with older versions of gaudi sw\ndef _reverse_4bit_compress_format(weight: torch.Tensor):\n out_1 = (weight & 0xF0) >> 4\n out_2 = (weight & 0xF) << 4\n out = out_1 | out_2\n return out\n\n\n@register_kernel(\"bitsandbytes::dequantize_4bit\", \"hpu\")\ndef _(\n A: torch.Tensor,","source_hash":"92d95b146a73a4050c962966f053d6f8433272d48625d6612d8551689ddc0c30","truncated":false} 
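The backends/hpu/ops.py record above quotes _reverse_4bit_compress_format, which swaps the two 4-bit nibbles packed into each byte to convert between compression layouts. A self-contained sketch showing the swap is its own inverse:

import torch

def reverse_4bit_compress_format(weight: torch.Tensor) -> torch.Tensor:
    # Swap the high and low nibble of every uint8 byte (two packed 4-bit values).
    return ((weight & 0xF0) >> 4) | ((weight & 0x0F) << 4)

packed = torch.randint(0, 256, (8,), dtype=torch.uint8)
# Applying the nibble swap twice restores the original packed bytes.
assert torch.equal(reverse_4bit_compress_format(reverse_4bit_compress_format(packed)), packed)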
{"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/backends/triton/kernels_4bit.py","uri":"program://bitsandbytes/file/bitsandbytes/backends/triton/kernels_4bit.py","kind":"file","name":"bitsandbytes/backends/triton/kernels_4bit.py","path":"bitsandbytes/backends/triton/kernels_4bit.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import torch\n\nimport triton\nimport triton.language as tl\n\n\n# Triton implementation of similar CUDA kernel to avoid loading code from csrc/kernels.cu::dQuantizeFP4\n# @triton.autotune(\n# configs=[\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 1, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 2, \"grf_mode\": \"auto\"}, num_stages=4, num_warps=32),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 1}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 2}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 4}),\n# triton.Config({\"SPLIT_NUM_BLOCKS\": 8}),\n# ],\n# key=[\"n_elements\"],\n# )\n@triton.jit\ndef quantize_fp4_blockwise_kernel(\n A_ptr,","source_hash":"5d8c183b47a790f1b2fc940c2f000f0259533d2a1e032aecb11931c52b62887a","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/backends/triton/ops.py","uri":"program://bitsandbytes/file/bitsandbytes/backends/triton/ops.py","kind":"file","name":"bitsandbytes/backends/triton/ops.py","path":"bitsandbytes/backends/triton/ops.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from collections.abc import Sequence\nfrom typing import Optional\n\nimport torch\n\nfrom . import kernels_4bit, kernels_8bit_quant, kernels_optim\n\n# currently codes unused, kept for reference\n# Should be the same for quant/dequant\n# from bitsandbytes.functional import get_4bit_type\n# _FP4_QUANT_TABLE = get_4bit_type(\"fp4\", device=\"xpu\")\n# _NF4_QUANT_TABLE = get_4bit_type(\"nf4\", device=\"xpu\")\ndevice_type = torch.accelerator.current_accelerator().type if hasattr(torch, \"accelerator\") else \"cuda\"\ntorch_accelerator_module = getattr(torch, device_type, torch.cuda)\n\n\ndef quantize_blockwise(A: torch.Tensor, code: torch.Tensor, blocksize: int) -> tuple[torch.Tensor, torch.Tensor]:\n torch._check_is_size(blocksize)\n # torch._check(A.dtype == torch.float32, lambda: f\"A must be float32 on xpu, got {A.dtype}\")\n with torch_accelerator_module.device(A.device):\n out, absmax = kernels_8bit_quant.quantize_blockwise_triton(A, code, blocksize)","source_hash":"17b36ad387b85c667f35f21184e6944e0e5a0ed32046970e9be1899ca3120c17","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/backends/triton/kernels_optim.py","uri":"program://bitsandbytes/file/bitsandbytes/backends/triton/kernels_optim.py","kind":"file","name":"bitsandbytes/backends/triton/kernels_optim.py","path":"bitsandbytes/backends/triton/kernels_optim.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import math\nfrom typing import Optional\n\nimport torch\n\nimport triton\nimport triton.language as tl\n\n# from triton.language.extra import libdevice\nfrom .kernels_8bit_quant import (\n dequant_8bit_blockwise,\n dequant_8bit_blockwise_kernel_util,\n quantize_8bit_blockwise_kernel_util,\n quantize_blockwise_triton,\n)\n\nMOMENTUM = 0\nRMSPROP = 1\nADAGRAD = 2\nADAM = 3\n# LION should be larger than MOMENTUM, RMSPROP, ADAGRAD due to comparison in kernels","source_hash":"5929c0239b7327ada8f7185668fa6a7727b0c83759483613bd0b541d7b040ab5","truncated":false} 
{"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/backends/triton/kernels_8bit_quant.py","uri":"program://bitsandbytes/file/bitsandbytes/backends/triton/kernels_8bit_quant.py","kind":"file","name":"bitsandbytes/backends/triton/kernels_8bit_quant.py","path":"bitsandbytes/backends/triton/kernels_8bit_quant.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import torch\n\nimport triton\nimport triton.language as tl\n\n\n# @triton.autotune(\n# configs=[\n# # triton.Config({'SPLIT_SIZE': 64}),\n# # triton.Config({'SPLIT_SIZE': 64, 'grf_mode': 'large'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 64, 'grf_mode': 'auto'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 64, 'grf_mode': 'large'}, num_stages=4, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 64, 'grf_mode': 'auto'}, num_stages=4, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 128}),\n# # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'large'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'auto'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'large'}, num_stages=4, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 128, 'grf_mode': 'auto'}, num_stages=4, num_warps=32),\n# triton.Config({\"SPLIT_SIZE\": 256}),\n# # triton.Config({'SPLIT_SIZE': 256, 'grf_mode': 'large'}, num_stages=2, num_warps=32),\n# # triton.Config({'SPLIT_SIZE': 256, 'grf_mode': 'auto'}, num_stages=2, num_warps=32),","source_hash":"92774d59c8f4ffe143b52997529c9ac5fc4f7c73831aafd6cf9d425de9a3adfc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/backends/cpu/ops.py","uri":"program://bitsandbytes/file/bitsandbytes/backends/cpu/ops.py","kind":"file","name":"bitsandbytes/backends/cpu/ops.py","path":"bitsandbytes/backends/cpu/ops.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import ctypes as ct\nimport logging\n\nimport torch\n\nfrom bitsandbytes.functional import get_ptr\n\nfrom ..._ops import register_kernel\nfrom ...cextension import ErrorHandlerMockBNBNativeLibrary, lib\n\nlogger = logging.getLogger(__name__)\n\n# torch._int_mm for s8@s8->s32 is supported on CPU from torch 2.4+.\n# However, we can overflow if we use this without AVX512_VNNI support.\n# This is fixed in torch 2.6+, so we set this as the minimum to be safe.\n# For more information: https://github.com/pytorch/pytorch/pull/136942\n# TODO(matthewdouglas): aarch64?\nif torch.__version__ >= (2, 6):\n\n @register_kernel(\"bitsandbytes::int8_linear_matmul\", \"cpu\")\n def _(A: torch.Tensor, B: torch.Tensor):","source_hash":"386883e4f935273743d6426b5a53ffc55bc3686f898041efbf234ba2d9962504","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/backends/cuda/ops.py","uri":"program://bitsandbytes/file/bitsandbytes/backends/cuda/ops.py","kind":"file","name":"bitsandbytes/backends/cuda/ops.py","path":"bitsandbytes/backends/cuda/ops.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from collections.abc import Sequence\nimport ctypes as ct\nfrom math import prod\nfrom typing import Optional\n\nimport torch\n\nfrom bitsandbytes.functional import CUBLAS_Context, _cuda_device_of, _get_tensor_stream, get_ptr\n\nfrom ..._ops import register_kernel\nfrom ...cextension import HIP_ENVIRONMENT, lib\n\n\n@register_kernel(\"bitsandbytes::int8_linear_matmul\", \"cuda\")\ndef _(A: torch.Tensor, B: 
torch.Tensor):\n out = torch.empty((*A.shape[:-1], B.shape[0]), device=A.device, dtype=torch.int32)\n return _int8_linear_matmul_impl(A, B, out)\n\n\n@register_kernel(\"bitsandbytes::int8_linear_matmul.out\", \"cuda\")\ndef _(A: torch.Tensor, B: torch.Tensor, out: torch.Tensor):","source_hash":"c31f6e0a548a168f99abf902ee246860724607807eee83c03954002c35336ed6","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/backends/xpu/ops.py","uri":"program://bitsandbytes/file/bitsandbytes/backends/xpu/ops.py","kind":"file","name":"bitsandbytes/backends/xpu/ops.py","path":"bitsandbytes/backends/xpu/ops.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from collections.abc import Sequence\nimport ctypes as ct\nimport logging\n\nfrom packaging import version\nimport torch\n\nfrom bitsandbytes.functional import _get_tensor_stream, get_ptr\n\nfrom ..._ops import register_kernel\nfrom ...cextension import ErrorHandlerMockBNBNativeLibrary, lib\nfrom ..utils import triton_available\n\nlogger = logging.getLogger(__name__)\n\n# _int_mm is available in torch starting from 2.9 version\nif version.parse(torch.__version__).release >= version.parse(\"2.9\").release:\n\n @register_kernel(\"bitsandbytes::int8_linear_matmul\", \"xpu\")\n def _(A: torch.Tensor, B: torch.Tensor):\n return torch._int_mm(","source_hash":"f55e99efbc3b835e8a6293b713803dc3ebce7225b1b5ef01b1dadf8c2342377e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/triton/int8_matmul_rowwise_dequantize.py","uri":"program://bitsandbytes/file/bitsandbytes/triton/int8_matmul_rowwise_dequantize.py","kind":"file","name":"bitsandbytes/triton/int8_matmul_rowwise_dequantize.py","path":"bitsandbytes/triton/int8_matmul_rowwise_dequantize.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import torch\n\nfrom bitsandbytes.triton.triton_utils import is_triton_available\n\nif not is_triton_available():\n\n def int8_matmul_rowwise_dequantize(a, b, state_x, state_w, bias):\n return None\nelse:\n import triton\n import triton.language as tl\n\n from .matmul_perf_model import early_config_prune, estimate_matmul_time\n\n # This is a matmul kernel based on triton.ops.matmul\n # It is modified to support rowwise quantized input and columnwise quantized weight\n # It's purpose is fused matmul then dequantize\n # It does support bias.\n\n def init_to_zero(name):\n return lambda nargs: nargs[name].zero_()","source_hash":"dad172677f7537ee1e30efc8b57f56e631d8536ac85d6ee8a433937ae982a177","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/triton/triton_utils.py","uri":"program://bitsandbytes/file/bitsandbytes/triton/triton_utils.py","kind":"file","name":"bitsandbytes/triton/triton_utils.py","path":"bitsandbytes/triton/triton_utils.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":11,"code":"import functools\n\n\n@functools.lru_cache(None)\ndef is_triton_available():\n try:\n from torch.utils._triton import has_triton, has_triton_package\n\n return has_triton_package() and has_triton()\n except Exception:\n return False","source_hash":"74baf2369f2dafd058d584ed503762ddfbfd90025851b89ea82f10a6b956f5bc","truncated":false} 
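bitsandbytes/triton/triton_utils.py above caches its availability probe with functools.lru_cache, and wrappers such as int8_matmul_rowwise_dequantize define None-returning stubs when triton is missing. A compact sketch of that probe-and-stub pattern (function names hypothetical):

import functools

@functools.lru_cache(None)
def fast_path_available() -> bool:
    # Probe once and cache the answer; any import error means "unavailable".
    try:
        import triton  # noqa: F401
        return True
    except Exception:
        return False

if not fast_path_available():
    def fused_matmul(a, b):
        # Stub mirroring the wrappers above: callers treat a None result
        # as "take the unfused fallback path".
        return None
else:
    def fused_matmul(a, b):
        return a @ b  # stand-in for the real fused triton kernel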
{"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/triton/quantize_global.py","uri":"program://bitsandbytes/file/bitsandbytes/triton/quantize_global.py","kind":"file","name":"bitsandbytes/triton/quantize_global.py","path":"bitsandbytes/triton/quantize_global.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import torch\n\nfrom bitsandbytes.triton.triton_utils import is_triton_available\n\nif not is_triton_available():\n\n def quantize_global_transpose(input):\n return None\n\n def quantize_global(x: torch.Tensor):\n return None\nelse:\n import triton\n import triton.language as tl\n\n # global quantize\n @triton.autotune(\n configs=[\n triton.Config({\"BLOCK_SIZE\": 1024}, num_warps=4),\n triton.Config({\"BLOCK_SIZE\": 2048}, num_stages=1),\n ],","source_hash":"eb761777b41fe7aea95cf8d8c826a5ec7e3ffc6bd681f43525bbdff495176ac4","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/triton/quantize_columnwise_and_transpose.py","uri":"program://bitsandbytes/file/bitsandbytes/triton/quantize_columnwise_and_transpose.py","kind":"file","name":"bitsandbytes/triton/quantize_columnwise_and_transpose.py","path":"bitsandbytes/triton/quantize_columnwise_and_transpose.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import math\n\nimport torch\n\nfrom bitsandbytes.triton.triton_utils import is_triton_available\n\nif not is_triton_available():\n\n def quantize_columnwise_and_transpose(x: torch.Tensor):\n return None\nelse:\n import triton\n import triton.language as tl\n\n # This kernel does fused columnwise quantization and transpose.\n\n # TODO: autotune this better.\n @triton.autotune(\n configs=[\n triton.Config({}, num_stages=1),\n triton.Config({}, num_stages=2),","source_hash":"c09bbb8aa605d84ab832f5da7d302b5b73219421dae9ecc531efa283a247fb4f","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/triton/quantize_rowwise.py","uri":"program://bitsandbytes/file/bitsandbytes/triton/quantize_rowwise.py","kind":"file","name":"bitsandbytes/triton/quantize_rowwise.py","path":"bitsandbytes/triton/quantize_rowwise.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import math\n\nimport torch\n\nfrom bitsandbytes.triton.triton_utils import is_triton_available\n\nif not is_triton_available():\n\n def quantize_rowwise(x: torch.Tensor):\n return None\nelse:\n import triton\n import triton.language as tl\n\n # rowwise quantize\n\n # TODO: autotune this better.\n @triton.autotune(\n configs=[\n triton.Config({}, num_stages=1, num_warps=8),\n triton.Config({}, num_stages=2, num_warps=8),","source_hash":"8a940efb13093778ac9a0a646dafc13ad26c924776d5d8be00149ac39f52bcda","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/triton/dequantize_rowwise.py","uri":"program://bitsandbytes/file/bitsandbytes/triton/dequantize_rowwise.py","kind":"file","name":"bitsandbytes/triton/dequantize_rowwise.py","path":"bitsandbytes/triton/dequantize_rowwise.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import math\n\nimport torch\n\nfrom bitsandbytes.triton.triton_utils import is_triton_available\n\nif not is_triton_available():\n\n def dequantize_rowwise(x: torch.Tensor, state_x: torch.Tensor):\n return None\nelse:\n import triton\n import triton.language as tl\n\n # rowwise quantize\n\n # TODO: autotune this better.\n @triton.autotune(\n 
configs=[\n triton.Config({}, num_stages=1, num_warps=8),\n triton.Config({}, num_stages=2, num_warps=8),","source_hash":"fee3536265b033b4519097ecdd75c3c7ccb3860e478924e91bd0aaccec860222","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/triton/int8_matmul_mixed_dequantize.py","uri":"program://bitsandbytes/file/bitsandbytes/triton/int8_matmul_mixed_dequantize.py","kind":"file","name":"bitsandbytes/triton/int8_matmul_mixed_dequantize.py","path":"bitsandbytes/triton/int8_matmul_mixed_dequantize.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import torch\n\nfrom bitsandbytes.triton.triton_utils import is_triton_available\n\nif not is_triton_available():\n\n def int8_matmul_mixed_dequantize(a, b, state_x, state_w, bias):\n return None\nelse:\n import triton\n import triton.language as tl\n\n from .matmul_perf_model import early_config_prune, estimate_matmul_time\n\n # This is a matmul kernel based on triton.ops.matmul\n # It is modified to support rowwise quantized input and global quantized weight\n # It's purpose is fused matmul then dequantize\n # It does support bias.\n\n def init_to_zero(name):\n return lambda nargs: nargs[name].zero_()","source_hash":"00d38ef8d3adde6f165ea22c56ece9f24acb177e88b926ea3e85a75aae1db6de","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/triton/matmul_perf_model.py","uri":"program://bitsandbytes/file/bitsandbytes/triton/matmul_perf_model.py","kind":"file","name":"bitsandbytes/triton/matmul_perf_model.py","path":"bitsandbytes/triton/matmul_perf_model.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Adapted from https://github.com/triton-lang/kernels/blob/eeeebdd8be7d13629de22d600621e6234057eed3/kernels/matmul_perf_model.py\n# https://github.com/triton-lang/kernels is licensed under the MIT License.\n\nimport functools\nimport heapq\n\nimport torch\n\nfrom triton import cdiv\nfrom triton.runtime import driver\nfrom triton.testing import (\n get_dram_gbps,\n get_max_simd_tflops,\n get_max_tensorcore_tflops,\n nvsmi,\n)\n\n\n@functools.lru_cache\ndef get_clock_rate_in_khz():\n try:","source_hash":"bbbdc6a7678a663347d0f1396e2a80a92d9c572b49c6196e2fffa2a954110371","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/research/__init__.py","uri":"program://bitsandbytes/file/bitsandbytes/research/__init__.py","kind":"file","name":"bitsandbytes/research/__init__.py","path":"bitsandbytes/research/__init__.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":6,"code":"from . 
import nn\nfrom .autograd._functions import (\n matmul_fp8_global,\n matmul_fp8_mixed,\n switchback_bnb,\n)","source_hash":"ed89eefc99ec73d58c12f5d49118833f41123f0d4f7e3893be77f346dd48eda9","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/research/nn/__init__.py","uri":"program://bitsandbytes/file/bitsandbytes/research/nn/__init__.py","kind":"file","name":"bitsandbytes/research/nn/__init__.py","path":"bitsandbytes/research/nn/__init__.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":1,"code":"from .modules import LinearFP8Global, LinearFP8Mixed","source_hash":"0fbf6867b9428888b5cc2f88e994450c64b9c07bbd47abee96588006df1af12e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/research/nn/modules.py","uri":"program://bitsandbytes/file/bitsandbytes/research/nn/modules.py","kind":"file","name":"bitsandbytes/research/nn/modules.py","path":"bitsandbytes/research/nn/modules.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from typing import TypeVar\n\nimport torch\nfrom torch import nn\n\nimport bitsandbytes as bnb\n\nT = TypeVar(\"T\", bound=\"torch.nn.Module\")\n\n\nclass LinearFP8Mixed(nn.Linear):\n def __init__(self, input_features, output_features, bias=True):\n super().__init__(input_features, output_features, bias)\n self.bw_code = None\n self.fw_code = None\n array = [4096, 2048, 1024, 512, 256, 128, 64, 0]\n for i, k in enumerate(array):\n if input_features > array[i + 1]:\n self.bsz = k\n break\n for i, k in enumerate(array):","source_hash":"d6e228ea96e0bf873160de9367ca61a5e525ffb1bb0308c2665e3568e1bb8a53","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/research/autograd/_functions.py","uri":"program://bitsandbytes/file/bitsandbytes/research/autograd/_functions.py","kind":"file","name":"bitsandbytes/research/autograd/_functions.py","path":"bitsandbytes/research/autograd/_functions.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from functools import reduce # Required in Python 3\nimport operator\nfrom typing import Optional\nimport warnings\n\nimport torch\n\nfrom bitsandbytes.autograd._functions import GlobalOutlierPooler, MatmulLtState\nimport bitsandbytes.functional as F\n\n\n# math.prod not compatible with python < 3.8\ndef prod(iterable):\n return reduce(operator.mul, iterable, 1)\n\n\nclass MatMulFP8Mixed(torch.autograd.Function):\n # forward is the same, but we added the fallback for pre-turing GPUs\n # backward is mostly the same, but adds one extra clause (see \"elif state.CxB is not None\")\n\n @staticmethod","source_hash":"8c5047bbe36a6b6d5b6837dd58ed06f214467941a8499227b81ba673117413bc","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/optim/rmsprop.py","uri":"program://bitsandbytes/file/bitsandbytes/optim/rmsprop.py","kind":"file","name":"bitsandbytes/optim/rmsprop.py","path":"bitsandbytes/optim/rmsprop.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom bitsandbytes.optim.optimizer import Optimizer1State\n\n\nclass RMSprop(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-2,\n alpha=0.99,\n eps=1e-8,\n weight_decay=0,\n momentum=0,\n centered=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,","source_hash":"d9111e77e0669cd46be0938bde2d6bda5689c7726a8d047ff3950081cc597631","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/optim/sgd.py","uri":"program://bitsandbytes/file/bitsandbytes/optim/sgd.py","kind":"file","name":"bitsandbytes/optim/sgd.py","path":"bitsandbytes/optim/sgd.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom bitsandbytes.optim.optimizer import Optimizer1State\n\n\nclass SGD(Optimizer1State):\n def __init__(\n self,\n params,\n lr,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,","source_hash":"a88ba2bbbb5e98b90463fb0886a382c1e6c66005e9a0910da5404c74f2371e1d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/optim/adamw.py","uri":"program://bitsandbytes/file/bitsandbytes/optim/adamw.py","kind":"file","name":"bitsandbytes/optim/adamw.py","path":"bitsandbytes/optim/adamw.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom bitsandbytes.optim.optimizer import Optimizer2State\n\n\nclass AdamW(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=1e-2,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,","source_hash":"58cb811e36c96edf9e931427662c697abdbfbada58848c8edc61a3ac455e3583","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/optim/lion.py","uri":"program://bitsandbytes/file/bitsandbytes/optim/lion.py","kind":"file","name":"bitsandbytes/optim/lion.py","path":"bitsandbytes/optim/lion.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom bitsandbytes.optim.optimizer import Optimizer1State\n\n\nclass Lion(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-4,\n betas=(0.9, 0.99),\n weight_decay=0,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,\n is_paged=False,\n ):","source_hash":"6ad8e22396d988cb9903a33cbb393807e2fc5cc23158486ea2e6ba0664bac5e9","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/optim/ademamix.py","uri":"program://bitsandbytes/file/bitsandbytes/optim/ademamix.py","kind":"file","name":"bitsandbytes/optim/ademamix.py","path":"bitsandbytes/optim/ademamix.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from collections.abc import Iterable\nimport math\nfrom typing import Literal, Optional\n\nimport torch\n\nimport bitsandbytes.functional as F\nfrom bitsandbytes.optim.optimizer import Optimizer2State\n\n\nclass _ReferenceAdEMAMix(torch.optim.Optimizer):\n \"\"\"\n Reference: https://hf.co/papers/2409.03137\n \"\"\"\n\n def __init__(\n self,\n params: Iterable[torch.nn.Parameter],\n lr: float = 1e-3,\n betas: tuple[float, float, float] = (0.9, 0.999, 0.9999),\n alpha: float = 5.0,","source_hash":"fbe7d4db179aa35b487b9cc9bec3629791b9f5e7d7fd2a85c5da560e9e3e883d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/optim/adam.py","uri":"program://bitsandbytes/file/bitsandbytes/optim/adam.py","kind":"file","name":"bitsandbytes/optim/adam.py","path":"bitsandbytes/optim/adam.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom bitsandbytes.optim.optimizer import Optimizer2State\n\n\nclass Adam(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,","source_hash":"faec04ca0c7dfca630d7dab56f10b4ee847bcc7307e5e9c49ffd7d2016eb0f92","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/optim/lamb.py","uri":"program://bitsandbytes/file/bitsandbytes/optim/lamb.py","kind":"file","name":"bitsandbytes/optim/lamb.py","path":"bitsandbytes/optim/lamb.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom bitsandbytes.optim.optimizer import Optimizer2State\n\n\nclass LAMB(Optimizer2State):\n def __init__(\n self,\n params,\n lr=1e-3,\n bias_correction=True,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n adam_w_mode=True,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,","source_hash":"be62855caa66294b83930a82db74e69bde830af5aee35df479a7ea40c4480d9d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/optim/__init__.py","uri":"program://bitsandbytes/file/bitsandbytes/optim/__init__.py","kind":"file","name":"bitsandbytes/optim/__init__.py","path":"bitsandbytes/optim/__init__.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom .adagrad import Adagrad, Adagrad8bit, Adagrad32bit\nfrom .adam import Adam, Adam8bit, Adam32bit, PagedAdam, PagedAdam8bit, PagedAdam32bit\nfrom .adamw import (\n AdamW,\n AdamW8bit,\n AdamW32bit,\n PagedAdamW,\n PagedAdamW8bit,\n PagedAdamW32bit,\n)\nfrom .ademamix import AdEMAMix, AdEMAMix8bit, AdEMAMix32bit, PagedAdEMAMix, PagedAdEMAMix8bit, PagedAdEMAMix32bit\nfrom .lamb import LAMB, LAMB8bit, LAMB32bit\nfrom .lars import LARS, LARS8bit, LARS32bit, PytorchLARS\nfrom .lion import Lion, Lion8bit, Lion32bit, PagedLion, PagedLion8bit, PagedLion32bit\nfrom .optimizer import GlobalOptimManager\nfrom .rmsprop import RMSprop, RMSprop8bit, RMSprop32bit","source_hash":"d11a8cd54ea3c14f0509e78f0dc878404bb601911856bb4323cf651767752140","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/optim/optimizer.py","uri":"program://bitsandbytes/file/bitsandbytes/optim/optimizer.py","kind":"file","name":"bitsandbytes/optim/optimizer.py","path":"bitsandbytes/optim/optimizer.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom collections import abc as container_abcs, defaultdict\nfrom copy import deepcopy\nfrom itertools import chain\nfrom typing import Optional\n\nimport torch\n\nimport bitsandbytes.functional as F\nfrom bitsandbytes.utils import sync_gpu\n\n\nclass MockArgs:\n def __init__(self, initial_data):\n for key in initial_data:\n setattr(self, key, initial_data[key])\n\n","source_hash":"2d417f02c2f3b5c82ff950e0160a02f11dd859161428baea8de7e25d7e69493e","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/optim/adagrad.py","uri":"program://bitsandbytes/file/bitsandbytes/optim/adagrad.py","kind":"file","name":"bitsandbytes/optim/adagrad.py","path":"bitsandbytes/optim/adagrad.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom bitsandbytes.optim.optimizer import Optimizer1State\n\n\nclass Adagrad(Optimizer1State):\n def __init__(\n self,\n params,\n lr=1e-2,\n lr_decay=0,\n weight_decay=0,\n initial_accumulator_value=0,\n eps=1e-10,\n optim_bits=32,\n args=None,\n min_8bit_size=4096,\n percentile_clipping=100,\n block_wise=True,","source_hash":"11bd8832311487e78a5bc7dba1aa9a166fd9b9f722f3b05315c271317baa287a","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/optim/lars.py","uri":"program://bitsandbytes/file/bitsandbytes/optim/lars.py","kind":"file","name":"bitsandbytes/optim/lars.py","path":"bitsandbytes/optim/lars.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport torch\nfrom torch.optim import Optimizer\n\nfrom bitsandbytes.optim.optimizer import Optimizer1State\n\n\nclass LARS(Optimizer1State):\n def __init__(\n self,\n params,\n lr,\n momentum=0,\n dampening=0,\n weight_decay=0,\n nesterov=False,\n optim_bits=32,\n args=None,","source_hash":"782114950adcf5c11033555c779362917f4485c6b603f3cd1b202669899cadef","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/diagnostics/cuda.py","uri":"program://bitsandbytes/file/bitsandbytes/diagnostics/cuda.py","kind":"file","name":"bitsandbytes/diagnostics/cuda.py","path":"bitsandbytes/diagnostics/cuda.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from collections.abc import Iterable, Iterator\nimport logging\nimport os\nfrom pathlib import Path\n\nimport torch\n\nfrom bitsandbytes.cextension import HIP_ENVIRONMENT, get_cuda_bnb_library_path\nfrom bitsandbytes.cuda_specs import CUDASpecs\nfrom bitsandbytes.diagnostics.utils import print_dedented\n\nCUDART_PATH_PREFERRED_ENVVARS = (\"CONDA_PREFIX\", \"LD_LIBRARY_PATH\")\n\nCUDART_PATH_IGNORED_ENVVARS = {\n \"DBUS_SESSION_BUS_ADDRESS\", # hardware related\n \"GOOGLE_VM_CONFIG_LOCK_FILE\", # GCP: requires elevated permissions, causing problems in VMs and Jupyter notebooks\n \"HOME\", # Linux shell default\n \"LESSCLOSE\",\n \"LESSOPEN\", # related to the `less` command\n \"MAIL\", # something related to emails\n \"OLDPWD\",","source_hash":"c21668d64d87411c67e9ab3941007f95b4cede7758f55a9d2690013509d0d144","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/diagnostics/main.py","uri":"program://bitsandbytes/file/bitsandbytes/diagnostics/main.py","kind":"file","name":"bitsandbytes/diagnostics/main.py","path":"bitsandbytes/diagnostics/main.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import importlib\nimport platform\nimport sys\nimport traceback\n\nimport torch\n\nfrom bitsandbytes import __version__ as bnb_version\nfrom bitsandbytes.cextension import BNB_BACKEND\nfrom bitsandbytes.consts import PACKAGE_GITHUB_URL\nfrom bitsandbytes.cuda_specs import get_cuda_specs\nfrom bitsandbytes.diagnostics.cuda import (\n print_diagnostics,\n)\nfrom bitsandbytes.diagnostics.utils import print_dedented, print_header\n\n_RELATED_PACKAGES = [\n \"accelerate\",\n \"diffusers\",\n \"numpy\",\n 
\"pip\",","source_hash":"bb4642b3732c3fae5df6541390297413861ec536dc79db51dba3ba147d69e650","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/diagnostics/utils.py","uri":"program://bitsandbytes/file/bitsandbytes/diagnostics/utils.py","kind":"file","name":"bitsandbytes/diagnostics/utils.py","path":"bitsandbytes/diagnostics/utils.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":12,"code":"import textwrap\n\nHEADER_WIDTH = 60\n\n\ndef print_header(txt: str, width: int = HEADER_WIDTH, filler: str = \"=\") -> None:\n txt = f\" {txt} \" if txt else \"\"\n print(txt.center(width, filler))\n\n\ndef print_dedented(text):\n print(\"\\n\".join(textwrap.dedent(text).strip().split(\"\\n\")))","source_hash":"102888dbbf6387bf3defb45664520e9264c91b88353bf62240bc0fe2249aed3c","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/nn/triton_based_modules.py","uri":"program://bitsandbytes/file/bitsandbytes/nn/triton_based_modules.py","kind":"file","name":"bitsandbytes/nn/triton_based_modules.py","path":"bitsandbytes/nn/triton_based_modules.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from functools import partial\n\nimport torch\nimport torch.nn as nn\n\nfrom bitsandbytes.triton.dequantize_rowwise import dequantize_rowwise\nfrom bitsandbytes.triton.int8_matmul_mixed_dequantize import (\n int8_matmul_mixed_dequantize,\n)\nfrom bitsandbytes.triton.int8_matmul_rowwise_dequantize import (\n int8_matmul_rowwise_dequantize,\n)\nfrom bitsandbytes.triton.quantize_columnwise_and_transpose import (\n quantize_columnwise_and_transpose,\n)\nfrom bitsandbytes.triton.quantize_global import (\n quantize_global,\n quantize_global_transpose,\n)\nfrom bitsandbytes.triton.quantize_rowwise import quantize_rowwise\nfrom bitsandbytes.triton.triton_utils import is_triton_available","source_hash":"e186a9e325247ad7dd9f2e986447659ba8d9102fd8e86155bda8cc712b03836d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/nn/parametrize.py","uri":"program://bitsandbytes/file/bitsandbytes/nn/parametrize.py","kind":"file","name":"bitsandbytes/nn/parametrize.py","path":"bitsandbytes/nn/parametrize.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from functools import partial\nfrom typing import Any, Literal, Optional\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.utils.parametrize as P\n\nfrom .. import functional as F\n\n\nclass Bnb4bitParametrization(nn.Module):\n \"\"\"\n A parametrization module that handles dequantization of a 4-bit quantized parameter.\n\n The parameter data is expected to be already quantized when this parametrization is applied.\n This module will dequantize the parameter data to its original floating-point representation\n when the forward method is called (i.e. when the parameter is accessed).\n\n Args:\n quant_state (`F.QuantState`):\n The quantization state containing the necessary information for dequantization.","source_hash":"e9fae2f99c5799a0bfef479aec29afc5fc2b135421e8f0bcb2ae2d8af6d62d5c","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/nn/__init__.py","uri":"program://bitsandbytes/file/bitsandbytes/nn/__init__.py","kind":"file","name":"bitsandbytes/nn/__init__.py","path":"bitsandbytes/nn/__init__.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom .modules import (\n Embedding,\n Embedding4bit,\n Embedding8bit,\n EmbeddingFP4,\n EmbeddingNF4,\n Int8Params,\n Linear4bit,\n Linear8bitLt,\n LinearFP4,\n LinearNF4,\n OutlierAwareLinear,\n Params4bit,\n StableEmbedding,\n SwitchBackLinearBnb,\n)\nfrom .triton_based_modules import (","source_hash":"3d79f9e8e4e7e90335d846038b9f946eeb8b7b5e8cf177edd9867ee8e4d96784","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/nn/modules.py","uri":"program://bitsandbytes/file/bitsandbytes/nn/modules.py","kind":"file","name":"bitsandbytes/nn/modules.py","path":"bitsandbytes/nn/modules.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport copy\nfrom typing import Any, Optional, TypeVar, Union, overload\nimport warnings\n\nimport torch\nfrom torch import Tensor, device, dtype, nn\nimport torch.nn.functional as F\n\nimport bitsandbytes as bnb\nfrom bitsandbytes.cextension import HIP_ENVIRONMENT\nfrom bitsandbytes.functional import QuantState\nfrom bitsandbytes.optim import GlobalOptimManager\nfrom bitsandbytes.utils import INVERSE_LINEAR_8BIT_WEIGHTS_FORMAT_MAPPING, OutlierTracer\n\nT = TypeVar(\"T\", bound=\"torch.nn.Module\")\n\n","source_hash":"980faaa0edbc7dc1be53f74f7352ce3592f3e726db71cf021d9fd004e5708fce","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/autograd/_functions.py","uri":"program://bitsandbytes/file/bitsandbytes/autograd/_functions.py","kind":"file","name":"bitsandbytes/autograd/_functions.py","path":"bitsandbytes/autograd/_functions.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from dataclasses import dataclass\nfrom math import prod\nfrom typing import Callable, Optional\nimport warnings\nfrom warnings import warn\n\nimport torch\nfrom typing_extensions import deprecated\n\nimport bitsandbytes.functional as F\n\n# The inverse transformation for the colTuring and colAmpere format were contributed by Alex Borzunov:\n# https://github.com/bigscience-workshop/petals/blob/main/src/petals/utils/linear8bitlt_patch.py\n\n\n\"\"\"\n This class pools outlier dimensions across layers.\n This is particularly important for small models where outlier features\n are less systematic and occur with low frequency.\n\"\"\"\n","source_hash":"f827d38aa68d400d32ca420569d2003add15aa7dc1fcd08af6c959997829dfad","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:bitsandbytes/autograd/__init__.py","uri":"program://bitsandbytes/file/bitsandbytes/autograd/__init__.py","kind":"file","name":"bitsandbytes/autograd/__init__.py","path":"bitsandbytes/autograd/__init__.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":1,"code":"from ._functions import get_inverse_transform_indices, undo_layout","source_hash":"e0ebcf8609c69a7eb013e0354d265ab6a2d77e7825a0b5afe6b3ab78a54460db","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:scripts/stale.py","uri":"program://bitsandbytes/file/scripts/stale.py","kind":"file","name":"scripts/stale.py","path":"scripts/stale.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# 
Copyright 2023 The HuggingFace Team, the AllenNLP library authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nScript to close stale issue. Taken in part from the AllenNLP repository.\nhttps://github.com/allenai/allennlp.\n\"\"\"\n\nfrom datetime import datetime as dt, timezone\nimport os\n","source_hash":"e7ca72087ad971d1869fdd73a05313d17d47da46551cb713ed90de8c1b36a4d6","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:benchmarking/matmul_benchmark.py","uri":"program://bitsandbytes/file/benchmarking/matmul_benchmark.py","kind":"file","name":"benchmarking/matmul_benchmark.py","path":"benchmarking/matmul_benchmark.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\"\nExtracted from tests/test_functional.py\n\nUsage: pytest benchmarking/matmul_benchmark.py\n\"\"\"\n\nimport time\n\nimport pytest\nimport torch\n\nimport bitsandbytes as bnb\nfrom bitsandbytes import functional as F\n\nk = 20\n\ntorch.set_printoptions(precision=5, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000)\n\n\n@pytest.mark.parametrize(\n (\"batch\", \"seq\", \"model\", \"hidden\"),","source_hash":"1f22434d022a6e12736ab7cf38d579bcc7127ea324482581ec5fc1506637e5e7","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:benchmarking/inference_benchmark.py","uri":"program://bitsandbytes/file/benchmarking/inference_benchmark.py","kind":"file","name":"benchmarking/inference_benchmark.py","path":"benchmarking/inference_benchmark.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\"\nInference benchmarking tool.\n\nRequirements:\n transformers\n accelerate\n bitsandbytes\n optimum-benchmark\n\nUsage: python inference_benchmark.py model_id\n\noptions:\n -h, --help show this help message and exit\n --configs {bf16,fp16,nf4,nf4-dq,int8,int8-decomp} [{bf16,fp16,nf4,nf4-dq,int8,int8-decomp} ...]\n --bf16\n --fp16\n --nf4\n --nf4-dq\n --int8\n --int8-decomp\n --batches BATCHES [BATCHES ...]","source_hash":"1ad11cf63293620045523c25941b31f7241e130fecbf13be0431ca3bfaf41654","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:benchmarking/optimizer_benchmark.py","uri":"program://bitsandbytes/file/benchmarking/optimizer_benchmark.py","kind":"file","name":"benchmarking/optimizer_benchmark.py","path":"benchmarking/optimizer_benchmark.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\"\nExtracted from tests/test_optim.py\n\nUsage: pytest benchmarking/optimizer_benchmark.py\n\"\"\"\n\nimport time\n\nimport pytest\nfrom tests.helpers import describe_dtype, id_formatter\nimport torch\n\nimport bitsandbytes as bnb\n\nstr2optimizers = {\"paged_adamw\": (torch.optim.AdamW, bnb.optim.PagedAdamW)}\n\n\n@pytest.mark.parametrize(\"dim1\", [2 * 1024], ids=id_formatter(\"dim1\"))\n@pytest.mark.parametrize(\"gtype\", [torch.float16], ids=describe_dtype)\n@pytest.mark.parametrize(\"optim_name\", 
[\"paged_adamw\"], ids=id_formatter(\"optim_name\"))\n@pytest.mark.parametrize(\"mode\", [\"bnb\"], ids=id_formatter(\"mode\"))","source_hash":"975399726e1aa92fc294ed8ca4c8030ce6fb15ba063ac87c0b08459baebc715d","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:benchmarking/int8/training_benchmark.py","uri":"program://bitsandbytes/file/benchmarking/int8/training_benchmark.py","kind":"file","name":"benchmarking/int8/training_benchmark.py","path":"benchmarking/int8/training_benchmark.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\"\nExtracted from tests/test_functional.py\n\nUsage: pytest benchmarking/int8/training_benchmark.py\n\"\"\"\n\nimport time\n\nimport pytest\nimport torch\n\nfrom bitsandbytes import functional as F\n\nk = 20\n\ntorch.set_printoptions(precision=5, sci_mode=False, linewidth=120, edgeitems=20, threshold=10000)\n\n\n@pytest.mark.parametrize(\n (\"batch\", \"seq\", \"model\", \"hidden\"),\n [","source_hash":"f600bf11bff09c2d74a050d019925c9c21b02de9fad7170225bcf4c3294e0f52","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:benchmarking/int8/int8_benchmark.py","uri":"program://bitsandbytes/file/benchmarking/int8/int8_benchmark.py","kind":"file","name":"benchmarking/int8/int8_benchmark.py","path":"benchmarking/int8/int8_benchmark.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\"\nBasic benchmark for text generation.\n\nUsage: python benchmarking/int8/int8_benchmark.py\n\"\"\"\n\nimport time\n\nimport torch\nfrom torch.profiler import ProfilerActivity, profile\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig\n\nMAX_NEW_TOKENS = 128\nmodel_name = \"meta-llama/Llama-3.1-8B\"\n\ntext = \"Below is a question. 
I need an answer.\\n\\nExplain machine learning: \"\ntokenizer = AutoTokenizer.from_pretrained(model_name)\ninput_ids = tokenizer([text] * 8, return_tensors=\"pt\").input_ids.to(0)\n\nmodel = AutoModelForCausalLM.from_pretrained(\n model_name,","source_hash":"4d333fc8e5a3de3e2baea077473bb1b8b2553f0a6ec4a19f4120cf86e278c555","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:benchmarking/switchback/make_plot_with_jsonl.py","uri":"program://bitsandbytes/file/benchmarking/switchback/make_plot_with_jsonl.py","kind":"file","name":"benchmarking/switchback/make_plot_with_jsonl.py","path":"benchmarking/switchback/make_plot_with_jsonl.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ncmap = plt.get_cmap(\"cool\")\n\nif __name__ == \"__main__\":\n fig = plt.figure(tight_layout=True, figsize=(12, 3.5))\n gs = gridspec.GridSpec(1, 2)\n\n dims_to_consider = [1024, 1280, 1408, 1664, 2048, 4096]\n batch_size_for_plot1 = 32768\n batch_sizes_for_plot2 = [2**14, 2**15, 2**16, 2**17]\n dims_to_xtick = [1024, 2048, 4096]\n logscale_plot1 = True\n\n ax = fig.add_subplot(gs[0, 0])\n\n # TODO: change this to what you want.\n rdf = pd.read_json(\"speed_benchmark/info_a100_py2.jsonl\", lines=True)\n df = rdf[rdf.batch_size == batch_size_for_plot1]","source_hash":"028048b8f65a244012688a9f6c7dbe8e6e334c7740180a58c8a9eb7d99f13313","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:benchmarking/switchback/speed_benchmark.py","uri":"program://bitsandbytes/file/benchmarking/switchback/speed_benchmark.py","kind":"file","name":"benchmarking/switchback/speed_benchmark.py","path":"benchmarking/switchback/speed_benchmark.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import json\nimport time\n\nimport torch\n\nfrom bitsandbytes.triton.int8_matmul_mixed_dequantize import (\n int8_matmul_mixed_dequantize,\n)\nfrom bitsandbytes.triton.int8_matmul_rowwise_dequantize import (\n int8_matmul_rowwise_dequantize,\n)\nfrom bitsandbytes.triton.quantize_columnwise_and_transpose import (\n quantize_columnwise_and_transpose,\n)\nfrom bitsandbytes.triton.quantize_global import (\n quantize_global,\n quantize_global_transpose,\n)\nfrom bitsandbytes.triton.quantize_rowwise import quantize_rowwise\n\n# KNOW ISSUE: need to optimize \"w_quantize_colwise_transpose\" when embeddim is too large.","source_hash":"bfd34f8bab2afebcd911eea7c33f6e36f5fac2afefd39e649409071417cf7f91","truncated":false} {"repo_id":"bitsandbytes","entity_id":"file:benchmarking/xpu/inference_benchmark.py","uri":"program://bitsandbytes/file/benchmarking/xpu/inference_benchmark.py","kind":"file","name":"benchmarking/xpu/inference_benchmark.py","path":"benchmarking/xpu/inference_benchmark.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import argparse\nimport time\n\n# import intel_extension_for_pytorch as ipex\nimport numpy as np\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig\n\nMAX_NEW_TOKENS = 256\n\nget_time = time.time\n\nsystem_prompt = \"You are a helpful assistant\"\nuser_prompt = \"\"\"Summarize this text please:\n\n```Tell me, O muse, of that ingenious hero who travelled far and wide after he had sacked the famous town of Troy. 
Many cities did he visit, and many were the nations with whose manners and customs he was acquainted; moreover he suffered much by sea while trying to save his own life and bring his men safely home; but do what he might he could not save his men, for they perished through their own sheer folly in eating the cattle of the Sun-god Hyperion; so the god prevented them from ever reaching home. Tell me, too, about all these things, O daughter of Jove, from whatsoever source you may know them.\n\nSo now all who escaped death in battle or by shipwreck had got safely home except Ulysses, and he, though he was longing to return to his wife and country, was detained by the goddess Calypso, who had got him into a large cave and wanted to marry him. But as years went by, there came a time when the gods settled that he should go back to Ithaca; even then, however, when he was among his own people, his troubles were not yet over; nevertheless all the gods had now begun to pity him except Neptune, who still persecuted him without ceasing and would not let him get home.\n\nNow Neptune had gone off to the Ethiopians, who are at the world's end, and lie in two halves, the one looking West and the other East. He had gone there to accept a hecatomb of sheep and oxen, and was enjoying himself at his festival; but the other gods met in the house of Olympian Jove, and the sire of gods and men spoke first. At that moment he was thinking of Aegisthus, who had been killed by Agamemnon's son Orestes; so he said to the other gods:\n","source_hash":"d97f8a6958c8be03759f3a6d868cc2f0438cd160d59d19f5bc000a5a62a4c290","truncated":false}
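The optimizer records above (Adam, AdamW, Lion, RMSprop, Adagrad, LAMB, LARS, SGD, AdEMAMix) all route through the shared Optimizer1State/Optimizer2State constructors and expose the same knobs: optim_bits, min_8bit_size, percentile_clipping, and block_wise. As a minimal usage sketch — assuming only a CUDA-enabled bitsandbytes install and a standard PyTorch training step; the model and shapes here are illustrative, while the optimizer name and keyword come straight from the bitsandbytes/optim/__init__.py and adam.py records above — the exported 8-bit variants act as drop-in replacements for their torch.optim counterparts:

    # Sketch only: assumes CUDA is available; model/shapes are arbitrary examples.
    import torch
    import bitsandbytes as bnb

    model = torch.nn.Linear(4096, 4096).cuda()

    # Drop-in replacement for torch.optim.Adam. Optimizer state is kept in
    # blockwise-quantized 8-bit for parameters with at least min_8bit_size
    # elements (per the constructor signatures in the records above).
    optimizer = bnb.optim.Adam8bit(model.parameters(), lr=1e-3, min_8bit_size=4096)

    out = model(torch.randn(8, 4096, device="cuda"))
    out.mean().backward()
    optimizer.step()
    optimizer.zero_grad()

The same pattern applies to the other exports listed in the bitsandbytes/optim/__init__.py record (e.g. AdamW8bit, Lion8bit, PagedAdamW): only the constructor name changes, since the quantization behavior is inherited from the shared optimizer base classes.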