danieldk (HF Staff) committed
Commit 0b643c8 · verified · 1 parent: 229d056

Build uploaded using `kernels`.
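
Builds published this way are typically consumed through the `kernels` library, which selects the variant matching the local torch version and ABI at load time. A minimal sketch, assuming a hypothetical Hub repo id (the actual path is not part of this commit):

```python
from kernels import get_kernel

# Hypothetical repo id, for illustration only; substitute the real Hub path.
kernel = get_kernel("<org>/quantization-bitsandbytes")

# The loaded module exposes the same API as the build variants deleted below.
gemm_4bit_forward = kernel.gemm_4bit_forward
```
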

build/torch210-cxx11-cpu-x86_64-linux/__init__.py DELETED
@@ -1,3 +0,0 @@
-from .custom_ops import gemm_4bit_forward
-
-__all__ = ["gemm_4bit_forward"]

build/torch210-cxx11-cpu-x86_64-linux/_ops.py DELETED
@@ -1,9 +0,0 @@
-import torch
-from . import _quantization_bitsandbytes_5679cec
-ops = torch.ops._quantization_bitsandbytes_5679cec
-
-def add_op_namespace_prefix(op_name: str):
-    """
-    Prefix op by namespace.
-    """
-    return f"_quantization_bitsandbytes_5679cec::{op_name}"

build/torch210-cxx11-cpu-x86_64-linux/_quantization_bitsandbytes_5679cec.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9514e46dcde591876368977ade28c2f1852077ce89f737bfd9a4567706395003
-size 146712

build/torch210-cxx11-cpu-x86_64-linux/custom_ops.py DELETED
@@ -1,19 +0,0 @@
-import torch
-from ._ops import ops
-
-def gemm_4bit_forward(
-    input: torch.Tensor,
-    weight: torch.Tensor,
-    absmax: torch.Tensor,
-    blocksize: int,
-    quant_type: int,
-) -> torch.Tensor:
-    original_dtype = input.dtype
-    if original_dtype != torch.bfloat16:
-        input = input.to(torch.bfloat16)
-
-    output = ops.gemm_4bit_forward(input, weight, absmax, blocksize, quant_type)
-    if original_dtype != torch.bfloat16:
-        output = output.to(original_dtype)
-
-    return output
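
The deleted wrapper round-trips the input dtype: anything other than bfloat16 is up-cast before the registered op runs (the cast suggests the underlying CPU kernel only implements a bf16 path) and the output is cast back afterwards. A minimal, self-contained sketch of that pattern, using a stand-in for the registered op (the stand-in is an illustrative assumption, not the kernel's actual 4-bit GEMM):

```python
import torch


def _bf16_only_op(x: torch.Tensor) -> torch.Tensor:
    # Stand-in for ops.gemm_4bit_forward: accepts and returns bfloat16 only.
    assert x.dtype == torch.bfloat16
    return x * 2


def call_with_dtype_roundtrip(x: torch.Tensor) -> torch.Tensor:
    # Same up-cast / cast-back structure as the deleted gemm_4bit_forward wrapper.
    original_dtype = x.dtype
    if original_dtype != torch.bfloat16:
        x = x.to(torch.bfloat16)
    out = _bf16_only_op(x)
    if original_dtype != torch.bfloat16:
        out = out.to(original_dtype)
    return out


y = call_with_dtype_roundtrip(torch.randn(4, 8, dtype=torch.float32))
assert y.dtype == torch.float32  # caller keeps its original dtype
```
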
build/torch210-cxx11-cpu-x86_64-linux/metadata.json DELETED
@@ -1 +0,0 @@
-{"python-depends":[]}

build/torch210-cxx11-cpu-x86_64-linux/quantization_bitsandbytes/__init__.py DELETED
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
-    # We cannot use the module name as-is, after adding it to `sys.modules`,
-    # it would also be used for other imports. So, we make a module name that
-    # depends on the path for it to be unique using the hex-encoded hash of
-    # the path.
-    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
-    module_name = path_hash
-    spec = importlib.util.spec_from_file_location(module_name, file_path)
-    if spec is None:
-        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
-    module = importlib.util.module_from_spec(spec)
-    if module is None:
-        raise ImportError(f"Cannot load module {module_name} from spec")
-    sys.modules[module_name] = module
-    spec.loader.exec_module(module)  # type: ignore
-    return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
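
This shim loads the variant's top-level `__init__.py` under a path-derived module name, so several variant copies can coexist in `sys.modules`, and then re-exports its globals under the stable package name. In practice that means, assuming a variant directory is on `sys.path`, the op is importable under the package name rather than the variant path:

```python
# Assumes e.g. build/torch210-cxx11-cpu-x86_64-linux is on sys.path;
# the shim then re-exports the variant's public API under the stable name.
from quantization_bitsandbytes import gemm_4bit_forward
```
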
build/torch28-cxx11-cpu-x86_64-linux/__init__.py DELETED
@@ -1,3 +0,0 @@
-from .custom_ops import gemm_4bit_forward
-
-__all__ = ["gemm_4bit_forward"]

build/torch28-cxx11-cpu-x86_64-linux/_ops.py DELETED
@@ -1,9 +0,0 @@
-import torch
-from . import _quantization_bitsandbytes_5679cec
-ops = torch.ops._quantization_bitsandbytes_5679cec
-
-def add_op_namespace_prefix(op_name: str):
-    """
-    Prefix op by namespace.
-    """
-    return f"_quantization_bitsandbytes_5679cec::{op_name}"

build/torch28-cxx11-cpu-x86_64-linux/_quantization_bitsandbytes_5679cec.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f5a60273f0883e1bfee176864d939b3835fd7360336c8a7c985df178a4aac483
-size 141024

build/torch28-cxx11-cpu-x86_64-linux/custom_ops.py DELETED
@@ -1,19 +0,0 @@
-import torch
-from ._ops import ops
-
-def gemm_4bit_forward(
-    input: torch.Tensor,
-    weight: torch.Tensor,
-    absmax: torch.Tensor,
-    blocksize: int,
-    quant_type: int,
-) -> torch.Tensor:
-    original_dtype = input.dtype
-    if original_dtype != torch.bfloat16:
-        input = input.to(torch.bfloat16)
-
-    output = ops.gemm_4bit_forward(input, weight, absmax, blocksize, quant_type)
-    if original_dtype != torch.bfloat16:
-        output = output.to(original_dtype)
-
-    return output

build/torch28-cxx11-cpu-x86_64-linux/metadata.json DELETED
@@ -1 +0,0 @@
-{"python-depends":[]}

build/torch28-cxx11-cpu-x86_64-linux/quantization_bitsandbytes/__init__.py DELETED
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
-    # We cannot use the module name as-is, after adding it to `sys.modules`,
-    # it would also be used for other imports. So, we make a module name that
-    # depends on the path for it to be unique using the hex-encoded hash of
-    # the path.
-    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
-    module_name = path_hash
-    spec = importlib.util.spec_from_file_location(module_name, file_path)
-    if spec is None:
-        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
-    module = importlib.util.module_from_spec(spec)
-    if module is None:
-        raise ImportError(f"Cannot load module {module_name} from spec")
-    sys.modules[module_name] = module
-    spec.loader.exec_module(module)  # type: ignore
-    return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))

build/torch29-cxx11-cpu-x86_64-linux/__init__.py DELETED
@@ -1,3 +0,0 @@
-from .custom_ops import gemm_4bit_forward
-
-__all__ = ["gemm_4bit_forward"]

build/torch29-cxx11-cpu-x86_64-linux/_ops.py DELETED
@@ -1,9 +0,0 @@
-import torch
-from . import _quantization_bitsandbytes_5679cec
-ops = torch.ops._quantization_bitsandbytes_5679cec
-
-def add_op_namespace_prefix(op_name: str):
-    """
-    Prefix op by namespace.
-    """
-    return f"_quantization_bitsandbytes_5679cec::{op_name}"

build/torch29-cxx11-cpu-x86_64-linux/_quantization_bitsandbytes_5679cec.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:44b98e143bc9306dc5e4b5db487af05520152f44e4c5b93cdc19a4322d3f716f
-size 141056

build/torch29-cxx11-cpu-x86_64-linux/custom_ops.py DELETED
@@ -1,19 +0,0 @@
-import torch
-from ._ops import ops
-
-def gemm_4bit_forward(
-    input: torch.Tensor,
-    weight: torch.Tensor,
-    absmax: torch.Tensor,
-    blocksize: int,
-    quant_type: int,
-) -> torch.Tensor:
-    original_dtype = input.dtype
-    if original_dtype != torch.bfloat16:
-        input = input.to(torch.bfloat16)
-
-    output = ops.gemm_4bit_forward(input, weight, absmax, blocksize, quant_type)
-    if original_dtype != torch.bfloat16:
-        output = output.to(original_dtype)
-
-    return output

build/torch29-cxx11-cpu-x86_64-linux/metadata.json DELETED
@@ -1 +0,0 @@
-{"python-depends":[]}

build/torch29-cxx11-cpu-x86_64-linux/quantization_bitsandbytes/__init__.py DELETED
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
-    # We cannot use the module name as-is, after adding it to `sys.modules`,
-    # it would also be used for other imports. So, we make a module name that
-    # depends on the path for it to be unique using the hex-encoded hash of
-    # the path.
-    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
-    module_name = path_hash
-    spec = importlib.util.spec_from_file_location(module_name, file_path)
-    if spec is None:
-        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
-    module = importlib.util.module_from_spec(spec)
-    if module is None:
-        raise ImportError(f"Cannot load module {module_name} from spec")
-    sys.modules[module_name] = module
-    spec.loader.exec_module(module)  # type: ignore
-    return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))