Build uploaded using `kernels`.
- build/torch210-cxx11-cpu-x86_64-linux/__init__.py +0 -3
- build/torch210-cxx11-cpu-x86_64-linux/_ops.py +0 -9
- build/torch210-cxx11-cpu-x86_64-linux/_quantization_gptq_d11f52b.abi3.so +0 -3
- build/torch210-cxx11-cpu-x86_64-linux/custom_ops.py +0 -19
- build/torch210-cxx11-cpu-x86_64-linux/metadata.json +0 -1
- build/torch210-cxx11-cpu-x86_64-linux/quantization_gptq/__init__.py +0 -26
- build/torch28-cxx11-cpu-x86_64-linux/__init__.py +0 -3
- build/torch28-cxx11-cpu-x86_64-linux/_ops.py +0 -9
- build/torch28-cxx11-cpu-x86_64-linux/_quantization_gptq_d11f52b.abi3.so +0 -3
- build/torch28-cxx11-cpu-x86_64-linux/custom_ops.py +0 -19
- build/torch28-cxx11-cpu-x86_64-linux/metadata.json +0 -1
- build/torch28-cxx11-cpu-x86_64-linux/quantization_gptq/__init__.py +0 -26
- build/torch29-cxx11-cpu-x86_64-linux/__init__.py +0 -3
- build/torch29-cxx11-cpu-x86_64-linux/_ops.py +0 -9
- build/torch29-cxx11-cpu-x86_64-linux/_quantization_gptq_d11f52b.abi3.so +0 -3
- build/torch29-cxx11-cpu-x86_64-linux/custom_ops.py +0 -19
- build/torch29-cxx11-cpu-x86_64-linux/metadata.json +0 -1
- build/torch29-cxx11-cpu-x86_64-linux/quantization_gptq/__init__.py +0 -26
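These per-torch-variant build directories are what the `kernels` loader selects from at runtime, matching the installed torch version and ABI. A consumer-side sketch of how such a kernel is typically fetched and called (the repo id, tensor variables, and packed layouts below are placeholders, not taken from this diff):

```python
from kernels import get_kernel

# Placeholder repo id; the Hub repository hosting these builds is not named here.
quantization_gptq = get_kernel("org-name/quantization-gptq")

# Arguments follow the gemm_int4_forward signature in the deleted custom_ops.py;
# the packed weight/zeros/absmax layouts are defined by the C++ kernel.
y = quantization_gptq.gemm_int4_forward(x, weight, zeros, absmax, blocksize=64)
```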
build/torch210-cxx11-cpu-x86_64-linux/__init__.py
DELETED
@@ -1,3 +0,0 @@
-from .custom_ops import gemm_int4_forward
-
-__all__ = ["gemm_int4_forward"]
build/torch210-cxx11-cpu-x86_64-linux/_ops.py
DELETED
@@ -1,9 +0,0 @@
-import torch
-from . import _quantization_gptq_d11f52b
-ops = torch.ops._quantization_gptq_d11f52b
-
-def add_op_namespace_prefix(op_name: str):
-    """
-    Prefix op by namespace.
-    """
-    return f"_quantization_gptq_d11f52b::{op_name}"
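`_ops.py` binds the compiled extension's op namespace to `torch.ops` and exposes a qualified-name helper. One common use of such a helper is registering a fake (meta) kernel so `torch.compile` can trace the op without running the C++ implementation. The sketch below is illustrative only, not part of this repo, and the output-shape assumption (one zero point per output column) is mine:

```python
import torch
from ._ops import add_op_namespace_prefix

# Hypothetical fake kernel: computes only output metadata (shape/dtype),
# never touching the real C++ implementation.
@torch.library.register_fake(add_op_namespace_prefix("gemm_int4_forward"))
def _gemm_int4_fake(input, weight, zeros, absmax, blocksize):
    out_features = zeros.shape[0]  # assumed layout
    return input.new_empty(*input.shape[:-1], out_features)
```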
build/torch210-cxx11-cpu-x86_64-linux/_quantization_gptq_d11f52b.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:767d96c0b158a7ec52b2bdf2184ad52b0be314397073f2c20b70b6028bb93481
-size 103168
build/torch210-cxx11-cpu-x86_64-linux/custom_ops.py
DELETED
@@ -1,19 +0,0 @@
-import torch
-from ._ops import ops
-
-def gemm_int4_forward(
-    input: torch.Tensor,
-    weight: torch.Tensor,
-    zeros: torch.Tensor,
-    absmax: torch.Tensor,
-    blocksize: int,
-) -> torch.Tensor:
-    original_dtype = input.dtype
-    if original_dtype != torch.bfloat16:
-        input = input.to(torch.bfloat16)
-
-    output = ops.gemm_int4_forward(input, weight, zeros, absmax, blocksize)
-    if original_dtype != torch.bfloat16:
-        output = output.to(original_dtype)
-
-    return output
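The wrapper above up-casts non-bf16 inputs to `torch.bfloat16` before dispatching to the compiled op and casts the result back to the caller's dtype, so the kernel itself only has to handle one input dtype. A minimal call sketch (shapes and the packed-tensor dtypes are illustrative assumptions; the real layouts are produced by the quantizer and consumed by the C++ kernel, neither of which is shown in this diff):

```python
import torch

x = torch.randn(4, 4096)  # fp32 activations; round-tripped through bf16 by the wrapper
# Placeholder packed tensors -- sizes depend on the kernel's packing scheme.
weight = torch.empty(4096 * 4096 // 2, dtype=torch.uint8)   # two int4 values per byte (assumed)
zeros = torch.empty(4096, dtype=torch.bfloat16)             # zero points (assumed layout)
absmax = torch.empty(4096, dtype=torch.bfloat16)            # per-block scales (assumed layout)

y = gemm_int4_forward(x, weight, zeros, absmax, blocksize=64)
assert y.dtype == x.dtype  # output is cast back to the input dtype
```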
build/torch210-cxx11-cpu-x86_64-linux/metadata.json
DELETED
@@ -1 +0,0 @@
-{"python-depends":[]}
build/torch210-cxx11-cpu-x86_64-linux/quantization_gptq/__init__.py
DELETED
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
-    # We cannot use the module name as-is, after adding it to `sys.modules`,
-    # it would also be used for other imports. So, we make a module name that
-    # depends on the path for it to be unique using the hex-encoded hash of
-    # the path.
-    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
-    module_name = path_hash
-    spec = importlib.util.spec_from_file_location(module_name, file_path)
-    if spec is None:
-        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
-    module = importlib.util.module_from_spec(spec)
-    if module is None:
-        raise ImportError(f"Cannot load module {module_name} from spec")
-    sys.modules[module_name] = module
-    spec.loader.exec_module(module)  # type: ignore
-    return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
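The `quantization_gptq/__init__.py` shim re-imports the variant's top-level `__init__.py` under a path-derived module name, so several torch/ABI variants can sit in `sys.modules` at once without colliding. The same pattern in a self-contained sketch (the helper name is mine, not the repo's):

```python
import importlib.util
import sys
from pathlib import Path

def load_unique(file_path: Path):
    # Name the module after its path so two files that are both called
    # __init__.py never collide in sys.modules.
    name = "mod_{:x}".format(abs(hash(str(file_path.resolve()))))
    spec = importlib.util.spec_from_file_location(name, file_path)
    if spec is None or spec.loader is None:
        raise ImportError(f"cannot load {file_path}")
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module
    spec.loader.exec_module(module)
    return module
```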
build/torch28-cxx11-cpu-x86_64-linux/__init__.py
DELETED
@@ -1,3 +0,0 @@
-from .custom_ops import gemm_int4_forward
-
-__all__ = ["gemm_int4_forward"]
build/torch28-cxx11-cpu-x86_64-linux/_ops.py
DELETED
@@ -1,9 +0,0 @@
-import torch
-from . import _quantization_gptq_d11f52b
-ops = torch.ops._quantization_gptq_d11f52b
-
-def add_op_namespace_prefix(op_name: str):
-    """
-    Prefix op by namespace.
-    """
-    return f"_quantization_gptq_d11f52b::{op_name}"
build/torch28-cxx11-cpu-x86_64-linux/_quantization_gptq_d11f52b.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:267134d9dcfbeba26af7b319ab45ba960d8d754c9ad947cb798b6f100f4b1635
-size 101912
build/torch28-cxx11-cpu-x86_64-linux/custom_ops.py
DELETED
@@ -1,19 +0,0 @@
-import torch
-from ._ops import ops
-
-def gemm_int4_forward(
-    input: torch.Tensor,
-    weight: torch.Tensor,
-    zeros: torch.Tensor,
-    absmax: torch.Tensor,
-    blocksize: int,
-) -> torch.Tensor:
-    original_dtype = input.dtype
-    if original_dtype != torch.bfloat16:
-        input = input.to(torch.bfloat16)
-
-    output = ops.gemm_int4_forward(input, weight, zeros, absmax, blocksize)
-    if original_dtype != torch.bfloat16:
-        output = output.to(original_dtype)
-
-    return output
build/torch28-cxx11-cpu-x86_64-linux/metadata.json
DELETED
@@ -1 +0,0 @@
-{"python-depends":[]}
build/torch28-cxx11-cpu-x86_64-linux/quantization_gptq/__init__.py
DELETED
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
-    # We cannot use the module name as-is, after adding it to `sys.modules`,
-    # it would also be used for other imports. So, we make a module name that
-    # depends on the path for it to be unique using the hex-encoded hash of
-    # the path.
-    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
-    module_name = path_hash
-    spec = importlib.util.spec_from_file_location(module_name, file_path)
-    if spec is None:
-        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
-    module = importlib.util.module_from_spec(spec)
-    if module is None:
-        raise ImportError(f"Cannot load module {module_name} from spec")
-    sys.modules[module_name] = module
-    spec.loader.exec_module(module)  # type: ignore
-    return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch29-cxx11-cpu-x86_64-linux/__init__.py
DELETED
@@ -1,3 +0,0 @@
-from .custom_ops import gemm_int4_forward
-
-__all__ = ["gemm_int4_forward"]
build/torch29-cxx11-cpu-x86_64-linux/_ops.py
DELETED
@@ -1,9 +0,0 @@
-import torch
-from . import _quantization_gptq_d11f52b
-ops = torch.ops._quantization_gptq_d11f52b
-
-def add_op_namespace_prefix(op_name: str):
-    """
-    Prefix op by namespace.
-    """
-    return f"_quantization_gptq_d11f52b::{op_name}"
build/torch29-cxx11-cpu-x86_64-linux/_quantization_gptq_d11f52b.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a2fe7fdf542f03337c894db4dcc200647dfbde7d8451b9b81b3be8a15616055f
-size 105960
build/torch29-cxx11-cpu-x86_64-linux/custom_ops.py
DELETED
@@ -1,19 +0,0 @@
-import torch
-from ._ops import ops
-
-def gemm_int4_forward(
-    input: torch.Tensor,
-    weight: torch.Tensor,
-    zeros: torch.Tensor,
-    absmax: torch.Tensor,
-    blocksize: int,
-) -> torch.Tensor:
-    original_dtype = input.dtype
-    if original_dtype != torch.bfloat16:
-        input = input.to(torch.bfloat16)
-
-    output = ops.gemm_int4_forward(input, weight, zeros, absmax, blocksize)
-    if original_dtype != torch.bfloat16:
-        output = output.to(original_dtype)
-
-    return output
build/torch29-cxx11-cpu-x86_64-linux/metadata.json
DELETED
@@ -1 +0,0 @@
-{"python-depends":[]}
build/torch29-cxx11-cpu-x86_64-linux/quantization_gptq/__init__.py
DELETED
@@ -1,26 +0,0 @@
-import ctypes
-import sys
-
-import importlib
-from pathlib import Path
-from types import ModuleType
-
-def _import_from_path(file_path: Path) -> ModuleType:
-    # We cannot use the module name as-is, after adding it to `sys.modules`,
-    # it would also be used for other imports. So, we make a module name that
-    # depends on the path for it to be unique using the hex-encoded hash of
-    # the path.
-    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
-    module_name = path_hash
-    spec = importlib.util.spec_from_file_location(module_name, file_path)
-    if spec is None:
-        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
-    module = importlib.util.module_from_spec(spec)
-    if module is None:
-        raise ImportError(f"Cannot load module {module_name} from spec")
-    sys.modules[module_name] = module
-    spec.loader.exec_module(module)  # type: ignore
-    return module
-
-
-globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))