Kernels
Commit 512d95d (verified), committed by kernels-bot
Parent(s): 86001ee

Uploaded using `kernel-builder`.

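Builds uploaded this way are typically consumed through the Hugging Face `kernels` library, which picks the matching `build/` variant for the local torch/CUDA/architecture at load time. A minimal usage sketch; the `kernels-community/quantization-eetq` repo id is an assumption, not taken from this commit:

```python
# Hypothetical loading sketch. Assumes the `kernels` package is installed
# and that this repository lives under `kernels-community/quantization-eetq`
# (the repo id is not confirmed by this diff).
import torch
from kernels import get_kernel

eetq = get_kernel("kernels-community/quantization-eetq")

# The exported functions mirror the package __init__.py in this commit.
print(eetq.w8_a16_gemm, eetq.quant_weights)
```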
build/torch210-cxx11-cu126-aarch64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_eetq_cuda_86f75d9
-ops = torch.ops._quantization_eetq_cuda_86f75d9
+from . import _quantization_eetq_cuda_2d6119c
+ops = torch.ops._quantization_eetq_cuda_2d6119c

 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_eetq_cuda_86f75d9::{op_name}"
+    return f"_quantization_eetq_cuda_2d6119c::{op_name}"
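The only change in each `_ops.py` is the build-hash suffix (`86f75d9` → `2d6119c`). The suffix keeps multiple builds of the same kernel loadable side by side: the shared object registers its ops under the hashed namespace, `torch.ops.<namespace>` resolves them, and `add_op_namespace_prefix` produces the fully qualified `namespace::op` name that `torch.library` APIs expect. An illustrative sketch of that resolution, not code from this repo:

```python
# Illustrative only: how the hash-suffixed namespace is consumed.
# `torch.ops.<namespace>.<op>` looks up ops that the .so registered
# under that namespace at import time.
import torch

namespace = "_quantization_eetq_cuda_2d6119c"

def add_op_namespace_prefix(op_name: str) -> str:
    # "namespace::op" is the fully qualified operator name.
    return f"{namespace}::{op_name}"

qualified = add_op_namespace_prefix("w8_a16_gemm")
# -> "_quantization_eetq_cuda_2d6119c::w8_a16_gemm"
```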
build/torch210-cxx11-cu126-aarch64-linux/{_quantization_eetq_cuda_86f75d9.abi3.so → _quantization_eetq_cuda_2d6119c.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3c76662fb7202c7f7410dbfcaf69e8b92c77545859171d26481d2d37b27201a4
+oid sha256:40d168ca6634a23dded922ae1bd52d8e1a2ad30701de31e7e54c826979cf5512
 size 39010048
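The `.so` itself lives in Git LFS, so only the pointer file appears in the diff: the rename plus the new `oid` means the binary was rebuilt with different bytes even though its size is unchanged. Per the git-lfs spec, the oid is simply the SHA-256 of the file contents, so a downloaded binary can be checked against the pointer:

```python
# Sketch: verify a downloaded LFS object against its pointer's oid.
import hashlib
from pathlib import Path

def lfs_oid(path: Path) -> str:
    # The LFS oid is the SHA-256 hex digest of the file's bytes.
    return hashlib.sha256(path.read_bytes()).hexdigest()

# Expected oid for the torch210/cu126 build after this commit:
expected = "40d168ca6634a23dded922ae1bd52d8e1a2ad30701de31e7e54c826979cf5512"
# assert lfs_oid(Path("_quantization_eetq_cuda_2d6119c.abi3.so")) == expected
```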
build/torch210-cxx11-cu126-aarch64-linux/metadata.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "quantization-eetq",
-  "id": "_quantization_eetq_cuda_86f75d9",
+  "id": "_quantization_eetq_cuda_2d6119c",
   "version": 1,
   "license": "Apache-2.0",
   "python-depends": [],
build/torch210-cxx11-cu128-aarch64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_eetq_cuda_86f75d9
-ops = torch.ops._quantization_eetq_cuda_86f75d9
+from . import _quantization_eetq_cuda_2d6119c
+ops = torch.ops._quantization_eetq_cuda_2d6119c

 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_eetq_cuda_86f75d9::{op_name}"
+    return f"_quantization_eetq_cuda_2d6119c::{op_name}"
build/torch210-cxx11-cu128-aarch64-linux/{_quantization_eetq_cuda_86f75d9.abi3.so → _quantization_eetq_cuda_2d6119c.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c88c95f4b6331e387278fbbf05c0dd7f39efd6bc262ef34c9bdc63b40f53489f
+oid sha256:4f185271e38f588a714e8b360fc7491df3a9592c330c58e805879767f05f92d0
 size 45366048
build/torch210-cxx11-cu128-aarch64-linux/metadata.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "quantization-eetq",
-  "id": "_quantization_eetq_cuda_86f75d9",
+  "id": "_quantization_eetq_cuda_2d6119c",
   "version": 1,
   "license": "Apache-2.0",
   "python-depends": [],
build/torch211-cxx11-cu126-aarch64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_eetq_cuda_86f75d9
-ops = torch.ops._quantization_eetq_cuda_86f75d9
+from . import _quantization_eetq_cuda_2d6119c
+ops = torch.ops._quantization_eetq_cuda_2d6119c

 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_eetq_cuda_86f75d9::{op_name}"
+    return f"_quantization_eetq_cuda_2d6119c::{op_name}"
build/torch211-cxx11-cu126-aarch64-linux/{_quantization_eetq_cuda_86f75d9.abi3.so → _quantization_eetq_cuda_2d6119c.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e09ba83a7a2cf4bad5120571b73251e2fe54097c03b13a356d674fcf25108c1c
+oid sha256:9166428182ff8357a8124b5fa9b45557867419cd7095c16264c0d7b4a8b1985c
 size 39006256
build/torch211-cxx11-cu126-aarch64-linux/metadata.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "quantization-eetq",
-  "id": "_quantization_eetq_cuda_86f75d9",
+  "id": "_quantization_eetq_cuda_2d6119c",
   "version": 1,
   "license": "Apache-2.0",
   "python-depends": [],
build/torch211-cxx11-cu128-aarch64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _quantization_eetq_cuda_86f75d9
-ops = torch.ops._quantization_eetq_cuda_86f75d9
+from . import _quantization_eetq_cuda_2d6119c
+ops = torch.ops._quantization_eetq_cuda_2d6119c

 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_quantization_eetq_cuda_86f75d9::{op_name}"
+    return f"_quantization_eetq_cuda_2d6119c::{op_name}"
build/torch211-cxx11-cu128-aarch64-linux/{_quantization_eetq_cuda_86f75d9.abi3.so → _quantization_eetq_cuda_2d6119c.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:74fac81d793d214112fc6f20f13cd69dd64e745e20d88a6807c6da033646d6da
+oid sha256:e1eb5bfd99bc0c2ad9f430b85f50d5e5075103bd63f0561bf2678472d7101e97
 size 45296712
build/torch211-cxx11-cu128-aarch64-linux/metadata.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "quantization-eetq",
-  "id": "_quantization_eetq_cuda_86f75d9",
+  "id": "_quantization_eetq_cuda_2d6119c",
   "version": 1,
   "license": "Apache-2.0",
   "python-depends": [],
build/torch212-cxx11-cu126-aarch64-linux/__init__.py ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch212-cxx11-cu126-aarch64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_cuda_2d6119c
+ops = torch.ops._quantization_eetq_cuda_2d6119c
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_cuda_2d6119c::{op_name}"
build/torch212-cxx11-cu126-aarch64-linux/_quantization_eetq_cuda_2d6119c.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fb6c323866edae12a279ad9d01ebe1565424693e9cb5b8e4713bd8e72d09818
+size 39009824
build/torch212-cxx11-cu126-aarch64-linux/custom_ops.py ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )
build/torch212-cxx11-cu126-aarch64-linux/metadata.json ADDED
@@ -0,0 +1,20 @@
+{
+  "name": "quantization-eetq",
+  "id": "_quantization_eetq_cuda_2d6119c",
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0+PTX"
+    ]
+  }
+}
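`metadata.json` is what lets a loader pick a compatible build: `archs` records the CUDA compute capabilities the binary was compiled for (SM 7.0 through 9.0, with PTX for forward compatibility on newer GPUs). A quick sketch of checking the local GPU against that list; the PTX JIT fallback is deliberately ignored here:

```python
# Sketch: check the running GPU against the archs list in metadata.json.
# "9.0+PTX" also allows newer GPUs to JIT from embedded PTX, which this
# simple membership check does not model.
import torch

archs = {"7.0", "7.2", "7.5", "8.0", "8.6", "8.7", "8.9", "9.0"}
major, minor = torch.cuda.get_device_capability()
cap = f"{major}.{minor}"
print(cap, "natively supported" if cap in archs else "would rely on PTX JIT")
```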
build/torch212-cxx11-cu126-aarch64-linux/quantization_eetq/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is; after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique, using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
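The `quantization_eetq/__init__.py` shim re-exports the build directory's top-level package under a stable name, and `_import_from_path` keys each loaded file in `sys.modules` by a path-derived hash, so two build variants of the same kernel never collide. A small illustration of that uniqueness property, using the same scheme as the function above:

```python
# Illustrative only: distinct paths yield distinct sys.modules keys, so
# several build variants of one kernel can coexist in a process.
import ctypes
from pathlib import Path

def module_key(file_path: Path) -> str:
    # Same scheme as _import_from_path: hex of the absolute path's hash,
    # widened to an unsigned size_t.
    return "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)

a = module_key(Path("build/torch212-cxx11-cu126-aarch64-linux/__init__.py"))
b = module_key(Path("build/torch211-cxx11-cu126-aarch64-linux/__init__.py"))
assert a != b  # distinct builds get distinct module names (with overwhelming likelihood)
```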