Kernels
danieldk (HF Staff) committed
Commit a49d0f7 · verified · 1 parent: 050dda7

Build uploaded using `kernels`.
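For context, builds uploaded this way are typically consumed at runtime with the `kernels` library, which picks the variant directory matching the local torch/CUDA/ABI combination. A minimal sketch; the repo id `kernels-community/quantization-eetq` is an assumption, not confirmed by this commit:

```python
import torch
from kernels import get_kernel

# Fetches the build matching the local torch / CUDA / C++ ABI variant
# (e.g. build/torch29-cxx11-cu126-x86_64-linux) from the Hub and imports it.
eetq = get_kernel("kernels-community/quantization-eetq")  # repo id assumed

w = torch.randn(4096, 4096, dtype=torch.float16, device="cuda")
tensors = eetq.quant_weights(w, torch.int8, False)  # returns a list of tensors
```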

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. build/torch210-cxx11-cu126-x86_64-linux/__init__.py +3 -0
  2. build/{torch28-cxx11-cu128-x86_64-linux/quantization_eetq → torch210-cxx11-cu126-x86_64-linux}/_ops.py +3 -3
  3. build/{torch28-cxx11-cu126-x86_64-linux/quantization_eetq/_quantization_eetq_92f3139.abi3.so → torch210-cxx11-cu126-x86_64-linux/_quantization_eetq_2019ec2.abi3.so} +2 -2
  4. build/{torch28-cxx11-cu126-x86_64-linux/quantization_eetq → torch210-cxx11-cu126-x86_64-linux}/custom_ops.py +0 -0
  5. build/torch210-cxx11-cu126-x86_64-linux/metadata.json +1 -0
  6. build/torch210-cxx11-cu126-x86_64-linux/quantization_eetq/__init__.py +26 -0
  7. build/torch210-cxx11-cu128-x86_64-linux/__init__.py +3 -0
  8. build/{torch28-cxx11-cu129-x86_64-linux/quantization_eetq → torch210-cxx11-cu128-x86_64-linux}/_ops.py +3 -3
  9. build/{torch28-cxx11-cu128-x86_64-linux/quantization_eetq/_quantization_eetq_92f3139.abi3.so → torch210-cxx11-cu128-x86_64-linux/_quantization_eetq_2019ec2.abi3.so} +2 -2
  10. build/{torch28-cxx11-cu128-x86_64-linux/quantization_eetq → torch210-cxx11-cu128-x86_64-linux}/custom_ops.py +0 -0
  11. build/torch210-cxx11-cu128-x86_64-linux/metadata.json +1 -0
  12. build/torch210-cxx11-cu128-x86_64-linux/quantization_eetq/__init__.py +26 -0
  13. build/torch28-cxx11-cu126-x86_64-linux/__init__.py +3 -0
  14. build/{torch29-cxx11-cu126-x86_64-linux/quantization_eetq → torch28-cxx11-cu126-x86_64-linux}/_ops.py +3 -3
  15. build/{torch28-cxx11-cu129-x86_64-linux/quantization_eetq/_quantization_eetq_92f3139.abi3.so → torch28-cxx11-cu126-x86_64-linux/_quantization_eetq_2019ec2.abi3.so} +2 -2
  16. build/{torch28-cxx11-cu129-x86_64-linux/quantization_eetq → torch28-cxx11-cu126-x86_64-linux}/custom_ops.py +0 -0
  17. build/torch28-cxx11-cu126-x86_64-linux/metadata.json +1 -0
  18. build/torch28-cxx11-cu126-x86_64-linux/quantization_eetq/__init__.py +25 -2
  19. build/torch28-cxx11-cu126-x86_64-linux/quantization_eetq/__pycache__/__init__.cpython-313.pyc +0 -0
  20. build/torch28-cxx11-cu126-x86_64-linux/quantization_eetq/__pycache__/_ops.cpython-313.pyc +0 -0
  21. build/torch28-cxx11-cu126-x86_64-linux/quantization_eetq/__pycache__/custom_ops.cpython-313.pyc +0 -0
  22. build/torch28-cxx11-cu128-x86_64-linux/__init__.py +3 -0
  23. build/{torch28-cxx11-cu126-x86_64-linux/quantization_eetq → torch28-cxx11-cu128-x86_64-linux}/_ops.py +3 -3
  24. build/{torch29-cxx11-cu126-x86_64-linux/quantization_eetq/_quantization_eetq_92f3139.abi3.so → torch28-cxx11-cu128-x86_64-linux/_quantization_eetq_2019ec2.abi3.so} +2 -2
  25. build/{torch29-cxx11-cu126-x86_64-linux/quantization_eetq → torch28-cxx11-cu128-x86_64-linux}/custom_ops.py +0 -0
  26. build/torch28-cxx11-cu128-x86_64-linux/metadata.json +1 -0
  27. build/torch28-cxx11-cu128-x86_64-linux/quantization_eetq/__init__.py +25 -2
  28. build/torch28-cxx11-cu128-x86_64-linux/quantization_eetq/__pycache__/__init__.cpython-313.pyc +0 -0
  29. build/torch28-cxx11-cu128-x86_64-linux/quantization_eetq/__pycache__/_ops.cpython-313.pyc +0 -0
  30. build/torch28-cxx11-cu128-x86_64-linux/quantization_eetq/__pycache__/custom_ops.cpython-313.pyc +0 -0
  31. build/torch28-cxx11-cu129-x86_64-linux/__init__.py +3 -0
  32. build/torch28-cxx11-cu129-x86_64-linux/_ops.py +9 -0
  33. build/torch28-cxx11-cu129-x86_64-linux/_quantization_eetq_2019ec2.abi3.so +3 -0
  34. build/{torch29-cxx11-cu128-x86_64-linux/quantization_eetq → torch28-cxx11-cu129-x86_64-linux}/custom_ops.py +0 -0
  35. build/torch28-cxx11-cu129-x86_64-linux/metadata.json +1 -0
  36. build/torch28-cxx11-cu129-x86_64-linux/quantization_eetq/__init__.py +25 -2
  37. build/torch28-cxx11-cu129-x86_64-linux/quantization_eetq/__pycache__/__init__.cpython-313.pyc +0 -0
  38. build/torch28-cxx11-cu129-x86_64-linux/quantization_eetq/__pycache__/_ops.cpython-313.pyc +0 -0
  39. build/torch28-cxx11-cu129-x86_64-linux/quantization_eetq/__pycache__/custom_ops.cpython-313.pyc +0 -0
  40. build/torch29-cxx11-cu126-x86_64-linux/__init__.py +3 -0
  41. build/torch29-cxx11-cu126-x86_64-linux/_ops.py +9 -0
  42. build/torch29-cxx11-cu126-x86_64-linux/_quantization_eetq_2019ec2.abi3.so +3 -0
  43. build/torch29-cxx11-cu126-x86_64-linux/custom_ops.py +36 -0
  44. build/torch29-cxx11-cu126-x86_64-linux/metadata.json +1 -0
  45. build/torch29-cxx11-cu126-x86_64-linux/quantization_eetq/__init__.py +25 -2
  46. build/torch29-cxx11-cu126-x86_64-linux/quantization_eetq/__pycache__/__init__.cpython-313.pyc +0 -0
  47. build/torch29-cxx11-cu126-x86_64-linux/quantization_eetq/__pycache__/_ops.cpython-313.pyc +0 -0
  48. build/torch29-cxx11-cu126-x86_64-linux/quantization_eetq/__pycache__/custom_ops.cpython-313.pyc +0 -0
  49. build/torch29-cxx11-cu128-x86_64-linux/__init__.py +3 -0
  50. build/torch29-cxx11-cu128-x86_64-linux/_ops.py +9 -0
build/torch210-cxx11-cu126-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+ __all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/{torch28-cxx11-cu128-x86_64-linux/quantization_eetq → torch210-cxx11-cu126-x86_64-linux}/_ops.py RENAMED
@@ -1,9 +1,9 @@
  import torch
- from . import _quantization_eetq_92f3139
- ops = torch.ops._quantization_eetq_92f3139
+ from . import _quantization_eetq_2019ec2
+ ops = torch.ops._quantization_eetq_2019ec2
 
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_quantization_eetq_92f3139::{op_name}"
+     return f"_quantization_eetq_2019ec2::{op_name}"
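The hash suffix in the namespace (`92f3139` before, `2019ec2` after) is derived from the build, so two builds of the same kernel can register their ops in one process without clashing. A short sketch of how the prefixed name resolves, assuming the package is importable as `quantization_eetq`:

```python
import torch
from quantization_eetq._ops import ops, add_op_namespace_prefix  # as defined above

qualified = add_op_namespace_prefix("w8_a16_gemm")
# -> "_quantization_eetq_2019ec2::w8_a16_gemm"

# The qualified name resolves through torch.ops to the same callable
# that `ops.w8_a16_gemm` refers to:
namespace, name = qualified.split("::")
op = getattr(getattr(torch.ops, namespace), name)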
build/{torch28-cxx11-cu126-x86_64-linux/quantization_eetq/_quantization_eetq_92f3139.abi3.so → torch210-cxx11-cu126-x86_64-linux/_quantization_eetq_2019ec2.abi3.so} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:12d4712f6937fc8abaf1c30461318ac52913e3b836430b28e60b8ee07bd5490b
- size 31593848
+ oid sha256:7ddd95d6bd5fa005afb8ca1cca98f8e666888437a7b21faa044d7e3180aa0902
+ size 39060392
build/{torch28-cxx11-cu126-x86_64-linux/quantization_eetq → torch210-cxx11-cu126-x86_64-linux}/custom_ops.py RENAMED
File without changes
build/torch210-cxx11-cu126-x86_64-linux/metadata.json ADDED
@@ -0,0 +1 @@
+ {"python-depends":[]}
build/torch210-cxx11-cu126-x86_64-linux/quantization_eetq/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import sys
+
+ import importlib
+ from pathlib import Path
+ from types import ModuleType
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is, after adding it to `sys.modules`,
+     # it would also be used for other imports. So, we make a module name that
+     # depends on the path for it to be unique using the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
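This shim re-exports the variant's top-level `__init__.py` under a path-derived module name, so the same package can be loaded from several build directories without `sys.modules` collisions. A self-contained sketch of why the path hash gives distinct keys; the two paths below are hypothetical:

```python
import ctypes
from pathlib import Path

# Hypothetical: two copies of the same package in different build dirs.
a = Path("build/torch29-cxx11-cu126-x86_64-linux/__init__.py").absolute()
b = Path("build/torch210-cxx11-cu126-x86_64-linux/__init__.py").absolute()

# Each absolute path hashes to its own sys.modules key, so importing both
# copies via _import_from_path (defined above) cannot clobber one another.
key_a = "{:x}".format(ctypes.c_size_t(hash(a)).value)
key_b = "{:x}".format(ctypes.c_size_t(hash(b)).value)
assert key_a != key_b
```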
build/torch210-cxx11-cu128-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+ __all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/{torch28-cxx11-cu129-x86_64-linux/quantization_eetq → torch210-cxx11-cu128-x86_64-linux}/_ops.py RENAMED
@@ -1,9 +1,9 @@
  import torch
- from . import _quantization_eetq_92f3139
- ops = torch.ops._quantization_eetq_92f3139
+ from . import _quantization_eetq_2019ec2
+ ops = torch.ops._quantization_eetq_2019ec2
 
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_quantization_eetq_92f3139::{op_name}"
+     return f"_quantization_eetq_2019ec2::{op_name}"
build/{torch28-cxx11-cu128-x86_64-linux/quantization_eetq/_quantization_eetq_92f3139.abi3.so → torch210-cxx11-cu128-x86_64-linux/_quantization_eetq_2019ec2.abi3.so} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6ee78153171588c3a6e34fd000b19cf502b98fcd135f3348562aa77cc0eba4c8
- size 38012880
+ oid sha256:6026ada25ff3b5a4eb532b0e8a6825ef353d4c507f36817020557c68b22db2f8
+ size 45393976
build/{torch28-cxx11-cu128-x86_64-linux/quantization_eetq → torch210-cxx11-cu128-x86_64-linux}/custom_ops.py RENAMED
File without changes
build/torch210-cxx11-cu128-x86_64-linux/metadata.json ADDED
@@ -0,0 +1 @@
+ {"python-depends":[]}
build/torch210-cxx11-cu128-x86_64-linux/quantization_eetq/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import sys
+
+ import importlib
+ from pathlib import Path
+ from types import ModuleType
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is, after adding it to `sys.modules`,
+     # it would also be used for other imports. So, we make a module name that
+     # depends on the path for it to be unique using the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch28-cxx11-cu126-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+ __all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/{torch29-cxx11-cu126-x86_64-linux/quantization_eetq → torch28-cxx11-cu126-x86_64-linux}/_ops.py RENAMED
@@ -1,9 +1,9 @@
  import torch
- from . import _quantization_eetq_92f3139
- ops = torch.ops._quantization_eetq_92f3139
+ from . import _quantization_eetq_2019ec2
+ ops = torch.ops._quantization_eetq_2019ec2
 
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_quantization_eetq_92f3139::{op_name}"
+     return f"_quantization_eetq_2019ec2::{op_name}"
build/{torch28-cxx11-cu129-x86_64-linux/quantization_eetq/_quantization_eetq_92f3139.abi3.so → torch28-cxx11-cu126-x86_64-linux/_quantization_eetq_2019ec2.abi3.so} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bda61b1d3844adeff9e030c2bb933b7a6f1a112a95f3a2ca4723bf7c54496476
- size 38775000
+ oid sha256:a97b47d38cf622b58973de106d9b36fa9f41400c0de74a98c67b39d916d10410
+ size 39045544
build/{torch28-cxx11-cu129-x86_64-linux/quantization_eetq → torch28-cxx11-cu126-x86_64-linux}/custom_ops.py RENAMED
File without changes
build/torch28-cxx11-cu126-x86_64-linux/metadata.json ADDED
@@ -0,0 +1 @@
+ {"python-depends":[]}
build/torch28-cxx11-cu126-x86_64-linux/quantization_eetq/__init__.py CHANGED
@@ -1,3 +1,26 @@
- from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+ import ctypes
+ import sys
 
- __all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
+ import importlib
+ from pathlib import Path
+ from types import ModuleType
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is, after adding it to `sys.modules`,
+     # it would also be used for other imports. So, we make a module name that
+     # depends on the path for it to be unique using the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch28-cxx11-cu126-x86_64-linux/quantization_eetq/__pycache__/__init__.cpython-313.pyc DELETED
Binary file (334 Bytes)
 
build/torch28-cxx11-cu126-x86_64-linux/quantization_eetq/__pycache__/_ops.cpython-313.pyc DELETED
Binary file (547 Bytes)
 
build/torch28-cxx11-cu126-x86_64-linux/quantization_eetq/__pycache__/custom_ops.cpython-313.pyc DELETED
Binary file (1.84 kB)
 
build/torch28-cxx11-cu128-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+ __all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/{torch28-cxx11-cu126-x86_64-linux/quantization_eetq → torch28-cxx11-cu128-x86_64-linux}/_ops.py RENAMED
@@ -1,9 +1,9 @@
  import torch
- from . import _quantization_eetq_92f3139
- ops = torch.ops._quantization_eetq_92f3139
+ from . import _quantization_eetq_2019ec2
+ ops = torch.ops._quantization_eetq_2019ec2
 
  def add_op_namespace_prefix(op_name: str):
      """
      Prefix op by namespace.
      """
-     return f"_quantization_eetq_92f3139::{op_name}"
+     return f"_quantization_eetq_2019ec2::{op_name}"
build/{torch29-cxx11-cu126-x86_64-linux/quantization_eetq/_quantization_eetq_92f3139.abi3.so → torch28-cxx11-cu128-x86_64-linux/_quantization_eetq_2019ec2.abi3.so} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:17aa5f2536b515fa31541c80daa97fb01422f0ec9f4f4c9cc351c8cfaf3a32e9
- size 31593824
+ oid sha256:fdfe703aa8a570bb3589ec89aeca00af0a983b968700933127f2762f50c47686
+ size 45378576
build/{torch29-cxx11-cu126-x86_64-linux/quantization_eetq → torch28-cxx11-cu128-x86_64-linux}/custom_ops.py RENAMED
File without changes
build/torch28-cxx11-cu128-x86_64-linux/metadata.json ADDED
@@ -0,0 +1 @@
+ {"python-depends":[]}
build/torch28-cxx11-cu128-x86_64-linux/quantization_eetq/__init__.py CHANGED
@@ -1,3 +1,26 @@
- from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+ import ctypes
+ import sys
 
- __all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
+ import importlib
+ from pathlib import Path
+ from types import ModuleType
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is, after adding it to `sys.modules`,
+     # it would also be used for other imports. So, we make a module name that
+     # depends on the path for it to be unique using the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch28-cxx11-cu128-x86_64-linux/quantization_eetq/__pycache__/__init__.cpython-313.pyc DELETED
Binary file (334 Bytes)
 
build/torch28-cxx11-cu128-x86_64-linux/quantization_eetq/__pycache__/_ops.cpython-313.pyc DELETED
Binary file (547 Bytes)
 
build/torch28-cxx11-cu128-x86_64-linux/quantization_eetq/__pycache__/custom_ops.cpython-313.pyc DELETED
Binary file (1.84 kB)
 
build/torch28-cxx11-cu129-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+ __all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch28-cxx11-cu129-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _quantization_eetq_2019ec2
+ ops = torch.ops._quantization_eetq_2019ec2
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_quantization_eetq_2019ec2::{op_name}"
build/torch28-cxx11-cu129-x86_64-linux/_quantization_eetq_2019ec2.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea1e527ad52b776e108df3ad077a84ec8db233a67630fa8389fa03a641b4a8c8
+ size 46492944
build/{torch29-cxx11-cu128-x86_64-linux/quantization_eetq → torch28-cxx11-cu129-x86_64-linux}/custom_ops.py RENAMED
File without changes
build/torch28-cxx11-cu129-x86_64-linux/metadata.json ADDED
@@ -0,0 +1 @@
+ {"python-depends":[]}
build/torch28-cxx11-cu129-x86_64-linux/quantization_eetq/__init__.py CHANGED
@@ -1,3 +1,26 @@
- from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+ import ctypes
+ import sys
 
- __all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
+ import importlib
+ from pathlib import Path
+ from types import ModuleType
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is, after adding it to `sys.modules`,
+     # it would also be used for other imports. So, we make a module name that
+     # depends on the path for it to be unique using the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch28-cxx11-cu129-x86_64-linux/quantization_eetq/__pycache__/__init__.cpython-313.pyc DELETED
Binary file (334 Bytes)
 
build/torch28-cxx11-cu129-x86_64-linux/quantization_eetq/__pycache__/_ops.cpython-313.pyc DELETED
Binary file (547 Bytes)
 
build/torch28-cxx11-cu129-x86_64-linux/quantization_eetq/__pycache__/custom_ops.cpython-313.pyc DELETED
Binary file (1.84 kB)
 
build/torch29-cxx11-cu126-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+ __all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch29-cxx11-cu126-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _quantization_eetq_2019ec2
+ ops = torch.ops._quantization_eetq_2019ec2
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_quantization_eetq_2019ec2::{op_name}"
build/torch29-cxx11-cu126-x86_64-linux/_quantization_eetq_2019ec2.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:232a741853f62f20c6d4e418644918b2f8c2b11c8ee97bf396d0c9e382656edf
+ size 39049616
build/torch29-cxx11-cu126-x86_64-linux/custom_ops.py ADDED
@@ -0,0 +1,36 @@
+ from typing import List
+ import torch
+
+ from ._ops import ops
+
+
+ def w8_a16_gemm(
+     input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+ ) -> torch.Tensor:
+     return ops.w8_a16_gemm(input, weight, scale)
+
+
+ def w8_a16_gemm_(
+     input: torch.Tensor,
+     weight: torch.Tensor,
+     scale: torch.Tensor,
+     output: torch.Tensor,
+     m: int,
+     n: int,
+     k: int,
+ ) -> torch.Tensor:
+     return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+ def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+     return ops.preprocess_weights(origin_weight, is_int4)
+
+
+ def quant_weights(
+     origin_weight: torch.Tensor,
+     quant_type: torch.dtype,
+     return_unprocessed_quantized_tensor: bool,
+ ) -> List[torch.Tensor]:
+     return ops.quant_weights(
+         origin_weight, quant_type, return_unprocessed_quantized_tensor
+     )
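These wrappers expose the EETQ weight-only W8A16 path to Python. A minimal end-to-end sketch, assuming a CUDA device and assuming `quant_weights(..., torch.int8, False)` returns the packed int8 weight followed by the fp16 scales (the exact return layout is not confirmed by this diff):

```python
import torch
from quantization_eetq import quant_weights, w8_a16_gemm

x = torch.randn(8, 4096, dtype=torch.float16, device="cuda")   # fp16 activations
w = torch.randn(4096, 4096, dtype=torch.float16, device="cuda")

# Quantize once at load time; assumed return order: (int8 weight, scales).
qweight, scales = quant_weights(w, torch.int8, False)

# fp16 activations x int8 weights -> fp16 output
y = w8_a16_gemm(x, qweight, scales)
```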
build/torch29-cxx11-cu126-x86_64-linux/metadata.json ADDED
@@ -0,0 +1 @@
+ {"python-depends":[]}
build/torch29-cxx11-cu126-x86_64-linux/quantization_eetq/__init__.py CHANGED
@@ -1,3 +1,26 @@
- from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+ import ctypes
+ import sys
 
- __all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
+ import importlib
+ from pathlib import Path
+ from types import ModuleType
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is, after adding it to `sys.modules`,
+     # it would also be used for other imports. So, we make a module name that
+     # depends on the path for it to be unique using the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch29-cxx11-cu126-x86_64-linux/quantization_eetq/__pycache__/__init__.cpython-313.pyc DELETED
Binary file (334 Bytes)
 
build/torch29-cxx11-cu126-x86_64-linux/quantization_eetq/__pycache__/_ops.cpython-313.pyc DELETED
Binary file (547 Bytes)
 
build/torch29-cxx11-cu126-x86_64-linux/quantization_eetq/__pycache__/custom_ops.cpython-313.pyc DELETED
Binary file (1.84 kB)
 
build/torch29-cxx11-cu128-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+ __all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch29-cxx11-cu128-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _quantization_eetq_2019ec2
+ ops = torch.ops._quantization_eetq_2019ec2
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_quantization_eetq_2019ec2::{op_name}"