Kernels

danieldk (HF Staff) committed c3edf9c (verified) · 1 Parent(s): 7e1681b

Build uploaded using `kernels`.
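
For context: consumers normally do not check out this build tree directly; the `kernels` Python package resolves the variant matching the local torch/CUDA/architecture combination at load time. A minimal loading sketch, assuming the `kernels` package is installed (the repository id below is a placeholder, not taken from this commit):

    # Minimal loading sketch; the repo id is a placeholder.
    import torch
    from kernels import get_kernel

    relu_kernel = get_kernel("<namespace>/relu")  # placeholder repo id
    x = torch.randn(16, device="cuda")
    y = relu_kernel.relu(x)  # picks the build matching torch/CUDA/arch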

Files changed (49)
  1. .gitattributes +8 -0
  2. build/torch210-cxx11-cpu-aarch64-linux/__init__.py +16 -0
  3. build/torch210-cxx11-cpu-aarch64-linux/_ops.py +9 -0
  4. build/torch210-cxx11-cpu-aarch64-linux/_relu_cpu_6261c06.abi3.so +3 -0
  5. build/torch210-cxx11-cpu-aarch64-linux/layers/__init__.py +11 -0
  6. build/torch210-cxx11-cpu-aarch64-linux/metadata.json +5 -0
  7. build/torch210-cxx11-cpu-aarch64-linux/relu/__init__.py +26 -0
  8. build/torch210-cxx11-cu126-aarch64-linux/__init__.py +16 -0
  9. build/torch210-cxx11-cu126-aarch64-linux/_ops.py +9 -0
  10. build/torch210-cxx11-cu126-aarch64-linux/_relu_cuda_6261c06.abi3.so +3 -0
  11. build/torch210-cxx11-cu126-aarch64-linux/layers/__init__.py +11 -0
  12. build/torch210-cxx11-cu126-aarch64-linux/metadata.json +18 -0
  13. build/torch210-cxx11-cu126-aarch64-linux/relu/__init__.py +26 -0
  14. build/torch210-cxx11-cu128-aarch64-linux/__init__.py +16 -0
  15. build/torch210-cxx11-cu128-aarch64-linux/_ops.py +9 -0
  16. build/torch210-cxx11-cu128-aarch64-linux/_relu_cuda_6261c06.abi3.so +3 -0
  17. build/torch210-cxx11-cu128-aarch64-linux/layers/__init__.py +11 -0
  18. build/torch210-cxx11-cu128-aarch64-linux/metadata.json +21 -0
  19. build/torch210-cxx11-cu128-aarch64-linux/relu/__init__.py +26 -0
  20. build/torch210-cxx11-cu130-aarch64-linux/__init__.py +16 -0
  21. build/torch210-cxx11-cu130-aarch64-linux/_ops.py +9 -0
  22. build/torch210-cxx11-cu130-aarch64-linux/_relu_cuda_6261c06.abi3.so +3 -0
  23. build/torch210-cxx11-cu130-aarch64-linux/layers/__init__.py +11 -0
  24. build/torch210-cxx11-cu130-aarch64-linux/metadata.json +19 -0
  25. build/torch210-cxx11-cu130-aarch64-linux/relu/__init__.py +26 -0
  26. build/torch29-cxx11-cpu-aarch64-linux/__init__.py +16 -0
  27. build/torch29-cxx11-cpu-aarch64-linux/_ops.py +9 -0
  28. build/torch29-cxx11-cpu-aarch64-linux/_relu_cpu_6261c06.abi3.so +3 -0
  29. build/torch29-cxx11-cpu-aarch64-linux/layers/__init__.py +11 -0
  30. build/torch29-cxx11-cpu-aarch64-linux/metadata.json +5 -0
  31. build/torch29-cxx11-cpu-aarch64-linux/relu/__init__.py +26 -0
  32. build/torch29-cxx11-cu126-aarch64-linux/__init__.py +16 -0
  33. build/torch29-cxx11-cu126-aarch64-linux/_ops.py +9 -0
  34. build/torch29-cxx11-cu126-aarch64-linux/_relu_cuda_6261c06.abi3.so +3 -0
  35. build/torch29-cxx11-cu126-aarch64-linux/layers/__init__.py +11 -0
  36. build/torch29-cxx11-cu126-aarch64-linux/metadata.json +18 -0
  37. build/torch29-cxx11-cu126-aarch64-linux/relu/__init__.py +26 -0
  38. build/torch29-cxx11-cu128-aarch64-linux/__init__.py +16 -0
  39. build/torch29-cxx11-cu128-aarch64-linux/_ops.py +9 -0
  40. build/torch29-cxx11-cu128-aarch64-linux/_relu_cuda_6261c06.abi3.so +3 -0
  41. build/torch29-cxx11-cu128-aarch64-linux/layers/__init__.py +11 -0
  42. build/torch29-cxx11-cu128-aarch64-linux/metadata.json +21 -0
  43. build/torch29-cxx11-cu128-aarch64-linux/relu/__init__.py +26 -0
  44. build/torch29-cxx11-cu130-aarch64-linux/__init__.py +16 -0
  45. build/torch29-cxx11-cu130-aarch64-linux/_ops.py +9 -0
  46. build/torch29-cxx11-cu130-aarch64-linux/_relu_cuda_6261c06.abi3.so +3 -0
  47. build/torch29-cxx11-cu130-aarch64-linux/layers/__init__.py +11 -0
  48. build/torch29-cxx11-cu130-aarch64-linux/metadata.json +19 -0
  49. build/torch29-cxx11-cu130-aarch64-linux/relu/__init__.py +26 -0
.gitattributes CHANGED
@@ -250,3 +250,11 @@ build/torch210-xpu20253-x86_64-windows/_relu_xpu_bd0179a.pyd filter=lfs diff=lfs
 build/torch210-cu128-x86_64-windows/_relu_cuda_d91a431.pyd filter=lfs diff=lfs merge=lfs -text
 build/torch210-xpu20253-x86_64-windows/_relu_xpu_d91a431.pyd filter=lfs diff=lfs merge=lfs -text
 build/torch210-metal-aarch64-darwin/_relu_metal_6261c06.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cpu-aarch64-linux/_relu_cpu_6261c06.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cu126-aarch64-linux/_relu_cuda_6261c06.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cu128-aarch64-linux/_relu_cuda_6261c06.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cu130-aarch64-linux/_relu_cuda_6261c06.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-cpu-aarch64-linux/_relu_cpu_6261c06.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-cu126-aarch64-linux/_relu_cuda_6261c06.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-cu128-aarch64-linux/_relu_cuda_6261c06.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-cu130-aarch64-linux/_relu_cuda_6261c06.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cpu-aarch64-linux/__init__.py ADDED
@@ -0,0 +1,16 @@
+from typing import Optional
+
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def relu(x: torch.Tensor, out: Optional[torch.Tensor] = None) -> torch.Tensor:
+    if out is None:
+        out = torch.empty_like(x)
+    ops.relu(out, x)
+    return out
+
+__all__ = ["relu", "layers"]
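
The `relu` wrapper above follows an out-parameter convention: passing a preallocated tensor skips the internal `torch.empty_like` allocation, which lets callers reuse buffers. A usage sketch, assuming one of the build directories is importable as `relu` (the import name is illustrative):

    # Both call styles of the relu() wrapper; the import name is illustrative.
    import torch
    import relu

    x = torch.randn(8)
    y = relu.relu(x)            # allocates the output internally
    buf = torch.empty_like(x)
    relu.relu(x, out=buf)       # reuses a caller-provided buffer
    assert torch.equal(y, buf)
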
build/torch210-cxx11-cpu-aarch64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _relu_cpu_6261c06
+ops = torch.ops._relu_cpu_6261c06
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_relu_cpu_6261c06::{op_name}"
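
`add_op_namespace_prefix` builds the fully qualified `namespace::op` string that `torch.library` APIs expect. An illustrative sketch of how such a name would be used; this registration is not part of this commit, and it assumes the op schema is `relu(Tensor! out, Tensor x) -> ()`:

    # Illustrative only: qualify the op name for a torch.library registration.
    import torch

    qualified = add_op_namespace_prefix("relu")  # "_relu_cpu_6261c06::relu"

    @torch.library.register_fake(qualified)
    def _(out, x):
        # The op writes into `out` in place and returns nothing.
        return None
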
build/torch210-cxx11-cpu-aarch64-linux/_relu_cpu_6261c06.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f4a447fd999d3560323e510da358696da51c23ba3eef87e1fb8b57683a2a8f68
+size 2025352
build/torch210-cxx11-cpu-aarch64-linux/layers/__init__.py ADDED
@@ -0,0 +1,11 @@
+import torch
+import torch.nn as nn
+
+from .._ops import ops
+
+
+class ReLU(nn.Module):
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        out = torch.empty_like(x)
+        ops.relu(out, x)
+        return out
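
`layers.ReLU` is a stateless drop-in for `torch.nn.ReLU` whose `forward` dispatches straight to the registered op; this is the hook that lets tooling swap kernel-backed layers into existing models. A small usage sketch (the import name is illustrative):

    # layers.ReLU behaves like torch.nn.ReLU; the import name is illustrative.
    import torch
    from relu import layers

    act = layers.ReLU()
    x = torch.randn(2, 3)
    assert torch.equal(act(x), torch.relu(x))
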
build/torch210-cxx11-cpu-aarch64-linux/metadata.json ADDED
@@ -0,0 +1,5 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": []
+}
build/torch210-cxx11-cpu-aarch64-linux/relu/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
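
The `relu/__init__.py` shim above re-exports the top-level package under a path-hashed module name so that several build variants can sit in `sys.modules` at once without shadowing each other. The naming trick in isolation, as a standalone stdlib-only sketch:

    # Standalone sketch: a path-derived module name lets two same-named
    # files coexist in sys.modules instead of replacing one another.
    import ctypes
    import importlib.util
    import sys
    import tempfile
    from pathlib import Path

    def load(file_path: Path):
        name = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
        spec = importlib.util.spec_from_file_location(name, file_path)
        module = importlib.util.module_from_spec(spec)
        sys.modules[name] = module
        spec.loader.exec_module(module)
        return module

    with tempfile.TemporaryDirectory() as d1, tempfile.TemporaryDirectory() as d2:
        a = Path(d1, "mod.py"); a.write_text("WHO = 'first'")
        b = Path(d2, "mod.py"); b.write_text("WHO = 'second'")
        assert load(a).WHO == "first" and load(b).WHO == "second"
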
build/torch210-cxx11-cu126-aarch64-linux/__init__.py ADDED
@@ -0,0 +1,16 @@
+from typing import Optional
+
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def relu(x: torch.Tensor, out: Optional[torch.Tensor] = None) -> torch.Tensor:
+    if out is None:
+        out = torch.empty_like(x)
+    ops.relu(out, x)
+    return out
+
+__all__ = ["relu", "layers"]
build/torch210-cxx11-cu126-aarch64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _relu_cuda_6261c06
+ops = torch.ops._relu_cuda_6261c06
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_relu_cuda_6261c06::{op_name}"
build/torch210-cxx11-cu126-aarch64-linux/_relu_cuda_6261c06.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f55e81ff893a372fe86ad6443371abe2eb30ee2dc9e8d16ce7cc32309adc21d
+size 2103912
build/torch210-cxx11-cu126-aarch64-linux/layers/__init__.py ADDED
@@ -0,0 +1,11 @@
+import torch
+import torch.nn as nn
+
+from .._ops import ops
+
+
+class ReLU(nn.Module):
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        out = torch.empty_like(x)
+        ops.relu(out, x)
+        return out
build/torch210-cxx11-cu126-aarch64-linux/metadata.json ADDED
@@ -0,0 +1,18 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0+PTX"
+    ]
+  }
+}
build/torch210-cxx11-cu126-aarch64-linux/relu/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch210-cxx11-cu128-aarch64-linux/__init__.py ADDED
@@ -0,0 +1,16 @@
+from typing import Optional
+
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def relu(x: torch.Tensor, out: Optional[torch.Tensor] = None) -> torch.Tensor:
+    if out is None:
+        out = torch.empty_like(x)
+    ops.relu(out, x)
+    return out
+
+__all__ = ["relu", "layers"]
build/torch210-cxx11-cu128-aarch64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _relu_cuda_6261c06
+ops = torch.ops._relu_cuda_6261c06
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_relu_cuda_6261c06::{op_name}"
build/torch210-cxx11-cu128-aarch64-linux/_relu_cuda_6261c06.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dbb4280ecde7decc222502fc79ed7dce04a1a3a4c6ba410140898d549cc475fd
+size 2235240
build/torch210-cxx11-cu128-aarch64-linux/layers/__init__.py ADDED
@@ -0,0 +1,11 @@
+import torch
+import torch.nn as nn
+
+from .._ops import ops
+
+
+class ReLU(nn.Module):
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        out = torch.empty_like(x)
+        ops.relu(out, x)
+        return out
build/torch210-cxx11-cu128-aarch64-linux/metadata.json ADDED
@@ -0,0 +1,21 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "10.1",
+      "12.0+PTX",
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch210-cxx11-cu128-aarch64-linux/relu/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch210-cxx11-cu130-aarch64-linux/__init__.py ADDED
@@ -0,0 +1,16 @@
+from typing import Optional
+
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def relu(x: torch.Tensor, out: Optional[torch.Tensor] = None) -> torch.Tensor:
+    if out is None:
+        out = torch.empty_like(x)
+    ops.relu(out, x)
+    return out
+
+__all__ = ["relu", "layers"]
build/torch210-cxx11-cu130-aarch64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _relu_cuda_6261c06
+ops = torch.ops._relu_cuda_6261c06
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_relu_cuda_6261c06::{op_name}"
build/torch210-cxx11-cu130-aarch64-linux/_relu_cuda_6261c06.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c6ce11cdbee562207b55c3bab5faeaf98c7e1fa87bcdcef231ccbd8005bb561
+size 2236952
build/torch210-cxx11-cu130-aarch64-linux/layers/__init__.py ADDED
@@ -0,0 +1,11 @@
+import torch
+import torch.nn as nn
+
+from .._ops import ops
+
+
+class ReLU(nn.Module):
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        out = torch.empty_like(x)
+        ops.relu(out, x)
+        return out
build/torch210-cxx11-cu130-aarch64-linux/metadata.json ADDED
@@ -0,0 +1,19 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "11.0",
+      "12.0+PTX",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch210-cxx11-cu130-aarch64-linux/relu/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch29-cxx11-cpu-aarch64-linux/__init__.py ADDED
@@ -0,0 +1,16 @@
+from typing import Optional
+
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def relu(x: torch.Tensor, out: Optional[torch.Tensor] = None) -> torch.Tensor:
+    if out is None:
+        out = torch.empty_like(x)
+    ops.relu(out, x)
+    return out
+
+__all__ = ["relu", "layers"]
build/torch29-cxx11-cpu-aarch64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _relu_cpu_6261c06
+ops = torch.ops._relu_cpu_6261c06
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_relu_cpu_6261c06::{op_name}"
build/torch29-cxx11-cpu-aarch64-linux/_relu_cpu_6261c06.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e4a69b4b60cdde8c27497aa5dc37ba4ee0e736b833c34ae84a2da09cb869090e
+size 2024136
build/torch29-cxx11-cpu-aarch64-linux/layers/__init__.py ADDED
@@ -0,0 +1,11 @@
+import torch
+import torch.nn as nn
+
+from .._ops import ops
+
+
+class ReLU(nn.Module):
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        out = torch.empty_like(x)
+        ops.relu(out, x)
+        return out
build/torch29-cxx11-cpu-aarch64-linux/metadata.json ADDED
@@ -0,0 +1,5 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": []
+}
build/torch29-cxx11-cpu-aarch64-linux/relu/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch29-cxx11-cu126-aarch64-linux/__init__.py ADDED
@@ -0,0 +1,16 @@
+from typing import Optional
+
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def relu(x: torch.Tensor, out: Optional[torch.Tensor] = None) -> torch.Tensor:
+    if out is None:
+        out = torch.empty_like(x)
+    ops.relu(out, x)
+    return out
+
+__all__ = ["relu", "layers"]
build/torch29-cxx11-cu126-aarch64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _relu_cuda_6261c06
+ops = torch.ops._relu_cuda_6261c06
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_relu_cuda_6261c06::{op_name}"
build/torch29-cxx11-cu126-aarch64-linux/_relu_cuda_6261c06.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0da6a76fa50ddd780d807f638a7b122d4e21ec41383e977621a8e675921efe39
+size 2101784
build/torch29-cxx11-cu126-aarch64-linux/layers/__init__.py ADDED
@@ -0,0 +1,11 @@
+import torch
+import torch.nn as nn
+
+from .._ops import ops
+
+
+class ReLU(nn.Module):
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        out = torch.empty_like(x)
+        ops.relu(out, x)
+        return out
build/torch29-cxx11-cu126-aarch64-linux/metadata.json ADDED
@@ -0,0 +1,18 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0+PTX"
+    ]
+  }
+}
build/torch29-cxx11-cu126-aarch64-linux/relu/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch29-cxx11-cu128-aarch64-linux/__init__.py ADDED
@@ -0,0 +1,16 @@
+from typing import Optional
+
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def relu(x: torch.Tensor, out: Optional[torch.Tensor] = None) -> torch.Tensor:
+    if out is None:
+        out = torch.empty_like(x)
+    ops.relu(out, x)
+    return out
+
+__all__ = ["relu", "layers"]
build/torch29-cxx11-cu128-aarch64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _relu_cuda_6261c06
+ops = torch.ops._relu_cuda_6261c06
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_relu_cuda_6261c06::{op_name}"
build/torch29-cxx11-cu128-aarch64-linux/_relu_cuda_6261c06.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f16d75e275c8d0cf86c119e8e7acdf0ed35a97f6d14097808f0a6407d56a9246
+size 2232968
build/torch29-cxx11-cu128-aarch64-linux/layers/__init__.py ADDED
@@ -0,0 +1,11 @@
+import torch
+import torch.nn as nn
+
+from .._ops import ops
+
+
+class ReLU(nn.Module):
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        out = torch.empty_like(x)
+        ops.relu(out, x)
+        return out
build/torch29-cxx11-cu128-aarch64-linux/metadata.json ADDED
@@ -0,0 +1,21 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "10.1",
+      "12.0+PTX",
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch29-cxx11-cu128-aarch64-linux/relu/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch29-cxx11-cu130-aarch64-linux/__init__.py ADDED
@@ -0,0 +1,16 @@
+from typing import Optional
+
+import torch
+
+from ._ops import ops
+
+from . import layers
+
+
+def relu(x: torch.Tensor, out: Optional[torch.Tensor] = None) -> torch.Tensor:
+    if out is None:
+        out = torch.empty_like(x)
+    ops.relu(out, x)
+    return out
+
+__all__ = ["relu", "layers"]
build/torch29-cxx11-cu130-aarch64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _relu_cuda_6261c06
+ops = torch.ops._relu_cuda_6261c06
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_relu_cuda_6261c06::{op_name}"
build/torch29-cxx11-cu130-aarch64-linux/_relu_cuda_6261c06.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c59120f175cd03135e9ef7058f0293d500d0e43585a0965857e6b4be9d6f90fb
+size 2234680
build/torch29-cxx11-cu130-aarch64-linux/layers/__init__.py ADDED
@@ -0,0 +1,11 @@
+import torch
+import torch.nn as nn
+
+from .._ops import ops
+
+
+class ReLU(nn.Module):
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        out = torch.empty_like(x)
+        ops.relu(out, x)
+        return out
build/torch29-cxx11-cu130-aarch64-linux/metadata.json ADDED
@@ -0,0 +1,19 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "11.0",
+      "12.0+PTX",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch29-cxx11-cu130-aarch64-linux/relu/__init__.py ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))