Kernels
iamwyldecat committed on
Commit
4b70498
·
1 Parent(s): 44e9845

chore(poly-norm): add ROCm build artifacts

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.so filter=lfs diff=lfs merge=lfs -text
build/torch26-cxx11-rocm62-x86_64-linux/activation/__init__.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ from . import layers
4
+ from ._ops import ops
5
+ from .poly_norm import PolyNormFunction
6
+
7
+
8
def poly_norm(
    x: torch.Tensor,
    weight: torch.Tensor,
    bias: torch.Tensor,
    eps: float = 1e-6,
) -> torch.Tensor:
    """Apply PolyNorm to ``x`` via the fused autograd kernel.

    Args:
        x: Input tensor.
        weight: Learnable per-term weights for the polynomial terms.
        bias: Learnable bias tensor.
        eps: Numerical-stability epsilon. Defaults to ``1e-6``.

    Returns:
        The kernel's output tensor (the result of ``PolyNormFunction.apply``).
    """
    # Fix: the original signature was annotated ``-> None`` even though the
    # function returns the autograd Function's output tensor.
    return PolyNormFunction.apply(x, weight, bias, eps)
15
+
16
+
17
# Public API of the activation package.
__all__ = ["poly_norm", "layers", "ops"]
build/torch26-cxx11-rocm62-x86_64-linux/activation/_activation_44e9845_dirty.abi3.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:527e5aac540e24dc3791fd423fea23f687ea3cffdb627c6a6e35f4df1aa7dec4
3
+ size 2460736
build/torch26-cxx11-rocm62-x86_64-linux/activation/_ops.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from . import _activation_44e9845_dirty
3
+ ops = torch.ops._activation_44e9845_dirty
4
+
5
def add_op_namespace_prefix(op_name: str):
    """Return ``op_name`` qualified with this extension's op namespace."""
    namespace = "_activation_44e9845_dirty"
    return f"{namespace}::{op_name}"
build/torch26-cxx11-rocm62-x86_64-linux/activation/layers.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+ from .poly_norm import PolyNormFunction
5
+
6
+
7
class PolyNorm(nn.Module):
    """Module wrapper around the fused PolyNorm kernel.

    Holds learnable per-term weights (three of them, initialized to a
    uniform average) and a scalar bias, and dispatches to
    ``PolyNormFunction`` in ``forward``.
    """

    def __init__(self, eps):
        super().__init__()
        # One weight per polynomial term; start as an even 1/3 mix.
        self.weight = nn.Parameter(torch.ones(3) / 3)
        # Single-element bias, initialized to zero.
        self.bias = nn.Parameter(torch.zeros(1))
        self.eps = eps

    def forward(self, x: torch.Tensor):
        return PolyNormFunction.apply(x, self.weight, self.bias, self.eps)
build/torch26-cxx11-rocm62-x86_64-linux/activation/poly_norm.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ from ._ops import ops
4
+
5
+
6
class PolyNormFunction(torch.autograd.Function):
    """Autograd bridge for the fused ``poly_norm`` forward/backward kernels.

    Note: ``forward``, ``setup_context``, and ``backward`` are all
    staticmethods, per the modern autograd.Function protocol.
    """

    @staticmethod
    def forward(input, weight, bias, eps):
        # The kernel writes into a preallocated output of the input's shape.
        out = torch.empty_like(input)
        ops.poly_norm(out, input, weight, bias, eps)
        return out

    @staticmethod
    def setup_context(ctx, inputs, output):
        # ``inputs`` is the tuple of arguments passed to forward();
        # only input and weight are needed by backward, plus the eps scalar.
        input, weight, _bias, eps = inputs
        ctx.save_for_backward(input, weight)
        ctx.eps = eps

    @staticmethod
    def backward(ctx, output_grad):
        # Single forward output, so there is exactly one incoming gradient.
        input, weight = ctx.saved_tensors

        # Allocate gradient buffers only for arguments that require grad;
        # the kernel treats None as "skip this gradient".
        needs_input, needs_weight, needs_bias = ctx.needs_input_grad[:3]
        input_grad = torch.empty_like(input) if needs_input else None
        weight_grad = torch.empty_like(weight) if needs_weight else None
        if needs_bias:
            bias_grad = torch.empty(1, dtype=weight.dtype, device=weight.device)
        else:
            bias_grad = None

        ops.poly_norm_backward(
            input_grad, weight_grad, bias_grad, output_grad, input, weight, ctx.eps
        )

        # eps is a plain float, so it receives no gradient.
        return input_grad, weight_grad, bias_grad, None
build/torch27-cxx11-rocm63-x86_64-linux/activation/__init__.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ from . import layers
4
+ from ._ops import ops
5
+ from .poly_norm import PolyNormFunction
6
+
7
+
8
def poly_norm(
    x: torch.Tensor,
    weight: torch.Tensor,
    bias: torch.Tensor,
    eps: float = 1e-6,
) -> torch.Tensor:
    """Apply PolyNorm to ``x`` via the fused autograd kernel.

    Args:
        x: Input tensor.
        weight: Learnable per-term weights for the polynomial terms.
        bias: Learnable bias tensor.
        eps: Numerical-stability epsilon. Defaults to ``1e-6``.

    Returns:
        The kernel's output tensor (the result of ``PolyNormFunction.apply``).
    """
    # Fix: the original signature was annotated ``-> None`` even though the
    # function returns the autograd Function's output tensor.
    return PolyNormFunction.apply(x, weight, bias, eps)
15
+
16
+
17
# Public API of the activation package.
__all__ = ["poly_norm", "layers", "ops"]
build/torch27-cxx11-rocm63-x86_64-linux/activation/_activation_44e9845_dirty.abi3.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b3e00863b72834e1d121e377e41724b1479703051aed7d9d8a64019d6a92bf54
3
+ size 2447432
build/torch27-cxx11-rocm63-x86_64-linux/activation/_ops.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from . import _activation_44e9845_dirty
3
+ ops = torch.ops._activation_44e9845_dirty
4
+
5
def add_op_namespace_prefix(op_name: str):
    """Return ``op_name`` qualified with this extension's op namespace."""
    namespace = "_activation_44e9845_dirty"
    return f"{namespace}::{op_name}"
build/torch27-cxx11-rocm63-x86_64-linux/activation/layers.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+ from .poly_norm import PolyNormFunction
5
+
6
+
7
class PolyNorm(nn.Module):
    """Module wrapper around the fused PolyNorm kernel.

    Holds learnable per-term weights (three of them, initialized to a
    uniform average) and a scalar bias, and dispatches to
    ``PolyNormFunction`` in ``forward``.
    """

    def __init__(self, eps):
        super().__init__()
        # One weight per polynomial term; start as an even 1/3 mix.
        self.weight = nn.Parameter(torch.ones(3) / 3)
        # Single-element bias, initialized to zero.
        self.bias = nn.Parameter(torch.zeros(1))
        self.eps = eps

    def forward(self, x: torch.Tensor):
        return PolyNormFunction.apply(x, self.weight, self.bias, self.eps)
build/torch27-cxx11-rocm63-x86_64-linux/activation/poly_norm.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ from ._ops import ops
4
+
5
+
6
class PolyNormFunction(torch.autograd.Function):
    """Autograd bridge for the fused ``poly_norm`` forward/backward kernels.

    Note: ``forward``, ``setup_context``, and ``backward`` are all
    staticmethods, per the modern autograd.Function protocol.
    """

    @staticmethod
    def forward(input, weight, bias, eps):
        # The kernel writes into a preallocated output of the input's shape.
        out = torch.empty_like(input)
        ops.poly_norm(out, input, weight, bias, eps)
        return out

    @staticmethod
    def setup_context(ctx, inputs, output):
        # ``inputs`` is the tuple of arguments passed to forward();
        # only input and weight are needed by backward, plus the eps scalar.
        input, weight, _bias, eps = inputs
        ctx.save_for_backward(input, weight)
        ctx.eps = eps

    @staticmethod
    def backward(ctx, output_grad):
        # Single forward output, so there is exactly one incoming gradient.
        input, weight = ctx.saved_tensors

        # Allocate gradient buffers only for arguments that require grad;
        # the kernel treats None as "skip this gradient".
        needs_input, needs_weight, needs_bias = ctx.needs_input_grad[:3]
        input_grad = torch.empty_like(input) if needs_input else None
        weight_grad = torch.empty_like(weight) if needs_weight else None
        if needs_bias:
            bias_grad = torch.empty(1, dtype=weight.dtype, device=weight.device)
        else:
            bias_grad = None

        ops.poly_norm_backward(
            input_grad, weight_grad, bias_grad, output_grad, input, weight, ctx.eps
        )

        # eps is a plain float, so it receives no gradient.
        return input_grad, weight_grad, bias_grad, None