Build uploaded using `kernels`.
- .gitattributes +6 -0
- build/torch210-cxx11-cpu-x86_64-linux/__init__.py +14 -1
- build/torch210-cxx11-cpu-x86_64-linux/_ops.py +3 -3
- build/torch210-cxx11-cpu-x86_64-linux/{_rmsnorm_a8702c9.abi3.so → _rmsnorm_235cde1.abi3.so} +2 -2
- build/torch210-cxx11-cpu-x86_64-linux/layers.py +24 -1
- build/torch210-cxx11-xpu20253-x86_64-linux/__init__.py +14 -1
- build/torch210-cxx11-xpu20253-x86_64-linux/_ops.py +3 -3
- build/torch210-cxx11-xpu20253-x86_64-linux/{_rmsnorm_a8702c9.abi3.so → _rmsnorm_235cde1.abi3.so} +2 -2
- build/torch210-cxx11-xpu20253-x86_64-linux/layers.py +24 -1
- build/torch28-cxx11-cpu-x86_64-linux/__init__.py +14 -1
- build/torch28-cxx11-cpu-x86_64-linux/_ops.py +3 -3
- build/{torch29-cxx11-cpu-x86_64-linux/_rmsnorm_a8702c9.abi3.so → torch28-cxx11-cpu-x86_64-linux/_rmsnorm_235cde1.abi3.so} +2 -2
- build/torch28-cxx11-cpu-x86_64-linux/layers.py +24 -1
- build/torch28-cxx11-xpu20251-x86_64-linux/__init__.py +14 -1
- build/torch28-cxx11-xpu20251-x86_64-linux/_ops.py +3 -3
- build/torch28-cxx11-xpu20251-x86_64-linux/{_rmsnorm_a8702c9.abi3.so → _rmsnorm_235cde1.abi3.so} +2 -2
- build/torch28-cxx11-xpu20251-x86_64-linux/layers.py +24 -1
- build/torch29-cxx11-cpu-x86_64-linux/__init__.py +14 -1
- build/torch29-cxx11-cpu-x86_64-linux/_ops.py +3 -3
- build/{torch28-cxx11-cpu-x86_64-linux/_rmsnorm_a8702c9.abi3.so → torch29-cxx11-cpu-x86_64-linux/_rmsnorm_235cde1.abi3.so} +2 -2
- build/torch29-cxx11-cpu-x86_64-linux/layers.py +24 -1
- build/torch29-cxx11-xpu20252-x86_64-linux/__init__.py +14 -1
- build/torch29-cxx11-xpu20252-x86_64-linux/_ops.py +3 -3
- build/torch29-cxx11-xpu20252-x86_64-linux/_rmsnorm_235cde1.abi3.so +3 -0
- build/torch29-cxx11-xpu20252-x86_64-linux/_rmsnorm_a8702c9.abi3.so +0 -3
- build/torch29-cxx11-xpu20252-x86_64-linux/layers.py +24 -1
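In short: the rebuild renames the extension module from `_rmsnorm_a8702c9` to `_rmsnorm_235cde1` across all six build variants and extends the Python shims with a backward pass (`apply_rms_norm_backward` in `__init__.py`, plus an autograd-aware `RMSNormFunction` in `layers.py`). A minimal sketch of consuming a build like this through the `kernels` library; the repo id below is a placeholder, not taken from this page:

import torch
from kernels import get_kernel

# Hypothetical repo id; get_kernel resolves the build/torch*-* variant
# matching the local torch version and device toolchain.
rmsnorm = get_kernel("your-org/rmsnorm")

x = torch.randn(2, 8)
w = torch.ones(8)
out = rmsnorm.apply_rms_norm(x, w, 1e-6)  # forward shim from __init__.py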
.gitattributes
CHANGED
@@ -64,3 +64,9 @@ build/torch28-cxx11-cpu-x86_64-linux/_rmsnorm_a8702c9.abi3.so filter=lfs diff=lfs merge=lfs -text
 build/torch28-cxx11-xpu20251-x86_64-linux/_rmsnorm_a8702c9.abi3.so filter=lfs diff=lfs merge=lfs -text
 build/torch29-cxx11-cpu-x86_64-linux/_rmsnorm_a8702c9.abi3.so filter=lfs diff=lfs merge=lfs -text
 build/torch29-cxx11-xpu20252-x86_64-linux/_rmsnorm_a8702c9.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-cpu-x86_64-linux/_rmsnorm_235cde1.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch210-cxx11-xpu20253-x86_64-linux/_rmsnorm_235cde1.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch28-cxx11-cpu-x86_64-linux/_rmsnorm_235cde1.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch28-cxx11-xpu20251-x86_64-linux/_rmsnorm_235cde1.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-cpu-x86_64-linux/_rmsnorm_235cde1.abi3.so filter=lfs diff=lfs merge=lfs -text
+build/torch29-cxx11-xpu20252-x86_64-linux/_rmsnorm_235cde1.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cpu-x86_64-linux/__init__.py
CHANGED
@@ -4,11 +4,24 @@ from ._ops import ops
 
 
 def apply_rms_norm(input, weight, eps):
+    # ops.apply_rms_norm returns [output, rstd]
     return ops.apply_rms_norm(
         input,
         weight,
         eps,
+    )[0]
+
+def apply_rms_norm_backward(grad_output, input, weight, output, rstd, eps, input_requires_grad=True, weight_requires_grad=True):
+    return ops.apply_rms_norm_backward(
+        grad_output,
+        input,
+        weight,
+        output,
+        rstd,
+        eps,
+        input_requires_grad,
+        weight_requires_grad
     )
 
-__all__ = ["layers", "apply_rms_norm"]
+__all__ = ["layers", "apply_rms_norm_forward", "apply_rms_norm_backward"]
 
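The forward shim now returns only the normalized output (index `[0]`), while the raw op also produces `rstd`, which the backward shim consumes. One detail worth flagging: the new `__all__` exports `apply_rms_norm_forward`, but the function defined above is named `apply_rms_norm`. A hedged sketch of how the two shims pair up, assuming the package imports as `rmsnorm_pkg` (a placeholder name); shapes are illustrative:

import torch
import rmsnorm_pkg as k  # placeholder import name for this build

x = torch.randn(4, 16)
w = torch.ones(16)
eps = 1e-6

# Raw op: returns [output, rstd]; the __init__.py shim keeps only output.
out, rstd = k.ops.apply_rms_norm(x, w, eps)

grad_out = torch.ones_like(out)
grads = k.apply_rms_norm_backward(grad_out, x, w, out, rstd, eps, True, True)
grad_x, grad_w = grads[0], grads[1]  # per the indexing used in layers.py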
build/torch210-cxx11-cpu-x86_64-linux/_ops.py
CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _rmsnorm_a8702c9
-ops = torch.ops._rmsnorm_a8702c9
+from . import _rmsnorm_235cde1
+ops = torch.ops._rmsnorm_235cde1
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_rmsnorm_a8702c9::{op_name}"
+    return f"_rmsnorm_235cde1::{op_name}"
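`_ops.py` is the generated binding layer: importing `_rmsnorm_235cde1` loads the `.abi3.so`, which registers its operators under a torch namespace of the same name, so they surface as `torch.ops._rmsnorm_235cde1.<op>`. `add_op_namespace_prefix` builds the fully qualified "namespace::op" string that `torch.library`-style APIs expect. A small sketch, reusing the placeholder package name:

from rmsnorm_pkg._ops import ops, add_op_namespace_prefix  # placeholder package

qualified = add_op_namespace_prefix("apply_rms_norm")
assert qualified == "_rmsnorm_235cde1::apply_rms_norm"

# The same op object is reachable through the torch.ops registry:
# ops.apply_rms_norm is torch.ops._rmsnorm_235cde1.apply_rms_norm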
build/torch210-cxx11-cpu-x86_64-linux/{_rmsnorm_a8702c9.abi3.so → _rmsnorm_235cde1.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:08b2b07a7190f17e6463890cec75f84f96b10235e6daafc6adc3d4807c868607
+size 158864
build/torch210-cxx11-cpu-x86_64-linux/layers.py
CHANGED
@@ -1,6 +1,29 @@
 import torch
 from ._ops import ops
 
+class RMSNormFunction(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, hidden_states, weight, variance_epsilon):
+        ctx.variance_epsilon = variance_epsilon
+        output, rstd = ops.apply_rms_norm(hidden_states, weight, variance_epsilon)
+        ctx.save_for_backward(hidden_states, weight, output, rstd)
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        hidden_states, weight, output, rstd = ctx.saved_tensors
+        grads = ops.apply_rms_norm_backward(
+            grad_output,
+            hidden_states,
+            weight,
+            output,
+            rstd,
+            ctx.variance_epsilon,
+            ctx.needs_input_grad[0],
+            ctx.needs_input_grad[1]
+        )
+        return grads[0], grads[1], None
+
 class RMSNorm(torch.nn.Module):
     """
     RMSNorm module that uses the optimized LigerRMSNormFunction.
@@ -27,7 +50,7 @@ class RMSNorm(torch.nn.Module):
         Returns:
             torch.Tensor: Normalized tensor of the same shape as input
         """
-        return
+        return RMSNormFunction.apply(
             hidden_states,
             self.weight,
             self.variance_epsilon,
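With `RMSNormFunction` wired into `RMSNorm.forward`, gradients now flow through the fused kernel instead of stopping after the forward op, and `ctx.needs_input_grad` lets the backward op skip gradients nobody asked for. A minimal training-style sketch; the import path and constructor arguments are assumptions, since the module's `__init__` is not shown in this diff:

import torch
from rmsnorm_pkg.layers import RMSNorm  # placeholder import path

layer = RMSNorm(16)  # assumed signature: hidden size (not shown in the diff)
x = torch.randn(4, 16, requires_grad=True)

y = layer(x)        # forward: RMSNormFunction.apply -> ops.apply_rms_norm
y.sum().backward()  # backward: ops.apply_rms_norm_backward

print(x.grad.shape, layer.weight.grad.shape)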
build/torch210-cxx11-xpu20253-x86_64-linux/__init__.py
CHANGED
@@ -4,11 +4,24 @@ from ._ops import ops
 
 
 def apply_rms_norm(input, weight, eps):
+    # ops.apply_rms_norm returns [output, rstd]
     return ops.apply_rms_norm(
         input,
         weight,
         eps,
+    )[0]
+
+def apply_rms_norm_backward(grad_output, input, weight, output, rstd, eps, input_requires_grad=True, weight_requires_grad=True):
+    return ops.apply_rms_norm_backward(
+        grad_output,
+        input,
+        weight,
+        output,
+        rstd,
+        eps,
+        input_requires_grad,
+        weight_requires_grad
     )
 
-__all__ = ["layers", "apply_rms_norm"]
+__all__ = ["layers", "apply_rms_norm_forward", "apply_rms_norm_backward"]
 
build/torch210-cxx11-xpu20253-x86_64-linux/_ops.py
CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _rmsnorm_a8702c9
-ops = torch.ops._rmsnorm_a8702c9
+from . import _rmsnorm_235cde1
+ops = torch.ops._rmsnorm_235cde1
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_rmsnorm_a8702c9::{op_name}"
+    return f"_rmsnorm_235cde1::{op_name}"
build/torch210-cxx11-xpu20253-x86_64-linux/{_rmsnorm_a8702c9.abi3.so → _rmsnorm_235cde1.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:7a3adbeed741dcfb0ae701761009dba190d6432383906831741cedc133a519d7
+size 104793352
build/torch210-cxx11-xpu20253-x86_64-linux/layers.py
CHANGED
@@ -1,6 +1,29 @@
 import torch
 from ._ops import ops
 
+class RMSNormFunction(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, hidden_states, weight, variance_epsilon):
+        ctx.variance_epsilon = variance_epsilon
+        output, rstd = ops.apply_rms_norm(hidden_states, weight, variance_epsilon)
+        ctx.save_for_backward(hidden_states, weight, output, rstd)
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        hidden_states, weight, output, rstd = ctx.saved_tensors
+        grads = ops.apply_rms_norm_backward(
+            grad_output,
+            hidden_states,
+            weight,
+            output,
+            rstd,
+            ctx.variance_epsilon,
+            ctx.needs_input_grad[0],
+            ctx.needs_input_grad[1]
+        )
+        return grads[0], grads[1], None
+
 class RMSNorm(torch.nn.Module):
     """
     RMSNorm module that uses the optimized LigerRMSNormFunction.
@@ -27,7 +50,7 @@ class RMSNorm(torch.nn.Module):
         Returns:
             torch.Tensor: Normalized tensor of the same shape as input
         """
-        return
+        return RMSNormFunction.apply(
             hidden_states,
             self.weight,
             self.variance_epsilon,
build/torch28-cxx11-cpu-x86_64-linux/__init__.py
CHANGED
@@ -4,11 +4,24 @@ from ._ops import ops
 
 
 def apply_rms_norm(input, weight, eps):
+    # ops.apply_rms_norm returns [output, rstd]
     return ops.apply_rms_norm(
         input,
         weight,
         eps,
+    )[0]
+
+def apply_rms_norm_backward(grad_output, input, weight, output, rstd, eps, input_requires_grad=True, weight_requires_grad=True):
+    return ops.apply_rms_norm_backward(
+        grad_output,
+        input,
+        weight,
+        output,
+        rstd,
+        eps,
+        input_requires_grad,
+        weight_requires_grad
     )
 
-__all__ = ["layers", "apply_rms_norm"]
+__all__ = ["layers", "apply_rms_norm_forward", "apply_rms_norm_backward"]
 
build/torch28-cxx11-cpu-x86_64-linux/_ops.py
CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _rmsnorm_a8702c9
-ops = torch.ops._rmsnorm_a8702c9
+from . import _rmsnorm_235cde1
+ops = torch.ops._rmsnorm_235cde1
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_rmsnorm_a8702c9::{op_name}"
+    return f"_rmsnorm_235cde1::{op_name}"
build/{torch29-cxx11-cpu-x86_64-linux/_rmsnorm_a8702c9.abi3.so → torch28-cxx11-cpu-x86_64-linux/_rmsnorm_235cde1.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:16c92de9cefabeeadc60ffff87189a1e66ecb9ea19b343570ac55e9d9c7d98fe
+size 156648
build/torch28-cxx11-cpu-x86_64-linux/layers.py
CHANGED
@@ -1,6 +1,29 @@
 import torch
 from ._ops import ops
 
+class RMSNormFunction(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, hidden_states, weight, variance_epsilon):
+        ctx.variance_epsilon = variance_epsilon
+        output, rstd = ops.apply_rms_norm(hidden_states, weight, variance_epsilon)
+        ctx.save_for_backward(hidden_states, weight, output, rstd)
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        hidden_states, weight, output, rstd = ctx.saved_tensors
+        grads = ops.apply_rms_norm_backward(
+            grad_output,
+            hidden_states,
+            weight,
+            output,
+            rstd,
+            ctx.variance_epsilon,
+            ctx.needs_input_grad[0],
+            ctx.needs_input_grad[1]
+        )
+        return grads[0], grads[1], None
+
 class RMSNorm(torch.nn.Module):
     """
     RMSNorm module that uses the optimized LigerRMSNormFunction.
@@ -27,7 +50,7 @@ class RMSNorm(torch.nn.Module):
         Returns:
             torch.Tensor: Normalized tensor of the same shape as input
         """
-        return
+        return RMSNormFunction.apply(
             hidden_states,
             self.weight,
             self.variance_epsilon,
build/torch28-cxx11-xpu20251-x86_64-linux/__init__.py
CHANGED
@@ -4,11 +4,24 @@ from ._ops import ops
 
 
 def apply_rms_norm(input, weight, eps):
+    # ops.apply_rms_norm returns [output, rstd]
     return ops.apply_rms_norm(
         input,
         weight,
         eps,
+    )[0]
+
+def apply_rms_norm_backward(grad_output, input, weight, output, rstd, eps, input_requires_grad=True, weight_requires_grad=True):
+    return ops.apply_rms_norm_backward(
+        grad_output,
+        input,
+        weight,
+        output,
+        rstd,
+        eps,
+        input_requires_grad,
+        weight_requires_grad
     )
 
-__all__ = ["layers", "apply_rms_norm"]
+__all__ = ["layers", "apply_rms_norm_forward", "apply_rms_norm_backward"]
 
build/torch28-cxx11-xpu20251-x86_64-linux/_ops.py
CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _rmsnorm_a8702c9
-ops = torch.ops._rmsnorm_a8702c9
+from . import _rmsnorm_235cde1
+ops = torch.ops._rmsnorm_235cde1
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_rmsnorm_a8702c9::{op_name}"
+    return f"_rmsnorm_235cde1::{op_name}"
build/torch28-cxx11-xpu20251-x86_64-linux/{_rmsnorm_a8702c9.abi3.so → _rmsnorm_235cde1.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:77c4b43d63dc74b210633da81630023a6d6e359a7a1115bff55da9f4436053d9
+size 103700632
build/torch28-cxx11-xpu20251-x86_64-linux/layers.py
CHANGED
@@ -1,6 +1,29 @@
 import torch
 from ._ops import ops
 
+class RMSNormFunction(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, hidden_states, weight, variance_epsilon):
+        ctx.variance_epsilon = variance_epsilon
+        output, rstd = ops.apply_rms_norm(hidden_states, weight, variance_epsilon)
+        ctx.save_for_backward(hidden_states, weight, output, rstd)
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        hidden_states, weight, output, rstd = ctx.saved_tensors
+        grads = ops.apply_rms_norm_backward(
+            grad_output,
+            hidden_states,
+            weight,
+            output,
+            rstd,
+            ctx.variance_epsilon,
+            ctx.needs_input_grad[0],
+            ctx.needs_input_grad[1]
+        )
+        return grads[0], grads[1], None
+
 class RMSNorm(torch.nn.Module):
     """
     RMSNorm module that uses the optimized LigerRMSNormFunction.
@@ -27,7 +50,7 @@ class RMSNorm(torch.nn.Module):
         Returns:
             torch.Tensor: Normalized tensor of the same shape as input
         """
-        return
+        return RMSNormFunction.apply(
             hidden_states,
             self.weight,
             self.variance_epsilon,
build/torch29-cxx11-cpu-x86_64-linux/__init__.py
CHANGED
@@ -4,11 +4,24 @@ from ._ops import ops
 
 
 def apply_rms_norm(input, weight, eps):
+    # ops.apply_rms_norm returns [output, rstd]
     return ops.apply_rms_norm(
         input,
         weight,
         eps,
+    )[0]
+
+def apply_rms_norm_backward(grad_output, input, weight, output, rstd, eps, input_requires_grad=True, weight_requires_grad=True):
+    return ops.apply_rms_norm_backward(
+        grad_output,
+        input,
+        weight,
+        output,
+        rstd,
+        eps,
+        input_requires_grad,
+        weight_requires_grad
     )
 
-__all__ = ["layers", "apply_rms_norm"]
+__all__ = ["layers", "apply_rms_norm_forward", "apply_rms_norm_backward"]
 
build/torch29-cxx11-cpu-x86_64-linux/_ops.py
CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _rmsnorm_a8702c9
-ops = torch.ops._rmsnorm_a8702c9
+from . import _rmsnorm_235cde1
+ops = torch.ops._rmsnorm_235cde1
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_rmsnorm_a8702c9::{op_name}"
+    return f"_rmsnorm_235cde1::{op_name}"
build/{torch28-cxx11-cpu-x86_64-linux/_rmsnorm_a8702c9.abi3.so → torch29-cxx11-cpu-x86_64-linux/_rmsnorm_235cde1.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e9ea3ebe5949d376bb44127a66dc2946e72620ff7035e2f34e81a652b0c69ded
+size 156608
build/torch29-cxx11-cpu-x86_64-linux/layers.py
CHANGED
@@ -1,6 +1,29 @@
 import torch
 from ._ops import ops
 
+class RMSNormFunction(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, hidden_states, weight, variance_epsilon):
+        ctx.variance_epsilon = variance_epsilon
+        output, rstd = ops.apply_rms_norm(hidden_states, weight, variance_epsilon)
+        ctx.save_for_backward(hidden_states, weight, output, rstd)
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        hidden_states, weight, output, rstd = ctx.saved_tensors
+        grads = ops.apply_rms_norm_backward(
+            grad_output,
+            hidden_states,
+            weight,
+            output,
+            rstd,
+            ctx.variance_epsilon,
+            ctx.needs_input_grad[0],
+            ctx.needs_input_grad[1]
+        )
+        return grads[0], grads[1], None
+
 class RMSNorm(torch.nn.Module):
     """
     RMSNorm module that uses the optimized LigerRMSNormFunction.
@@ -27,7 +50,7 @@ class RMSNorm(torch.nn.Module):
         Returns:
             torch.Tensor: Normalized tensor of the same shape as input
         """
-        return
+        return RMSNormFunction.apply(
             hidden_states,
             self.weight,
             self.variance_epsilon,
build/torch29-cxx11-xpu20252-x86_64-linux/__init__.py
CHANGED
@@ -4,11 +4,24 @@ from ._ops import ops
 
 
 def apply_rms_norm(input, weight, eps):
+    # ops.apply_rms_norm returns [output, rstd]
     return ops.apply_rms_norm(
         input,
         weight,
         eps,
+    )[0]
+
+def apply_rms_norm_backward(grad_output, input, weight, output, rstd, eps, input_requires_grad=True, weight_requires_grad=True):
+    return ops.apply_rms_norm_backward(
+        grad_output,
+        input,
+        weight,
+        output,
+        rstd,
+        eps,
+        input_requires_grad,
+        weight_requires_grad
     )
 
-__all__ = ["layers", "apply_rms_norm"]
+__all__ = ["layers", "apply_rms_norm_forward", "apply_rms_norm_backward"]
 
build/torch29-cxx11-xpu20252-x86_64-linux/_ops.py
CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _rmsnorm_a8702c9
-ops = torch.ops._rmsnorm_a8702c9
+from . import _rmsnorm_235cde1
+ops = torch.ops._rmsnorm_235cde1
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_rmsnorm_a8702c9::{op_name}"
+    return f"_rmsnorm_235cde1::{op_name}"
build/torch29-cxx11-xpu20252-x86_64-linux/_rmsnorm_235cde1.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9cf81702c9b0a75c84f961b1b7555db936dbdd7b889c1a41dd048c1100a0aa2
+size 102179528
build/torch29-cxx11-xpu20252-x86_64-linux/_rmsnorm_a8702c9.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e00f18c2a5222d5782f37d052526d32ba71de2c28a271de5c85a5a8fb0efe6fa
-size 102340240
build/torch29-cxx11-xpu20252-x86_64-linux/layers.py
CHANGED
@@ -1,6 +1,29 @@
 import torch
 from ._ops import ops
 
+class RMSNormFunction(torch.autograd.Function):
+    @staticmethod
+    def forward(ctx, hidden_states, weight, variance_epsilon):
+        ctx.variance_epsilon = variance_epsilon
+        output, rstd = ops.apply_rms_norm(hidden_states, weight, variance_epsilon)
+        ctx.save_for_backward(hidden_states, weight, output, rstd)
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        hidden_states, weight, output, rstd = ctx.saved_tensors
+        grads = ops.apply_rms_norm_backward(
+            grad_output,
+            hidden_states,
+            weight,
+            output,
+            rstd,
+            ctx.variance_epsilon,
+            ctx.needs_input_grad[0],
+            ctx.needs_input_grad[1]
+        )
+        return grads[0], grads[1], None
+
 class RMSNorm(torch.nn.Module):
     """
     RMSNorm module that uses the optimized LigerRMSNormFunction.
@@ -27,7 +50,7 @@ class RMSNorm(torch.nn.Module):
         Returns:
             torch.Tensor: Normalized tensor of the same shape as input
         """
-        return
+        return RMSNormFunction.apply(
            hidden_states,
            self.weight,
            self.variance_epsilon,