danieldk (HF Staff) committed
Commit 95dc24b · 1 parent: 4af0473

Revert "Build uploaded using `kernels`."


This reverts commit 4af047397cb185bf22fba4ed19d0bd4ae50b8055.

Files changed (36)
  1. build/torch210-cxx11-cpu-x86_64-linux/__init__.py +14 -0
  2. build/torch210-cxx11-cpu-x86_64-linux/_ops.py +9 -0
  3. build/torch210-cxx11-cpu-x86_64-linux/_rmsnorm_fb26d8c.abi3.so +3 -0
  4. build/torch210-cxx11-cpu-x86_64-linux/layers.py +36 -0
  5. build/torch210-cxx11-cpu-x86_64-linux/metadata.json +1 -0
  6. build/torch210-cxx11-cpu-x86_64-linux/rmsnorm/__init__.py +26 -0
  7. build/torch210-cxx11-xpu20253-x86_64-linux/__init__.py +14 -0
  8. build/torch210-cxx11-xpu20253-x86_64-linux/_ops.py +9 -0
  9. build/torch210-cxx11-xpu20253-x86_64-linux/_rmsnorm_fb26d8c.abi3.so +3 -0
  10. build/torch210-cxx11-xpu20253-x86_64-linux/layers.py +36 -0
  11. build/torch210-cxx11-xpu20253-x86_64-linux/metadata.json +1 -0
  12. build/torch210-cxx11-xpu20253-x86_64-linux/rmsnorm/__init__.py +26 -0
  13. build/torch28-cxx11-cpu-x86_64-linux/__init__.py +14 -0
  14. build/torch28-cxx11-cpu-x86_64-linux/_ops.py +9 -0
  15. build/torch28-cxx11-cpu-x86_64-linux/_rmsnorm_fb26d8c.abi3.so +3 -0
  16. build/torch28-cxx11-cpu-x86_64-linux/layers.py +36 -0
  17. build/torch28-cxx11-cpu-x86_64-linux/metadata.json +1 -0
  18. build/torch28-cxx11-cpu-x86_64-linux/rmsnorm/__init__.py +26 -0
  19. build/torch28-cxx11-xpu20251-x86_64-linux/__init__.py +14 -0
  20. build/torch28-cxx11-xpu20251-x86_64-linux/_ops.py +9 -0
  21. build/torch28-cxx11-xpu20251-x86_64-linux/_rmsnorm_fb26d8c.abi3.so +3 -0
  22. build/torch28-cxx11-xpu20251-x86_64-linux/layers.py +36 -0
  23. build/torch28-cxx11-xpu20251-x86_64-linux/metadata.json +1 -0
  24. build/torch28-cxx11-xpu20251-x86_64-linux/rmsnorm/__init__.py +26 -0
  25. build/torch29-cxx11-cpu-x86_64-linux/__init__.py +14 -0
  26. build/torch29-cxx11-cpu-x86_64-linux/_ops.py +9 -0
  27. build/torch29-cxx11-cpu-x86_64-linux/_rmsnorm_fb26d8c.abi3.so +3 -0
  28. build/torch29-cxx11-cpu-x86_64-linux/layers.py +36 -0
  29. build/torch29-cxx11-cpu-x86_64-linux/metadata.json +1 -0
  30. build/torch29-cxx11-cpu-x86_64-linux/rmsnorm/__init__.py +26 -0
  31. build/torch29-cxx11-xpu20252-x86_64-linux/__init__.py +14 -0
  32. build/torch29-cxx11-xpu20252-x86_64-linux/_ops.py +9 -0
  33. build/torch29-cxx11-xpu20252-x86_64-linux/_rmsnorm_fb26d8c.abi3.so +3 -0
  34. build/torch29-cxx11-xpu20252-x86_64-linux/layers.py +36 -0
  35. build/torch29-cxx11-xpu20252-x86_64-linux/metadata.json +1 -0
  36. build/torch29-cxx11-xpu20252-x86_64-linux/rmsnorm/__init__.py +26 -0
build/torch210-cxx11-cpu-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,14 @@
+ from . import layers
+
+ from ._ops import ops
+
+
+ def apply_rms_norm(input, weight, eps):
+     return ops.apply_rms_norm(
+         input,
+         weight,
+         eps,
+     )
+
+ __all__ = ["layers", "apply_rms_norm"]
+
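For reference, the restored top-level module exposes `apply_rms_norm(input, weight, eps)` directly. A minimal usage sketch, assuming the kernel is fetched from the Hub with the `kernels` library (the repo id below is a placeholder, not part of this commit):

```python
# Hedged sketch: load this build via the `kernels` library and call the op.
# "<org>/rmsnorm" is a placeholder repo id; substitute the actual repository.
import torch
from kernels import get_kernel

rmsnorm = get_kernel("<org>/rmsnorm")

x = torch.randn(2, 16, 64)   # (B, T, H)
weight = torch.ones(64)      # per-hidden-dimension scale
y = rmsnorm.apply_rms_norm(x, weight, 1e-6)
assert y.shape == x.shape
```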
build/torch210-cxx11-cpu-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _rmsnorm_fb26d8c
+ ops = torch.ops._rmsnorm_fb26d8c
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_rmsnorm_fb26d8c::{op_name}"
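`add_op_namespace_prefix` qualifies an op name with this build's unique namespace, which is how the op must be referenced through `torch.ops` or `torch.library`. An illustrative sketch (the commented registration is an example of typical use, not code from this commit):

```python
# The helper simply prepends the build-specific namespace:
qualified = add_op_namespace_prefix("apply_rms_norm")
print(qualified)  # "_rmsnorm_fb26d8c::apply_rms_norm"

# Typical use (illustrative): registering a meta/fake kernel for tracing.
# import torch
# @torch.library.register_fake(add_op_namespace_prefix("apply_rms_norm"))
# def _(input, weight, eps):
#     return torch.empty_like(input)
```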
build/torch210-cxx11-cpu-x86_64-linux/_rmsnorm_fb26d8c.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8a1c744d46b5b0b6455825653741008b06242630ae9946f0205ac2c055dbc7e
+ size 326352
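The shared object itself is stored as a Git LFS pointer: three text lines giving the spec version, the SHA-256 of the real binary, and its size in bytes. A small parsing sketch (an illustrative helper, not part of the repo):

```python
# Parse the three-line Git LFS pointer format into its fields.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }
```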
build/torch210-cxx11-cpu-x86_64-linux/layers.py ADDED
@@ -0,0 +1,36 @@
+ import torch
+ from ._ops import ops
+
+ class RMSNorm(torch.nn.Module):
+     """
+     RMSNorm module that uses the optimized LigerRMSNormFunction.
+
+     Args:
+         hidden_size (int): The size of the hidden dimension.
+         eps (float, optional): The epsilon value for numerical stability. Defaults to 1e-6.
+         offset (float, optional): Offset value to shift the weight tensor. Defaults to 0.0.
+         casting_mode (str, optional): The casting mode to use. Defaults to "llama".
+         in_place (bool, optional): Whether to modify dY in-place to store dX during backward. Defaults to True.
+     """
+
+
+     weight: torch.Tensor
+     variance_epsilon: float
+
+     def forward(self, hidden_states):
+         """
+         Apply RMS normalization to the input tensor.
+
+         Args:
+             hidden_states (torch.Tensor): Input tensor of shape (B, T, H) or (BxT, H)
+
+         Returns:
+             torch.Tensor: Normalized tensor of the same shape as input
+         """
+         return ops.apply_rms_norm(
+             hidden_states,
+             self.weight,
+             self.variance_epsilon,
+         )
+
+ __all__ = ["RMSNorm"]
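Note that `RMSNorm` declares `weight` and `variance_epsilon` but defines no `__init__`: with the `kernels` layer-replacement mechanism, these attributes are expected to come from the module being swapped out. A standalone sketch that sets them manually (illustrative; not the intended integration path):

```python
import torch

norm = RMSNorm()
norm.weight = torch.nn.Parameter(torch.ones(64))
norm.variance_epsilon = 1e-6

x = torch.randn(2, 16, 64)  # (B, T, H)
y = norm(x)                 # normalized, same shape as x
```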
build/torch210-cxx11-cpu-x86_64-linux/metadata.json ADDED
@@ -0,0 +1 @@
+ {"python-depends":[]}
build/torch210-cxx11-cpu-x86_64-linux/rmsnorm/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import sys
+
+ import importlib.util
+ from pathlib import Path
+ from types import ModuleType
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is, after adding it to `sys.modules`,
+     # it would also be used for other imports. So, we make a module name that
+     # depends on the path for it to be unique using the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
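This shim re-exports the parent build directory's `__init__.py` under the `rmsnorm` package name, registering it in `sys.modules` under a hex-encoded hash of its path so repeated imports from different paths cannot collide. `ctypes.c_size_t` reinterprets Python's possibly negative `hash()` as an unsigned value so the hex key carries no minus sign. A tiny illustration of that conversion:

```python
# hash() can return a negative int; reinterpreting it as an unsigned
# size_t yields a clean hex key for use as a sys.modules name.
import ctypes

h = -1234567890
print("{:x}".format(ctypes.c_size_t(h).value))  # "ffffffffb669fd2e" on 64-bit
```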
build/torch210-cxx11-xpu20253-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,14 @@
+ from . import layers
+
+ from ._ops import ops
+
+
+ def apply_rms_norm(input, weight, eps):
+     return ops.apply_rms_norm(
+         input,
+         weight,
+         eps,
+     )
+
+ __all__ = ["layers", "apply_rms_norm"]
+
build/torch210-cxx11-xpu20253-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _rmsnorm_fb26d8c
+ ops = torch.ops._rmsnorm_fb26d8c
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_rmsnorm_fb26d8c::{op_name}"
build/torch210-cxx11-xpu20253-x86_64-linux/_rmsnorm_fb26d8c.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4be94737423cc4d02f4be83f38144614d71ccd8672d96699f0b10136dd541847
+ size 104941392
build/torch210-cxx11-xpu20253-x86_64-linux/layers.py ADDED
@@ -0,0 +1,36 @@
+ import torch
+ from ._ops import ops
+
+ class RMSNorm(torch.nn.Module):
+     """
+     RMSNorm module that uses the optimized LigerRMSNormFunction.
+
+     Args:
+         hidden_size (int): The size of the hidden dimension.
+         eps (float, optional): The epsilon value for numerical stability. Defaults to 1e-6.
+         offset (float, optional): Offset value to shift the weight tensor. Defaults to 0.0.
+         casting_mode (str, optional): The casting mode to use. Defaults to "llama".
+         in_place (bool, optional): Whether to modify dY in-place to store dX during backward. Defaults to True.
+     """
+
+
+     weight: torch.Tensor
+     variance_epsilon: float
+
+     def forward(self, hidden_states):
+         """
+         Apply RMS normalization to the input tensor.
+
+         Args:
+             hidden_states (torch.Tensor): Input tensor of shape (B, T, H) or (BxT, H)
+
+         Returns:
+             torch.Tensor: Normalized tensor of the same shape as input
+         """
+         return ops.apply_rms_norm(
+             hidden_states,
+             self.weight,
+             self.variance_epsilon,
+         )
+
+ __all__ = ["RMSNorm"]
build/torch210-cxx11-xpu20253-x86_64-linux/metadata.json ADDED
@@ -0,0 +1 @@
+ {"python-depends":[]}
build/torch210-cxx11-xpu20253-x86_64-linux/rmsnorm/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import sys
+
+ import importlib.util
+ from pathlib import Path
+ from types import ModuleType
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is, after adding it to `sys.modules`,
+     # it would also be used for other imports. So, we make a module name that
+     # depends on the path for it to be unique using the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch28-cxx11-cpu-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,14 @@
+ from . import layers
+
+ from ._ops import ops
+
+
+ def apply_rms_norm(input, weight, eps):
+     return ops.apply_rms_norm(
+         input,
+         weight,
+         eps,
+     )
+
+ __all__ = ["layers", "apply_rms_norm"]
+
build/torch28-cxx11-cpu-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _rmsnorm_fb26d8c
+ ops = torch.ops._rmsnorm_fb26d8c
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_rmsnorm_fb26d8c::{op_name}"
build/torch28-cxx11-cpu-x86_64-linux/_rmsnorm_fb26d8c.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:055fc9c5e82e48e503963bac3da30001e128774d8d9a333680b8aacab0650644
+ size 324616
build/torch28-cxx11-cpu-x86_64-linux/layers.py ADDED
@@ -0,0 +1,36 @@
+ import torch
+ from ._ops import ops
+
+ class RMSNorm(torch.nn.Module):
+     """
+     RMSNorm module that uses the optimized LigerRMSNormFunction.
+
+     Args:
+         hidden_size (int): The size of the hidden dimension.
+         eps (float, optional): The epsilon value for numerical stability. Defaults to 1e-6.
+         offset (float, optional): Offset value to shift the weight tensor. Defaults to 0.0.
+         casting_mode (str, optional): The casting mode to use. Defaults to "llama".
+         in_place (bool, optional): Whether to modify dY in-place to store dX during backward. Defaults to True.
+     """
+
+
+     weight: torch.Tensor
+     variance_epsilon: float
+
+     def forward(self, hidden_states):
+         """
+         Apply RMS normalization to the input tensor.
+
+         Args:
+             hidden_states (torch.Tensor): Input tensor of shape (B, T, H) or (BxT, H)
+
+         Returns:
+             torch.Tensor: Normalized tensor of the same shape as input
+         """
+         return ops.apply_rms_norm(
+             hidden_states,
+             self.weight,
+             self.variance_epsilon,
+         )
+
+ __all__ = ["RMSNorm"]
build/torch28-cxx11-cpu-x86_64-linux/metadata.json ADDED
@@ -0,0 +1 @@
+ {"python-depends":[]}
build/torch28-cxx11-cpu-x86_64-linux/rmsnorm/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import sys
+
+ import importlib.util
+ from pathlib import Path
+ from types import ModuleType
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is, after adding it to `sys.modules`,
+     # it would also be used for other imports. So, we make a module name that
+     # depends on the path for it to be unique using the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch28-cxx11-xpu20251-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,14 @@
+ from . import layers
+
+ from ._ops import ops
+
+
+ def apply_rms_norm(input, weight, eps):
+     return ops.apply_rms_norm(
+         input,
+         weight,
+         eps,
+     )
+
+ __all__ = ["layers", "apply_rms_norm"]
+
build/torch28-cxx11-xpu20251-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _rmsnorm_fb26d8c
+ ops = torch.ops._rmsnorm_fb26d8c
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_rmsnorm_fb26d8c::{op_name}"
build/torch28-cxx11-xpu20251-x86_64-linux/_rmsnorm_fb26d8c.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0ba9e0355977f76b16f6346377026ffde2977c613ee9b5633083d6f95f4e07c
+ size 103861336
build/torch28-cxx11-xpu20251-x86_64-linux/layers.py ADDED
@@ -0,0 +1,36 @@
+ import torch
+ from ._ops import ops
+
+ class RMSNorm(torch.nn.Module):
+     """
+     RMSNorm module that uses the optimized LigerRMSNormFunction.
+
+     Args:
+         hidden_size (int): The size of the hidden dimension.
+         eps (float, optional): The epsilon value for numerical stability. Defaults to 1e-6.
+         offset (float, optional): Offset value to shift the weight tensor. Defaults to 0.0.
+         casting_mode (str, optional): The casting mode to use. Defaults to "llama".
+         in_place (bool, optional): Whether to modify dY in-place to store dX during backward. Defaults to True.
+     """
+
+
+     weight: torch.Tensor
+     variance_epsilon: float
+
+     def forward(self, hidden_states):
+         """
+         Apply RMS normalization to the input tensor.
+
+         Args:
+             hidden_states (torch.Tensor): Input tensor of shape (B, T, H) or (BxT, H)
+
+         Returns:
+             torch.Tensor: Normalized tensor of the same shape as input
+         """
+         return ops.apply_rms_norm(
+             hidden_states,
+             self.weight,
+             self.variance_epsilon,
+         )
+
+ __all__ = ["RMSNorm"]
build/torch28-cxx11-xpu20251-x86_64-linux/metadata.json ADDED
@@ -0,0 +1 @@
+ {"python-depends":[]}
build/torch28-cxx11-xpu20251-x86_64-linux/rmsnorm/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import sys
+
+ import importlib.util
+ from pathlib import Path
+ from types import ModuleType
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is, after adding it to `sys.modules`,
+     # it would also be used for other imports. So, we make a module name that
+     # depends on the path for it to be unique using the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch29-cxx11-cpu-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,14 @@
+ from . import layers
+
+ from ._ops import ops
+
+
+ def apply_rms_norm(input, weight, eps):
+     return ops.apply_rms_norm(
+         input,
+         weight,
+         eps,
+     )
+
+ __all__ = ["layers", "apply_rms_norm"]
+
build/torch29-cxx11-cpu-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _rmsnorm_fb26d8c
+ ops = torch.ops._rmsnorm_fb26d8c
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_rmsnorm_fb26d8c::{op_name}"
build/torch29-cxx11-cpu-x86_64-linux/_rmsnorm_fb26d8c.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97de49bd6f5edb8a54394a123362f30026a119e4f8ccf796884f108c343ec562
+ size 324592
build/torch29-cxx11-cpu-x86_64-linux/layers.py ADDED
@@ -0,0 +1,36 @@
+ import torch
+ from ._ops import ops
+
+ class RMSNorm(torch.nn.Module):
+     """
+     RMSNorm module that uses the optimized LigerRMSNormFunction.
+
+     Args:
+         hidden_size (int): The size of the hidden dimension.
+         eps (float, optional): The epsilon value for numerical stability. Defaults to 1e-6.
+         offset (float, optional): Offset value to shift the weight tensor. Defaults to 0.0.
+         casting_mode (str, optional): The casting mode to use. Defaults to "llama".
+         in_place (bool, optional): Whether to modify dY in-place to store dX during backward. Defaults to True.
+     """
+
+
+     weight: torch.Tensor
+     variance_epsilon: float
+
+     def forward(self, hidden_states):
+         """
+         Apply RMS normalization to the input tensor.
+
+         Args:
+             hidden_states (torch.Tensor): Input tensor of shape (B, T, H) or (BxT, H)
+
+         Returns:
+             torch.Tensor: Normalized tensor of the same shape as input
+         """
+         return ops.apply_rms_norm(
+             hidden_states,
+             self.weight,
+             self.variance_epsilon,
+         )
+
+ __all__ = ["RMSNorm"]
build/torch29-cxx11-cpu-x86_64-linux/metadata.json ADDED
@@ -0,0 +1 @@
+ {"python-depends":[]}
build/torch29-cxx11-cpu-x86_64-linux/rmsnorm/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import sys
+
+ import importlib.util
+ from pathlib import Path
+ from types import ModuleType
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is, after adding it to `sys.modules`,
+     # it would also be used for other imports. So, we make a module name that
+     # depends on the path for it to be unique using the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch29-cxx11-xpu20252-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,14 @@
+ from . import layers
+
+ from ._ops import ops
+
+
+ def apply_rms_norm(input, weight, eps):
+     return ops.apply_rms_norm(
+         input,
+         weight,
+         eps,
+     )
+
+ __all__ = ["layers", "apply_rms_norm"]
+
build/torch29-cxx11-xpu20252-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
+ import torch
+ from . import _rmsnorm_fb26d8c
+ ops = torch.ops._rmsnorm_fb26d8c
+
+ def add_op_namespace_prefix(op_name: str):
+     """
+     Prefix op by namespace.
+     """
+     return f"_rmsnorm_fb26d8c::{op_name}"
build/torch29-cxx11-xpu20252-x86_64-linux/_rmsnorm_fb26d8c.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47dcb713294a6eca6d920f2e9aba27be280d75ac2d356845232008210d1df17a
+ size 102340240
build/torch29-cxx11-xpu20252-x86_64-linux/layers.py ADDED
@@ -0,0 +1,36 @@
+ import torch
+ from ._ops import ops
+
+ class RMSNorm(torch.nn.Module):
+     """
+     RMSNorm module that uses the optimized LigerRMSNormFunction.
+
+     Args:
+         hidden_size (int): The size of the hidden dimension.
+         eps (float, optional): The epsilon value for numerical stability. Defaults to 1e-6.
+         offset (float, optional): Offset value to shift the weight tensor. Defaults to 0.0.
+         casting_mode (str, optional): The casting mode to use. Defaults to "llama".
+         in_place (bool, optional): Whether to modify dY in-place to store dX during backward. Defaults to True.
+     """
+
+
+     weight: torch.Tensor
+     variance_epsilon: float
+
+     def forward(self, hidden_states):
+         """
+         Apply RMS normalization to the input tensor.
+
+         Args:
+             hidden_states (torch.Tensor): Input tensor of shape (B, T, H) or (BxT, H)
+
+         Returns:
+             torch.Tensor: Normalized tensor of the same shape as input
+         """
+         return ops.apply_rms_norm(
+             hidden_states,
+             self.weight,
+             self.variance_epsilon,
+         )
+
+ __all__ = ["RMSNorm"]
build/torch29-cxx11-xpu20252-x86_64-linux/metadata.json ADDED
@@ -0,0 +1 @@
+ {"python-depends":[]}
build/torch29-cxx11-xpu20252-x86_64-linux/rmsnorm/__init__.py ADDED
@@ -0,0 +1,26 @@
+ import ctypes
+ import sys
+
+ import importlib.util
+ from pathlib import Path
+ from types import ModuleType
+
+ def _import_from_path(file_path: Path) -> ModuleType:
+     # We cannot use the module name as-is, after adding it to `sys.modules`,
+     # it would also be used for other imports. So, we make a module name that
+     # depends on the path for it to be unique using the hex-encoded hash of
+     # the path.
+     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+     module_name = path_hash
+     spec = importlib.util.spec_from_file_location(module_name, file_path)
+     if spec is None:
+         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+     module = importlib.util.module_from_spec(spec)
+     if module is None:
+         raise ImportError(f"Cannot load module {module_name} from spec")
+     sys.modules[module_name] = module
+     spec.loader.exec_module(module)  # type: ignore
+     return module
+
+
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))