medmekk HF Staff commited on
Commit
4a273f4
·
verified ·
1 Parent(s): 084e4d5

Build uploaded using `kernels`.

Browse files
.gitattributes CHANGED
@@ -35,3 +35,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
  build/torch28-metal-aarch64-darwin/mlx_rmsnorm/_mlx_rmsnorm_97571a8_dirty.abi3.so filter=lfs diff=lfs merge=lfs -text
37
  build/torch29-metal-aarch64-darwin/mlx_rmsnorm/_mlx_rmsnorm_97571a8_dirty.abi3.so filter=lfs diff=lfs merge=lfs -text
 
 
 
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
  build/torch28-metal-aarch64-darwin/mlx_rmsnorm/_mlx_rmsnorm_97571a8_dirty.abi3.so filter=lfs diff=lfs merge=lfs -text
37
  build/torch29-metal-aarch64-darwin/mlx_rmsnorm/_mlx_rmsnorm_97571a8_dirty.abi3.so filter=lfs diff=lfs merge=lfs -text
38
+ build/torch28-metal-aarch64-darwin/mlx_rmsnorm/_mlx_rmsnorm_57d865f.abi3.so filter=lfs diff=lfs merge=lfs -text
39
+ build/torch29-metal-aarch64-darwin/mlx_rmsnorm/_mlx_rmsnorm_57d865f.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch28-metal-aarch64-darwin/mlx_rmsnorm/__init__.py CHANGED
@@ -1,25 +1,3 @@
1
- from ._ops import ops
2
 
3
- import torch
4
-
5
-
6
- def rmsnorm_forward(x: torch.Tensor, weight: torch.Tensor, epsilon: float = 1e-5) -> torch.Tensor:
7
- original_shape = x.shape
8
- x = x.view(-1, x.shape[-1])
9
- weight = weight.view(-1)
10
- output = torch.zeros_like(x)
11
- ops.launch_forward_kernel(x, weight, output, epsilon)
12
- output = output.view(original_shape)
13
- return output
14
-
15
- def rmsnorm_backward(x: torch.Tensor, weight: torch.Tensor, grad_output: torch.Tensor, epsilon: float = 1e-5) -> torch.Tensor:
16
- original_shape = x.shape
17
- x = x.view(-1, x.shape[-1])
18
- weight = weight.view(-1)
19
- grad_output = grad_output.view(-1)
20
- grad_input = torch.zeros_like(x)
21
- grad_weight = torch.zeros_like(weight)
22
- ops.launch_backward_kernel(x, weight, grad_output, grad_input, grad_weight, epsilon)
23
- grad_input = grad_input.view(original_shape)
24
- grad_weight = grad_weight.view(original_shape)
25
- return grad_input, grad_weight
 
1
+ from .layers import RMSNorm, rmsnorm_forward, rmsnorm_backward
2
 
3
+ __all__ = ["RMSNorm", "rmsnorm_forward", "rmsnorm_backward"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
build/torch28-metal-aarch64-darwin/mlx_rmsnorm/__pycache__/__init__.cpython-313.pyc CHANGED
Binary files a/build/torch28-metal-aarch64-darwin/mlx_rmsnorm/__pycache__/__init__.cpython-313.pyc and b/build/torch28-metal-aarch64-darwin/mlx_rmsnorm/__pycache__/__init__.cpython-313.pyc differ
 
build/torch28-metal-aarch64-darwin/mlx_rmsnorm/__pycache__/_ops.cpython-313.pyc CHANGED
Binary files a/build/torch28-metal-aarch64-darwin/mlx_rmsnorm/__pycache__/_ops.cpython-313.pyc and b/build/torch28-metal-aarch64-darwin/mlx_rmsnorm/__pycache__/_ops.cpython-313.pyc differ
 
build/torch28-metal-aarch64-darwin/mlx_rmsnorm/__pycache__/layers.cpython-313.pyc ADDED
Binary file (2.6 kB). View file
 
build/torch28-metal-aarch64-darwin/mlx_rmsnorm/{_mlx_rmsnorm_97571a8_dirty.abi3.so → _mlx_rmsnorm_57d865f.abi3.so} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d9067324afd250e29f55291830a02f3cd197a559ecb38262770ea31206c5cb1b
3
- size 219216
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:22dcc9b26a28ff2c1cc34697bd25bf55eeb9cd9d4f1eb32a352ed2343fecbe6d
3
+ size 219168
build/torch28-metal-aarch64-darwin/mlx_rmsnorm/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _mlx_rmsnorm_97571a8_dirty
3
- ops = torch.ops._mlx_rmsnorm_97571a8_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_mlx_rmsnorm_97571a8_dirty::{op_name}"
 
1
  import torch
2
+ from . import _mlx_rmsnorm_57d865f
3
+ ops = torch.ops._mlx_rmsnorm_57d865f
4
 
5
def add_op_namespace_prefix(op_name: str):
    """Qualify *op_name* with this kernel build's torch op namespace."""
    namespace = "_mlx_rmsnorm_57d865f"
    return f"{namespace}::{op_name}"
build/torch28-metal-aarch64-darwin/mlx_rmsnorm/layers.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from ._ops import ops

import torch


class RMSNorm(torch.nn.Module):
    """RMS normalization module dispatching to the custom Metal kernel.

    This stub only declares ``weight`` and ``eps``; they are expected to be
    assigned by a subclass or loader before ``forward`` is called.
    """

    # Learnable scale; must have as many elements as the input's last dimension.
    weight: torch.Tensor
    # Numerical-stability epsilon passed through to the kernel.
    eps: float

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return rmsnorm_forward(x, self.weight, self.eps)


def rmsnorm_forward(x: torch.Tensor, weight: torch.Tensor, epsilon: float = 1e-5) -> torch.Tensor:
    """Apply RMS normalization over the last dimension of ``x``.

    Args:
        x: Input tensor of any shape; normalized along its last dimension.
        weight: Scale tensor with as many elements as ``x``'s last dimension.
        epsilon: Small constant for numerical stability.

    Returns:
        A tensor with the same shape as ``x``.
    """
    original_shape = x.shape
    x = x.view(-1, x.shape[-1])  # kernel operates on a 2-D (rows, dim) view
    weight = weight.view(-1)
    output = torch.zeros_like(x)  # kernel writes its result in place
    ops.launch_forward_kernel(x, weight, output, epsilon)
    return output.view(original_shape)


def rmsnorm_backward(
    x: torch.Tensor,
    weight: torch.Tensor,
    grad_output: torch.Tensor,
    epsilon: float = 1e-5,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Compute RMS-norm gradients w.r.t. ``x`` and ``weight``.

    Args:
        x: The forward-pass input tensor.
        weight: The scale tensor used in the forward pass.
        grad_output: Gradient of the loss w.r.t. the forward output
            (same shape as ``x``).
        epsilon: The epsilon used in the forward pass.

    Returns:
        ``(grad_input, grad_weight)`` where ``grad_input`` has ``x``'s shape
        and ``grad_weight`` has ``weight``'s shape.
    """
    original_shape = x.shape
    weight_shape = weight.shape  # remembered so grad_weight keeps weight's shape
    x = x.view(-1, x.shape[-1])
    weight = weight.view(-1)
    # NOTE(review): kernel appears to consume grad_output as a flat buffer —
    # confirm against the Metal kernel's expected layout.
    grad_output = grad_output.view(-1)
    grad_input = torch.zeros_like(x)
    grad_weight = torch.zeros_like(weight)
    ops.launch_backward_kernel(x, weight, grad_output, grad_input, grad_weight, epsilon)
    # Bug fix: grad_weight has weight's element count (the last dim only), so
    # the previous grad_weight.view(original_shape) raised a RuntimeError for
    # any input with more than one row. Reshape to weight's shape instead.
    return grad_input.view(original_shape), grad_weight.view(weight_shape)
build/torch29-metal-aarch64-darwin/mlx_rmsnorm/__init__.py CHANGED
@@ -1,25 +1,3 @@
1
- from ._ops import ops
2
 
3
- import torch
4
-
5
-
6
- def rmsnorm_forward(x: torch.Tensor, weight: torch.Tensor, epsilon: float = 1e-5) -> torch.Tensor:
7
- original_shape = x.shape
8
- x = x.view(-1, x.shape[-1])
9
- weight = weight.view(-1)
10
- output = torch.zeros_like(x)
11
- ops.launch_forward_kernel(x, weight, output, epsilon)
12
- output = output.view(original_shape)
13
- return output
14
-
15
- def rmsnorm_backward(x: torch.Tensor, weight: torch.Tensor, grad_output: torch.Tensor, epsilon: float = 1e-5) -> torch.Tensor:
16
- original_shape = x.shape
17
- x = x.view(-1, x.shape[-1])
18
- weight = weight.view(-1)
19
- grad_output = grad_output.view(-1)
20
- grad_input = torch.zeros_like(x)
21
- grad_weight = torch.zeros_like(weight)
22
- ops.launch_backward_kernel(x, weight, grad_output, grad_input, grad_weight, epsilon)
23
- grad_input = grad_input.view(original_shape)
24
- grad_weight = grad_weight.view(original_shape)
25
- return grad_input, grad_weight
 
1
+ from .layers import RMSNorm, rmsnorm_forward, rmsnorm_backward
2
 
3
+ __all__ = ["RMSNorm", "rmsnorm_forward", "rmsnorm_backward"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
build/torch29-metal-aarch64-darwin/mlx_rmsnorm/__pycache__/__init__.cpython-313.pyc CHANGED
Binary files a/build/torch29-metal-aarch64-darwin/mlx_rmsnorm/__pycache__/__init__.cpython-313.pyc and b/build/torch29-metal-aarch64-darwin/mlx_rmsnorm/__pycache__/__init__.cpython-313.pyc differ
 
build/torch29-metal-aarch64-darwin/mlx_rmsnorm/__pycache__/_ops.cpython-313.pyc CHANGED
Binary files a/build/torch29-metal-aarch64-darwin/mlx_rmsnorm/__pycache__/_ops.cpython-313.pyc and b/build/torch29-metal-aarch64-darwin/mlx_rmsnorm/__pycache__/_ops.cpython-313.pyc differ
 
build/torch29-metal-aarch64-darwin/mlx_rmsnorm/__pycache__/layers.cpython-313.pyc ADDED
Binary file (2.6 kB). View file
 
build/torch29-metal-aarch64-darwin/mlx_rmsnorm/{_mlx_rmsnorm_97571a8_dirty.abi3.so → _mlx_rmsnorm_57d865f.abi3.so} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:9841a7657253626aefe2c9cd346fd61de8d60e4e5484e7ae9230c44232bf9fd1
3
- size 220160
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a507684a2ed7b127800e22962811212ac657ac126665f645e4a4bfe78e4bb3d8
3
+ size 220128
build/torch29-metal-aarch64-darwin/mlx_rmsnorm/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _mlx_rmsnorm_97571a8_dirty
3
- ops = torch.ops._mlx_rmsnorm_97571a8_dirty
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_mlx_rmsnorm_97571a8_dirty::{op_name}"
 
1
  import torch
2
+ from . import _mlx_rmsnorm_57d865f
3
+ ops = torch.ops._mlx_rmsnorm_57d865f
4
 
5
def add_op_namespace_prefix(op_name: str):
    """Qualify *op_name* with this kernel build's torch op namespace."""
    namespace = "_mlx_rmsnorm_57d865f"
    return f"{namespace}::{op_name}"
build/torch29-metal-aarch64-darwin/mlx_rmsnorm/layers.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from ._ops import ops

import torch


class RMSNorm(torch.nn.Module):
    """RMS normalization module dispatching to the custom Metal kernel.

    This stub only declares ``weight`` and ``eps``; they are expected to be
    assigned by a subclass or loader before ``forward`` is called.
    """

    # Learnable scale; must have as many elements as the input's last dimension.
    weight: torch.Tensor
    # Numerical-stability epsilon passed through to the kernel.
    eps: float

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return rmsnorm_forward(x, self.weight, self.eps)


def rmsnorm_forward(x: torch.Tensor, weight: torch.Tensor, epsilon: float = 1e-5) -> torch.Tensor:
    """Apply RMS normalization over the last dimension of ``x``.

    Args:
        x: Input tensor of any shape; normalized along its last dimension.
        weight: Scale tensor with as many elements as ``x``'s last dimension.
        epsilon: Small constant for numerical stability.

    Returns:
        A tensor with the same shape as ``x``.
    """
    original_shape = x.shape
    x = x.view(-1, x.shape[-1])  # kernel operates on a 2-D (rows, dim) view
    weight = weight.view(-1)
    output = torch.zeros_like(x)  # kernel writes its result in place
    ops.launch_forward_kernel(x, weight, output, epsilon)
    return output.view(original_shape)


def rmsnorm_backward(
    x: torch.Tensor,
    weight: torch.Tensor,
    grad_output: torch.Tensor,
    epsilon: float = 1e-5,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Compute RMS-norm gradients w.r.t. ``x`` and ``weight``.

    Args:
        x: The forward-pass input tensor.
        weight: The scale tensor used in the forward pass.
        grad_output: Gradient of the loss w.r.t. the forward output
            (same shape as ``x``).
        epsilon: The epsilon used in the forward pass.

    Returns:
        ``(grad_input, grad_weight)`` where ``grad_input`` has ``x``'s shape
        and ``grad_weight`` has ``weight``'s shape.
    """
    original_shape = x.shape
    weight_shape = weight.shape  # remembered so grad_weight keeps weight's shape
    x = x.view(-1, x.shape[-1])
    weight = weight.view(-1)
    # NOTE(review): kernel appears to consume grad_output as a flat buffer —
    # confirm against the Metal kernel's expected layout.
    grad_output = grad_output.view(-1)
    grad_input = torch.zeros_like(x)
    grad_weight = torch.zeros_like(weight)
    ops.launch_backward_kernel(x, weight, grad_output, grad_input, grad_weight, epsilon)
    # Bug fix: grad_weight has weight's element count (the last dim only), so
    # the previous grad_weight.view(original_shape) raised a RuntimeError for
    # any input with more than one row. Reshape to weight's shape instead.
    return grad_input.view(original_shape), grad_weight.view(weight_shape)