danieldk (HF Staff) committed
Commit 811c00f · verified · 1 parent: 6d826fc

Build uploaded using `kernels`.
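
For context, the `build/<torch-version>-<abi>-<cuda>-<arch>-<os>` directories removed below are the pre-compiled variants that the `kernels` library picks from at load time. A minimal sketch of how such a build is normally consumed — the repository id `kernels-community/layer-norm` is an assumption for illustration and is not taken from this commit:

```python
from kernels import get_kernel

# Downloads and imports the build variant matching the local torch/CUDA/ABI.
# The repo id below is hypothetical.
layer_norm = get_kernel("kernels-community/layer-norm")

print(layer_norm.__all__)
# ['layers', 'dropout_add_ln_fwd', 'dropout_add_ln_bwd',
#  'dropout_add_ln_parallel_residual_fwd', 'dropout_add_ln_parallel_residual_bwd']
```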

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete change.
Files changed (50):
  1. build/torch210-cxx11-cu126-x86_64-linux/__init__.py +0 -26
  2. build/torch210-cxx11-cu126-x86_64-linux/_layer_norm_fd07706.abi3.so +0 -3
  3. build/torch210-cxx11-cu126-x86_64-linux/_ops.py +0 -9
  4. build/torch210-cxx11-cu126-x86_64-linux/layer_norm/__init__.py +0 -26
  5. build/torch210-cxx11-cu126-x86_64-linux/layers.py +0 -51
  6. build/torch210-cxx11-cu126-x86_64-linux/metadata.json +0 -1
  7. build/torch210-cxx11-cu128-x86_64-linux/__init__.py +0 -26
  8. build/torch210-cxx11-cu128-x86_64-linux/_layer_norm_fd07706.abi3.so +0 -3
  9. build/torch210-cxx11-cu128-x86_64-linux/_ops.py +0 -9
  10. build/torch210-cxx11-cu128-x86_64-linux/layer_norm/__init__.py +0 -26
  11. build/torch210-cxx11-cu128-x86_64-linux/layers.py +0 -51
  12. build/torch210-cxx11-cu128-x86_64-linux/metadata.json +0 -1
  13. build/torch210-cxx11-cu130-x86_64-linux/__init__.py +0 -26
  14. build/torch210-cxx11-cu130-x86_64-linux/_layer_norm_fd07706.abi3.so +0 -3
  15. build/torch210-cxx11-cu130-x86_64-linux/_ops.py +0 -9
  16. build/torch210-cxx11-cu130-x86_64-linux/layer_norm/__init__.py +0 -26
  17. build/torch210-cxx11-cu130-x86_64-linux/layers.py +0 -51
  18. build/torch210-cxx11-cu130-x86_64-linux/metadata.json +0 -1
  19. build/torch28-cxx11-cu126-x86_64-linux/__init__.py +0 -26
  20. build/torch28-cxx11-cu126-x86_64-linux/_layer_norm_fd07706.abi3.so +0 -3
  21. build/torch28-cxx11-cu126-x86_64-linux/_ops.py +0 -9
  22. build/torch28-cxx11-cu126-x86_64-linux/layer_norm/__init__.py +0 -26
  23. build/torch28-cxx11-cu126-x86_64-linux/layers.py +0 -51
  24. build/torch28-cxx11-cu126-x86_64-linux/metadata.json +0 -1
  25. build/torch28-cxx11-cu128-x86_64-linux/__init__.py +0 -26
  26. build/torch28-cxx11-cu128-x86_64-linux/_layer_norm_fd07706.abi3.so +0 -3
  27. build/torch28-cxx11-cu128-x86_64-linux/_ops.py +0 -9
  28. build/torch28-cxx11-cu128-x86_64-linux/layer_norm/__init__.py +0 -26
  29. build/torch28-cxx11-cu128-x86_64-linux/layers.py +0 -51
  30. build/torch28-cxx11-cu128-x86_64-linux/metadata.json +0 -1
  31. build/torch28-cxx11-cu129-x86_64-linux/__init__.py +0 -26
  32. build/torch28-cxx11-cu129-x86_64-linux/_layer_norm_fd07706.abi3.so +0 -3
  33. build/torch28-cxx11-cu129-x86_64-linux/_ops.py +0 -9
  34. build/torch28-cxx11-cu129-x86_64-linux/layer_norm/__init__.py +0 -26
  35. build/torch28-cxx11-cu129-x86_64-linux/layers.py +0 -51
  36. build/torch28-cxx11-cu129-x86_64-linux/metadata.json +0 -1
  37. build/torch29-cxx11-cu126-x86_64-linux/__init__.py +0 -26
  38. build/torch29-cxx11-cu126-x86_64-linux/_layer_norm_fd07706.abi3.so +0 -3
  39. build/torch29-cxx11-cu126-x86_64-linux/_ops.py +0 -9
  40. build/torch29-cxx11-cu126-x86_64-linux/layer_norm/__init__.py +0 -26
  41. build/torch29-cxx11-cu126-x86_64-linux/layers.py +0 -51
  42. build/torch29-cxx11-cu126-x86_64-linux/metadata.json +0 -1
  43. build/torch29-cxx11-cu128-x86_64-linux/__init__.py +0 -26
  44. build/torch29-cxx11-cu128-x86_64-linux/_layer_norm_fd07706.abi3.so +0 -3
  45. build/torch29-cxx11-cu128-x86_64-linux/_ops.py +0 -9
  46. build/torch29-cxx11-cu128-x86_64-linux/layer_norm/__init__.py +0 -26
  47. build/torch29-cxx11-cu128-x86_64-linux/layers.py +0 -51
  48. build/torch29-cxx11-cu128-x86_64-linux/metadata.json +0 -1
  49. build/torch29-cxx11-cu130-x86_64-linux/__init__.py +0 -26
  50. build/torch29-cxx11-cu130-x86_64-linux/_layer_norm_fd07706.abi3.so +0 -3
build/torch210-cxx11-cu126-x86_64-linux/__init__.py DELETED
@@ -1,26 +0,0 @@
- import torch
- import torch.nn as nn
-
- from ._ops import ops
-
- from . import layers
-
- def dropout_add_ln_fwd(input, gamma, beta, rowscale, colscale, x0_subset, z_subset, dropout_p, epsilon, rowscale_const, z_numrows, gen, residual_in_fp32, is_rms_norm):
-     return ops.dropout_add_ln_fwd(input, gamma, beta, rowscale, colscale, x0_subset, z_subset, dropout_p, epsilon, rowscale_const, z_numrows, gen, residual_in_fp32, is_rms_norm)
-
- def dropout_add_ln_bwd(dz, dx, x, mu, rsigma, gamma, rowscale, colscale, x0_subset, z_subset, dropout_p, rowscale_const, x0_numrows, has_residual, is_rms_norm):
-     return ops.dropout_add_ln_bwd(dz, dx, x, mu, rsigma, gamma, rowscale, colscale, x0_subset, z_subset, dropout_p, rowscale_const, x0_numrows, has_residual, is_rms_norm)
-
- def dropout_add_ln_parallel_residual_fwd(input, gamma0, beta0, gamma1, beta1, dropout_p, epsilon, gen, residual_in_fp32, is_rms_norm):
-     return ops.dropout_add_ln_parallel_residual_fwd(input, gamma0, beta0, gamma1, beta1, dropout_p, epsilon, gen, residual_in_fp32, is_rms_norm)
-
- def dropout_add_ln_parallel_residual_bwd(dz0, dz1, dx, x, mu, rsigma, gamma0, gamma1, dropout_p, has_x1, has_residual, is_rms_norm):
-     return ops.dropout_add_ln_parallel_residual_bwd(dz0, dz1, dx, x, mu, rsigma, gamma0, gamma1, dropout_p, has_x1, has_residual, is_rms_norm)
-
- __all__ = [
-     "layers",
-     "dropout_add_ln_fwd",
-     "dropout_add_ln_bwd",
-     "dropout_add_ln_parallel_residual_fwd",
-     "dropout_add_ln_parallel_residual_bwd",
- ]

build/torch210-cxx11-cu126-x86_64-linux/_layer_norm_fd07706.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:49fd317d18b8b13367c70f037d1e8e3077aad8318d6dc40cd3050ab6f4e1d091
- size 712114272

build/torch210-cxx11-cu126-x86_64-linux/_ops.py DELETED
@@ -1,9 +0,0 @@
- import torch
- from . import _layer_norm_fd07706
- ops = torch.ops._layer_norm_fd07706
-
- def add_op_namespace_prefix(op_name: str):
-     """
-     Prefix op by namespace.
-     """
-     return f"_layer_norm_fd07706::{op_name}"

build/torch210-cxx11-cu126-x86_64-linux/layer_norm/__init__.py DELETED
@@ -1,26 +0,0 @@
- import ctypes
- import sys
-
- import importlib
- from pathlib import Path
- from types import ModuleType
-
- def _import_from_path(file_path: Path) -> ModuleType:
-     # We cannot use the module name as-is, after adding it to `sys.modules`,
-     # it would also be used for other imports. So, we make a module name that
-     # depends on the path for it to be unique using the hex-encoded hash of
-     # the path.
-     path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
-     module_name = path_hash
-     spec = importlib.util.spec_from_file_location(module_name, file_path)
-     if spec is None:
-         raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
-     module = importlib.util.module_from_spec(spec)
-     if module is None:
-         raise ImportError(f"Cannot load module {module_name} from spec")
-     sys.modules[module_name] = module
-     spec.loader.exec_module(module)  # type: ignore
-     return module
-
-
- globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))

build/torch210-cxx11-cu126-x86_64-linux/layers.py DELETED
@@ -1,51 +0,0 @@
- import torch
- import torch.nn as nn
-
- from ._ops import ops
-
-
- class LayerNorm(nn.Module):
-     weight: torch.Tensor
-     variance_epsilon: float
-
-     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
-         output = ops.dropout_add_ln_fwd(
-             hidden_states.view(-1, hidden_states.shape[-1]),
-             gamma = self.weight,
-             beta = None,
-             rowscale = None,
-             colscale = None,
-             x0_subset = None,
-             z_subset = None,
-             dropout_p = 0,
-             epsilon = self.variance_epsilon,
-             rowscale_const = 1.0,
-             z_numrows = hidden_states.shape[1],
-             gen = None,
-             residual_in_fp32 = False,
-             is_rms_norm = False,
-         )
-         return output[0].view(hidden_states.shape)
-
-     class LayerNorm ends here; LlamaRMSNorm follows.
- class LlamaRMSNorm(nn.Module):
-     weight: torch.Tensor
-     variance_epsilon: float
-
-     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
-         output = ops.dropout_add_ln_fwd(
-             hidden_states.view(-1, hidden_states.shape[-1]),
-             gamma = self.weight,
-             beta = None,
-             rowscale = None,
-             colscale = None,
-             x0_subset = None,
-             z_subset = None,
-             dropout_p = 0,
-             epsilon = self.variance_epsilon,
-             rowscale_const = 1.0,
-             z_numrows = hidden_states.shape[1],
-             gen = None,
-             residual_in_fp32 = False,
-             is_rms_norm = True,
-         )
-         return output[0].view(hidden_states.shape)

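The layer classes above declare `weight` and `variance_epsilon` only as annotations and define no `__init__`; they are intended to be mapped onto existing modules by the `kernels` layer-replacement machinery. A rough sketch of exercising `LlamaRMSNorm` directly, under the same hypothetical repository id as above (manual attribute wiring is an assumption for illustration, not the documented usage):

```python
import torch
from kernels import get_kernel

layer_norm = get_kernel("kernels-community/layer-norm")  # hypothetical repo id

norm = layer_norm.layers.LlamaRMSNorm()  # plain nn.Module.__init__, no parameters yet
norm.weight = torch.ones(4096, device="cuda", dtype=torch.float16)
norm.variance_epsilon = 1e-6

x = torch.randn(2, 16, 4096, device="cuda", dtype=torch.float16)
y = norm(x)  # runs ops.dropout_add_ln_fwd(..., is_rms_norm=True) under the hood
assert y.shape == x.shape
```
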
build/torch210-cxx11-cu126-x86_64-linux/metadata.json DELETED
@@ -1 +0,0 @@
- {"python-depends":[]}

build/torch210-cxx11-cu128-x86_64-linux/__init__.py DELETED
@@ -1,26 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/__init__.py above; omitted here)

build/torch210-cxx11-cu128-x86_64-linux/_layer_norm_fd07706.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a8f9c486fa147def1328121949fe502ba856d73e599a00844acf78faa8129cee
- size 1231439976

build/torch210-cxx11-cu128-x86_64-linux/_ops.py DELETED
@@ -1,9 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/_ops.py above; omitted here)

build/torch210-cxx11-cu128-x86_64-linux/layer_norm/__init__.py DELETED
@@ -1,26 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/layer_norm/__init__.py above; omitted here)

build/torch210-cxx11-cu128-x86_64-linux/layers.py DELETED
@@ -1,51 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/layers.py above; omitted here)

build/torch210-cxx11-cu128-x86_64-linux/metadata.json DELETED
@@ -1 +0,0 @@
- {"python-depends":[]}

build/torch210-cxx11-cu130-x86_64-linux/__init__.py DELETED
@@ -1,26 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/__init__.py above; omitted here)

build/torch210-cxx11-cu130-x86_64-linux/_layer_norm_fd07706.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:657b35fbbd096c4e34b804790484286941b781ef936fb920f9f1d10f7b0d4281
- size 1238357112

build/torch210-cxx11-cu130-x86_64-linux/_ops.py DELETED
@@ -1,9 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/_ops.py above; omitted here)

build/torch210-cxx11-cu130-x86_64-linux/layer_norm/__init__.py DELETED
@@ -1,26 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/layer_norm/__init__.py above; omitted here)

build/torch210-cxx11-cu130-x86_64-linux/layers.py DELETED
@@ -1,51 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/layers.py above; omitted here)

build/torch210-cxx11-cu130-x86_64-linux/metadata.json DELETED
@@ -1 +0,0 @@
- {"python-depends":[]}

build/torch28-cxx11-cu126-x86_64-linux/__init__.py DELETED
@@ -1,26 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/__init__.py above; omitted here)

build/torch28-cxx11-cu126-x86_64-linux/_layer_norm_fd07706.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:f4c4fce45ad6f08cfa1a3e2c7851c0964524975543a3e16b72406b6c8187bba4
- size 712034088

build/torch28-cxx11-cu126-x86_64-linux/_ops.py DELETED
@@ -1,9 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/_ops.py above; omitted here)

build/torch28-cxx11-cu126-x86_64-linux/layer_norm/__init__.py DELETED
@@ -1,26 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/layer_norm/__init__.py above; omitted here)

build/torch28-cxx11-cu126-x86_64-linux/layers.py DELETED
@@ -1,51 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/layers.py above; omitted here)

build/torch28-cxx11-cu126-x86_64-linux/metadata.json DELETED
@@ -1 +0,0 @@
- {"python-depends":[]}

build/torch28-cxx11-cu128-x86_64-linux/__init__.py DELETED
@@ -1,26 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/__init__.py above; omitted here)

build/torch28-cxx11-cu128-x86_64-linux/_layer_norm_fd07706.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:5821346938e86e0308c60fd072d54b57aba427aac75e354d3132dddc755ba125
- size 1231343024

build/torch28-cxx11-cu128-x86_64-linux/_ops.py DELETED
@@ -1,9 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/_ops.py above; omitted here)

build/torch28-cxx11-cu128-x86_64-linux/layer_norm/__init__.py DELETED
@@ -1,26 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/layer_norm/__init__.py above; omitted here)

build/torch28-cxx11-cu128-x86_64-linux/layers.py DELETED
@@ -1,51 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/layers.py above; omitted here)

build/torch28-cxx11-cu128-x86_64-linux/metadata.json DELETED
@@ -1 +0,0 @@
- {"python-depends":[]}

build/torch28-cxx11-cu129-x86_64-linux/__init__.py DELETED
@@ -1,26 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/__init__.py above; omitted here)

build/torch28-cxx11-cu129-x86_64-linux/_layer_norm_fd07706.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:43c278069ef7e766a8eae76c27b4c91a3e84065c4714f7d9e0d6ff8413732e7a
- size 1283038336

build/torch28-cxx11-cu129-x86_64-linux/_ops.py DELETED
@@ -1,9 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/_ops.py above; omitted here)

build/torch28-cxx11-cu129-x86_64-linux/layer_norm/__init__.py DELETED
@@ -1,26 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/layer_norm/__init__.py above; omitted here)

build/torch28-cxx11-cu129-x86_64-linux/layers.py DELETED
@@ -1,51 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/layers.py above; omitted here)

build/torch28-cxx11-cu129-x86_64-linux/metadata.json DELETED
@@ -1 +0,0 @@
- {"python-depends":[]}

build/torch29-cxx11-cu126-x86_64-linux/__init__.py DELETED
@@ -1,26 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/__init__.py above; omitted here)

build/torch29-cxx11-cu126-x86_64-linux/_layer_norm_fd07706.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:bc404a5e076466f49a0be4fa53652f2a7b40f1c611478ba8d1c4ef07c524815a
- size 712034248

build/torch29-cxx11-cu126-x86_64-linux/_ops.py DELETED
@@ -1,9 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/_ops.py above; omitted here)

build/torch29-cxx11-cu126-x86_64-linux/layer_norm/__init__.py DELETED
@@ -1,26 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/layer_norm/__init__.py above; omitted here)

build/torch29-cxx11-cu126-x86_64-linux/layers.py DELETED
@@ -1,51 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/layers.py above; omitted here)

build/torch29-cxx11-cu126-x86_64-linux/metadata.json DELETED
@@ -1 +0,0 @@
- {"python-depends":[]}

build/torch29-cxx11-cu128-x86_64-linux/__init__.py DELETED
@@ -1,26 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/__init__.py above; omitted here)

build/torch29-cxx11-cu128-x86_64-linux/_layer_norm_fd07706.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8da63d5fa4aeca09b5b5f1b3355c401fc516a15622637a2c65a03081fc55fdb3
- size 1231343160

build/torch29-cxx11-cu128-x86_64-linux/_ops.py DELETED
@@ -1,9 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/_ops.py above; omitted here)

build/torch29-cxx11-cu128-x86_64-linux/layer_norm/__init__.py DELETED
@@ -1,26 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/layer_norm/__init__.py above; omitted here)

build/torch29-cxx11-cu128-x86_64-linux/layers.py DELETED
@@ -1,51 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/layers.py above; omitted here)

build/torch29-cxx11-cu128-x86_64-linux/metadata.json DELETED
@@ -1 +0,0 @@
- {"python-depends":[]}

build/torch29-cxx11-cu130-x86_64-linux/__init__.py DELETED
@@ -1,26 +0,0 @@
(contents identical to build/torch210-cxx11-cu126-x86_64-linux/__init__.py above; omitted here)

build/torch29-cxx11-cu130-x86_64-linux/_layer_norm_fd07706.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:7bf6e51b89bda807e770de087312693e67a4f215e8b036c39e92b6bd7de12ebb
- size 1238272584