Build
This view is limited to 50 files because it contains too many changes. See raw diff.
- build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py +0 -52
- build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py +0 -9
- build/torch25-cxx11-cu118-x86_64-linux/activation/layers.py +0 -65
- build/torch25-cxx11-cu121-x86_64-linux/activation/__init__.py +0 -52
- build/torch25-cxx11-cu121-x86_64-linux/activation/_ops.py +0 -9
- build/torch25-cxx11-cu121-x86_64-linux/activation/layers.py +0 -65
- build/torch25-cxx11-cu124-x86_64-linux/activation/__init__.py +0 -52
- build/torch25-cxx11-cu124-x86_64-linux/activation/_ops.py +0 -9
- build/torch25-cxx11-cu124-x86_64-linux/activation/layers.py +0 -65
- build/torch25-cxx98-cu118-x86_64-linux/activation/__init__.py +0 -52
- build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py +0 -9
- build/torch25-cxx98-cu118-x86_64-linux/activation/layers.py +0 -65
- build/torch25-cxx98-cu121-x86_64-linux/activation/__init__.py +0 -52
- build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_78448fa.abi3.so +0 -3
- build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py +0 -9
- build/torch25-cxx98-cu121-x86_64-linux/activation/layers.py +0 -65
- build/torch25-cxx98-cu124-x86_64-linux/activation/__init__.py +0 -52
- build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so +0 -3
- build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py +0 -9
- build/torch25-cxx98-cu124-x86_64-linux/activation/layers.py +0 -65
- build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so +0 -3
- build/{torch25-cxx11-cu121-x86_64-linux/activation/_activation_78448fa.abi3.so → torch26-cxx11-cu118-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so} +2 -2
- build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py +3 -3
- build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py +14 -0
- build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so +0 -3
- build/{torch25-cxx11-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so → torch26-cxx11-cu124-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so} +2 -2
- build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py +3 -3
- build/torch26-cxx11-cu124-x86_64-linux/activation/layers.py +14 -0
- build/torch26-cxx11-cu126-aarch64-linux/activation/__init__.py +0 -52
- build/torch26-cxx11-cu126-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so +0 -3
- build/torch26-cxx11-cu126-aarch64-linux/activation/_ops.py +0 -9
- build/torch26-cxx11-cu126-aarch64-linux/activation/layers.py +0 -65
- build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_78448fa.abi3.so +0 -3
- build/{torch27-cxx11-cu126-x86_64-linux/activation/_activation_78448fa.abi3.so → torch26-cxx11-cu126-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so} +1 -1
- build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py +3 -3
- build/torch26-cxx11-cu126-x86_64-linux/activation/layers.py +14 -0
- build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so +0 -3
- build/{torch25-cxx11-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so → torch26-cxx98-cu118-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so} +2 -2
- build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py +3 -3
- build/torch26-cxx98-cu118-x86_64-linux/activation/layers.py +14 -0
- build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so +0 -3
- build/{torch25-cxx98-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so → torch26-cxx98-cu124-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so} +2 -2
- build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py +3 -3
- build/torch26-cxx98-cu124-x86_64-linux/activation/layers.py +14 -0
- build/torch26-cxx98-cu126-aarch64-linux/activation/__init__.py +0 -52
- build/torch26-cxx98-cu126-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so +0 -3
- build/torch26-cxx98-cu126-aarch64-linux/activation/_ops.py +0 -9
- build/torch26-cxx98-cu126-aarch64-linux/activation/layers.py +0 -65
- build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_78448fa.abi3.so +0 -3
- build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so +3 -0
build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py
DELETED
@@ -1,52 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.silu_and_mul(out, x)
-    return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_and_mul(out, x)
-    return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_tanh_and_mul(out, x)
-    return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
-    ops.fatrelu_and_mul(out, x, threshold)
-    return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_fast(out, x)
-    return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_new(out, x)
-    return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_quick(out, x)
-    return out
-
-
-__all__ = [
-    "silu_and_mul",
-    "gelu_and_mul",
-    "gelu_tanh_and_mul",
-    "fatrelu_and_mul",
-    "gelu_fast",
-    "gelu_new",
-    "gelu_quick",
-    "layers",
-]
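Note: the functional wrappers above write the result into a caller-allocated output tensor. A minimal usage sketch (illustrative only, not part of this diff; it assumes the built activation package is importable and that the last dimension of x is 2*d):

# Hedged usage sketch for the functional API shown above.
import torch
import activation  # assumption: the built kernel package is on the import path

x = torch.randn(8, 2048, device="cuda", dtype=torch.float16)    # last dim = 2 * d
out = torch.empty(8, 1024, device="cuda", dtype=torch.float16)  # caller allocates d = 1024
activation.silu_and_mul(out, x)  # computes SiLU(x[..., :d]) * x[..., d:] into out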
build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py
DELETED
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_78448fa
-ops = torch.ops._activation_78448fa
-
-def add_op_namespace_prefix(op_name: str):
-    """
-    Prefix op by namespace.
-    """
-    return f"_activation_78448fa::{op_name}"
build/torch25-cxx11-cu118-x86_64-linux/activation/layers.py
DELETED
@@ -1,65 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.silu_and_mul(out, x)
-        return out
-
-
-class GeluAndMul(nn.Module):
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_and_mul(out, x)
-        return out
-
-
-class GeluTanhAndMul(nn.Module):
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_tanh_and_mul(out, x)
-        return out
-
-
-class FatreluAndMul(nn.Module):
-    def __init__(self, threshold: float = 0.0):
-        super().__init__()
-        self.threshold = threshold
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.fatrelu_and_mul(out, x, self.threshold)
-        return out
-
-
-class FastGELU(nn.Module):
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_fast(out, x)
-        return out
-
-
-class NewGELU(nn.Module):
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_new(out, x)
-        return out
-
-
-class QuickGELU(nn.Module):
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_quick(out, x)
-        return out
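The layer wrappers above allocate their own output: the *AndMul modules halve the last dimension, while FastGELU, NewGELU, and QuickGELU are elementwise and keep the input shape. A short shape sketch (illustrative only, assuming the same importable activation package as above):

# Hedged shape sketch for the layer wrappers shown above.
import torch
from activation.layers import SiluAndMul, QuickGELU  # assumption: built package is importable

x = torch.randn(4, 16, 4096, device="cuda", dtype=torch.bfloat16)
y = SiluAndMul()(x)                                   # (4, 16, 4096) -> (4, 16, 2048)
z = QuickGELU()(torch.randn(4, 1024, device="cuda"))  # elementwise: shape preserved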
build/torch25-cxx11-cu121-x86_64-linux/activation/__init__.py
DELETED
@@ -1,52 +0,0 @@
(all 52 lines removed; the file was identical to build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py shown above)
build/torch25-cxx11-cu121-x86_64-linux/activation/_ops.py
DELETED
@@ -1,9 +0,0 @@
(all 9 lines removed; the file was identical to build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py shown above)
build/torch25-cxx11-cu121-x86_64-linux/activation/layers.py
DELETED
@@ -1,65 +0,0 @@
(all 65 lines removed; the file was identical to build/torch25-cxx11-cu118-x86_64-linux/activation/layers.py shown above)
build/torch25-cxx11-cu124-x86_64-linux/activation/__init__.py
DELETED
@@ -1,52 +0,0 @@
(all 52 lines removed; the file was identical to build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py shown above)
build/torch25-cxx11-cu124-x86_64-linux/activation/_ops.py
DELETED
@@ -1,9 +0,0 @@
(all 9 lines removed; the file was identical to build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py shown above)
build/torch25-cxx11-cu124-x86_64-linux/activation/layers.py
DELETED
@@ -1,65 +0,0 @@
(all 65 lines removed; the file was identical to build/torch25-cxx11-cu118-x86_64-linux/activation/layers.py shown above)
build/torch25-cxx98-cu118-x86_64-linux/activation/__init__.py
DELETED
@@ -1,52 +0,0 @@
(all 52 lines removed; the file was identical to build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py shown above)
build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py
DELETED
@@ -1,9 +0,0 @@
(all 9 lines removed; the file was identical to build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py shown above)
build/torch25-cxx98-cu118-x86_64-linux/activation/layers.py
DELETED
@@ -1,65 +0,0 @@
(all 65 lines removed; the file was identical to build/torch25-cxx11-cu118-x86_64-linux/activation/layers.py shown above)
build/torch25-cxx98-cu121-x86_64-linux/activation/__init__.py
DELETED
@@ -1,52 +0,0 @@
(all 52 lines removed; the file was identical to build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py shown above)
build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_78448fa.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7e6475ed603ad2cb565bd19ad2554484bd6c00d0d3f02decff60f2285df2546f
-size 2463232
build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py
DELETED
@@ -1,9 +0,0 @@
(all 9 lines removed; the file was identical to build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py shown above)
build/torch25-cxx98-cu121-x86_64-linux/activation/layers.py
DELETED
@@ -1,65 +0,0 @@
(all 65 lines removed; the file was identical to build/torch25-cxx11-cu118-x86_64-linux/activation/layers.py shown above)
build/torch25-cxx98-cu124-x86_64-linux/activation/__init__.py
DELETED
@@ -1,52 +0,0 @@
(all 52 lines removed; the file was identical to build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py shown above)
build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a0767f6dba00c543d3cb77e2044bccd32ef569abc55b921231112c8a1ddfb187
-size 2502088
build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py
DELETED
@@ -1,9 +0,0 @@
(all 9 lines removed; the file was identical to build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py shown above)
build/torch25-cxx98-cu124-x86_64-linux/activation/layers.py
DELETED
@@ -1,65 +0,0 @@
(all 65 lines removed; the file was identical to build/torch25-cxx11-cu118-x86_64-linux/activation/layers.py shown above)
build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e0c04d860454cc565113a3c93ff755fe9cbba0578c4604b89ad89e47c2503932
-size 2448056
build/{torch25-cxx11-cu121-x86_64-linux/activation/_activation_78448fa.abi3.so → torch26-cxx11-cu118-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:b925dc27b6a9afd5b6d11e454275222c531a92f7ca27958ac81a78c580665e4d
+size 2448088
build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py
CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import
-ops = torch.ops.
+from . import _activation_e99cc09_dirty
+ops = torch.ops._activation_e99cc09_dirty
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"
+    return f"_activation_e99cc09_dirty::{op_name}"
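The _ops.py shim binds the Python package to whichever shared object was built, so the torch.ops namespace now follows the new build hash (_activation_e99cc09_dirty instead of _activation_78448fa). A hedged sketch of how that namespace and the add_op_namespace_prefix helper would be used (illustrative only; assumes importing the built package loads the .so):

# Hedged sketch of addressing the renamed op namespace shown above.
import torch
from activation._ops import ops, add_op_namespace_prefix  # assumption: built package importable

qualified = add_op_namespace_prefix("silu_and_mul")  # "_activation_e99cc09_dirty::silu_and_mul"
x = torch.randn(2, 256, device="cuda", dtype=torch.float16)
out = torch.empty(2, 128, device="cuda", dtype=torch.float16)
ops.silu_and_mul(out, x)  # same registered op, reached through torch.ops._activation_e99cc09_dirty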
build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py
CHANGED
@@ -5,6 +5,8 @@ from ._ops import ops
 
 
 class SiluAndMul(nn.Module):
+    can_torch_compile: bool = True
+
     def forward(self, x: torch.Tensor):
         d = x.shape[-1] // 2
         output_shape = x.shape[:-1] + (d,)
@@ -14,6 +16,8 @@ class SiluAndMul(nn.Module):
 
 
 class GeluAndMul(nn.Module):
+    can_torch_compile: bool = True
+
     def forward(self, x: torch.Tensor):
         d = x.shape[-1] // 2
         output_shape = x.shape[:-1] + (d,)
@@ -23,6 +27,8 @@ class GeluAndMul(nn.Module):
 
 
 class GeluTanhAndMul(nn.Module):
+    can_torch_compile: bool = True
+
     def forward(self, x: torch.Tensor):
         d = x.shape[-1] // 2
         output_shape = x.shape[:-1] + (d,)
@@ -32,6 +38,8 @@ class GeluTanhAndMul(nn.Module):
 
 
 class FatreluAndMul(nn.Module):
+    can_torch_compile: bool = True
+
     def __init__(self, threshold: float = 0.0):
         super().__init__()
         self.threshold = threshold
@@ -45,6 +53,8 @@ class FatreluAndMul(nn.Module):
 
 
 class FastGELU(nn.Module):
+    can_torch_compile: bool = True
+
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         out = torch.empty_like(x)
         ops.gelu_fast(out, x)
@@ -52,6 +62,8 @@ class FastGELU(nn.Module):
 
 
 class NewGELU(nn.Module):
+    can_torch_compile: bool = True
+
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         out = torch.empty_like(x)
         ops.gelu_new(out, x)
@@ -59,6 +71,8 @@ class NewGELU(nn.Module):
 
 
 class QuickGELU(nn.Module):
+    can_torch_compile: bool = True
+
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         out = torch.empty_like(x)
         ops.gelu_quick(out, x)
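The only source change to layers.py is the new class attribute can_torch_compile: bool = True on every module. A plausible reading (an assumption, not stated in this diff) is that downstream code uses the flag to decide whether a layer may sit inside a torch.compile region; a minimal sketch of that pattern:

# Hypothetical consumer-side gating on the new flag; the gating logic is an assumption,
# not something defined in this repository.
import torch
from activation.layers import SiluAndMul

layer = SiluAndMul()
if getattr(layer, "can_torch_compile", False):
    layer = torch.compile(layer)  # compile only when the layer declares support
x = torch.randn(4, 512, device="cuda", dtype=torch.float16)
y = layer(x)  # (4, 512) -> (4, 256)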
build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:48d7b0d190af1dd0366dbaeb0690b9c7cd1dfdc9aeda9b0b23bce56c70f5cbae
-size 2509928
build/{torch25-cxx11-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so → torch26-cxx11-cu124-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:cfdbe510752b57a8dc4671f744bb0a2da5b1646e0b9a19fec02f1505ba044c8c
+size 2509960
build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py
CHANGED
@@ -1,9 +1,9 @@
(same change as build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py above: the op module is now imported and addressed as _activation_e99cc09_dirty)
build/torch26-cxx11-cu124-x86_64-linux/activation/layers.py
CHANGED
(same change as build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py above: can_torch_compile: bool = True added as a class attribute on each activation module)
build/torch26-cxx11-cu126-aarch64-linux/activation/__init__.py
DELETED
@@ -1,52 +0,0 @@
(all 52 lines removed; the file was identical to build/torch25-cxx11-cu118-x86_64-linux/activation/__init__.py shown above)
build/torch26-cxx11-cu126-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:558e4499ad3c09d02633488cfdc802a228b78a8cd51d963c92239d44744298c7
-size 2631936
build/torch26-cxx11-cu126-aarch64-linux/activation/_ops.py
DELETED
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_bbdc1b4_dirty
-ops = torch.ops._activation_bbdc1b4_dirty
-
-def add_op_namespace_prefix(op_name: str):
-    """
-    Prefix op by namespace.
-    """
-    return f"_activation_bbdc1b4_dirty::{op_name}"
build/torch26-cxx11-cu126-aarch64-linux/activation/layers.py
DELETED
@@ -1,65 +0,0 @@
(all 65 lines removed; the file was identical to build/torch25-cxx11-cu118-x86_64-linux/activation/layers.py shown above)
build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_78448fa.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:11a11d0f4119edc5c637bab04ebd5669750a0e4f4000f58ab1bf5be2d8d9ab0b
-size 2518568
build/{torch27-cxx11-cu126-x86_64-linux/activation/_activation_78448fa.abi3.so → torch26-cxx11-cu126-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:70e544ad6448a5576d26147f48403f3e9e593f4a2e24167dc8acb81ce3b7932e
 size 2518600
build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py
CHANGED
@@ -1,9 +1,9 @@
(same change as build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py above: the op module is now imported and addressed as _activation_e99cc09_dirty)
build/torch26-cxx11-cu126-x86_64-linux/activation/layers.py
CHANGED
(same change as build/torch26-cxx11-cu118-x86_64-linux/activation/layers.py above: can_torch_compile: bool = True added as a class attribute on each activation module)
build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:56dcc985761e309cbef3fc2a201f26e800583128d6e5a3fc1b23800fb0b8b48c
-size 2440544
build/{torch25-cxx11-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so → torch26-cxx98-cu118-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:60fd224c33657558f03be5be57cc8d35ade23225b1abd71557b170c8a7010cd1
+size 2440576
build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py
CHANGED
@@ -1,9 +1,9 @@
(same change as build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py above: the op module is now imported and addressed as _activation_e99cc09_dirty)
build/torch26-cxx98-cu118-x86_64-linux/activation/layers.py
CHANGED
@@ -5,6 +5,8 @@ from ._ops import ops
 
 
 class SiluAndMul(nn.Module):
+    can_torch_compile: bool = True
+
     def forward(self, x: torch.Tensor):
         d = x.shape[-1] // 2
         output_shape = x.shape[:-1] + (d,)
@@ -14,6 +16,8 @@ class SiluAndMul(nn.Module):
 
 
 class GeluAndMul(nn.Module):
+    can_torch_compile: bool = True
+
     def forward(self, x: torch.Tensor):
         d = x.shape[-1] // 2
         output_shape = x.shape[:-1] + (d,)
@@ -23,6 +27,8 @@ class GeluAndMul(nn.Module):
 
 
 class GeluTanhAndMul(nn.Module):
+    can_torch_compile: bool = True
+
     def forward(self, x: torch.Tensor):
         d = x.shape[-1] // 2
         output_shape = x.shape[:-1] + (d,)
@@ -32,6 +38,8 @@ class GeluTanhAndMul(nn.Module):
 
 
 class FatreluAndMul(nn.Module):
+    can_torch_compile: bool = True
+
     def __init__(self, threshold: float = 0.0):
         super().__init__()
         self.threshold = threshold
@@ -45,6 +53,8 @@ class FatreluAndMul(nn.Module):
 
 
 class FastGELU(nn.Module):
+    can_torch_compile: bool = True
+
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         out = torch.empty_like(x)
         ops.gelu_fast(out, x)
@@ -52,6 +62,8 @@ class FastGELU(nn.Module):
 
 
 class NewGELU(nn.Module):
+    can_torch_compile: bool = True
+
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         out = torch.empty_like(x)
         ops.gelu_new(out, x)
@@ -59,6 +71,8 @@ class NewGELU(nn.Module):
 
 
 class QuickGELU(nn.Module):
+    can_torch_compile: bool = True
+
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         out = torch.empty_like(x)
         ops.gelu_quick(out, x)
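The only source change in `layers.py` is the new `can_torch_compile: bool = True` class attribute, which presumably marks these modules as safe to wrap in `torch.compile`. A minimal sketch of how a caller might consume the flag (the import path and the gating logic are assumptions, not part of this diff):

import torch
from activation.layers import SiluAndMul

layer = SiluAndMul()
x = torch.randn(4, 4096, device="cuda", dtype=torch.float16)  # last dim is 2 * d

# Only compile modules that declare themselves torch.compile-safe.
fwd = torch.compile(layer) if getattr(layer, "can_torch_compile", False) else layer
y = fwd(x)
print(y.shape)  # torch.Size([4, 2048])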
build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:03c5f08322796d0736024412babe5d7f13bb1126387976ae12a80485a40d3883
-size 2502240
build/{torch25-cxx98-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so → torch26-cxx98-cu124-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e364773259dc1b91f3c0d3b076da83c5a9c6ee18ffdace30315c602dffd1dabe
+size 2502264
build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py
CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_78448fa
-ops = torch.ops._activation_78448fa
+from . import _activation_e99cc09_dirty
+ops = torch.ops._activation_e99cc09_dirty
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_78448fa::{op_name}"
+    return f"_activation_e99cc09_dirty::{op_name}"
build/torch26-cxx98-cu124-x86_64-linux/activation/layers.py
CHANGED
@@ -5,6 +5,8 @@ from ._ops import ops
 
 
 class SiluAndMul(nn.Module):
+    can_torch_compile: bool = True
+
     def forward(self, x: torch.Tensor):
         d = x.shape[-1] // 2
         output_shape = x.shape[:-1] + (d,)
@@ -14,6 +16,8 @@ class SiluAndMul(nn.Module):
 
 
 class GeluAndMul(nn.Module):
+    can_torch_compile: bool = True
+
     def forward(self, x: torch.Tensor):
         d = x.shape[-1] // 2
         output_shape = x.shape[:-1] + (d,)
@@ -23,6 +27,8 @@ class GeluAndMul(nn.Module):
 
 
 class GeluTanhAndMul(nn.Module):
+    can_torch_compile: bool = True
+
     def forward(self, x: torch.Tensor):
         d = x.shape[-1] // 2
         output_shape = x.shape[:-1] + (d,)
@@ -32,6 +38,8 @@ class GeluTanhAndMul(nn.Module):
 
 
 class FatreluAndMul(nn.Module):
+    can_torch_compile: bool = True
+
     def __init__(self, threshold: float = 0.0):
         super().__init__()
         self.threshold = threshold
@@ -45,6 +53,8 @@ class FatreluAndMul(nn.Module):
 
 
 class FastGELU(nn.Module):
+    can_torch_compile: bool = True
+
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         out = torch.empty_like(x)
         ops.gelu_fast(out, x)
@@ -52,6 +62,8 @@ class FastGELU(nn.Module):
 
 
 class NewGELU(nn.Module):
+    can_torch_compile: bool = True
+
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         out = torch.empty_like(x)
         ops.gelu_new(out, x)
@@ -59,6 +71,8 @@ class NewGELU(nn.Module):
 
 
 class QuickGELU(nn.Module):
+    can_torch_compile: bool = True
+
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         out = torch.empty_like(x)
         ops.gelu_quick(out, x)
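FatreluAndMul is the only layer here with constructor state. A short usage sketch under the same assumptions as above (threshold value and shapes are illustrative; the gating convention is inferred from the forward shown in the diff):

import torch
from activation.layers import FatreluAndMul

layer = FatreluAndMul(threshold=0.05)
x = torch.randn(2, 2048, device="cuda", dtype=torch.float16)

# forward splits the last dimension in half, applies the thresholded ReLU to the
# first half and multiplies it by the second half, so the output width is halved.
y = layer(x)
print(y.shape)  # torch.Size([2, 1024])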
build/torch26-cxx98-cu126-aarch64-linux/activation/__init__.py
DELETED
@@ -1,52 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.silu_and_mul(out, x)
-    return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_and_mul(out, x)
-    return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_tanh_and_mul(out, x)
-    return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
-    ops.fatrelu_and_mul(out, x, threshold)
-    return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_fast(out, x)
-    return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_new(out, x)
-    return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_quick(out, x)
-    return out
-
-
-__all__ = [
-    "silu_and_mul",
-    "gelu_and_mul",
-    "gelu_tanh_and_mul",
-    "fatrelu_and_mul",
-    "gelu_fast",
-    "gelu_new",
-    "gelu_quick",
-    "layers",
-]
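The deleted aarch64 `__init__.py` is the functional counterpart to `layers.py`: thin wrappers around `ops.*` that follow a preallocated-output convention. A minimal calling sketch, assuming the package imports as `activation` (shapes are illustrative):

import torch
import activation

x = torch.randn(4, 2048, device="cuda", dtype=torch.float16)
d = x.shape[-1] // 2
out = torch.empty(x.shape[:-1] + (d,), dtype=x.dtype, device=x.device)

# The wrapper writes the result into `out` in place and also returns it.
activation.silu_and_mul(out, x)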
build/torch26-cxx98-cu126-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f6afd50526ff4221cddd52cb947900cdf6bb95ad0a6bffcd1a86bda4d3f52349
-size 2628128
build/torch26-cxx98-cu126-aarch64-linux/activation/_ops.py
DELETED
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_bbdc1b4_dirty
-ops = torch.ops._activation_bbdc1b4_dirty
-
-def add_op_namespace_prefix(op_name: str):
-    """
-    Prefix op by namespace.
-    """
-    return f"_activation_bbdc1b4_dirty::{op_name}"
build/torch26-cxx98-cu126-aarch64-linux/activation/layers.py
DELETED
@@ -1,65 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.silu_and_mul(out, x)
-        return out
-
-
-class GeluAndMul(nn.Module):
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_and_mul(out, x)
-        return out
-
-
-class GeluTanhAndMul(nn.Module):
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_tanh_and_mul(out, x)
-        return out
-
-
-class FatreluAndMul(nn.Module):
-    def __init__(self, threshold: float = 0.0):
-        super().__init__()
-        self.threshold = threshold
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.fatrelu_and_mul(out, x, self.threshold)
-        return out
-
-
-class FastGELU(nn.Module):
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_fast(out, x)
-        return out
-
-
-class NewGELU(nn.Module):
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_new(out, x)
-        return out
-
-
-class QuickGELU(nn.Module):
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_quick(out, x)
-        return out
build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_78448fa.abi3.so
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f6eae5c895c564fbd2524ce488f4e91e65dc63402cd41a8bc74474b7437b2e62
-size 2506784
build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_e99cc09_dirty.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ac88cc0d3c65ab283d20608f3a097be29ee572e7856f10f8d7919536efd95b4
+size 2506808