import torch

import activation
from common.diff_engine import DiffCase
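

# Differential-test case for the fused mul + PolyNorm kernel: the unfused
# reference module below is diffed against the extension's fused
# activation.layers.FusedMulPolyNorm.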
class FusedMulPolyNorm(torch.nn.Module):
    """Unfused reference: PolyNorm (via the extension's poly_norm op)
    followed by an eager elementwise multiply with `mul`."""

    def __init__(self, eps=1e-6, dtype: torch.dtype = torch.float32):
        super().__init__()
        # PolyNorm carries three per-term weights (initialized to 1/3 each)
        # and a single scalar bias.
        self.weight = torch.nn.Parameter(torch.ones(3, dtype=dtype) / 3)
        self.bias = torch.nn.Parameter(torch.zeros(1, dtype=dtype))
        self.eps = eps

    def forward(self, x, mul):
        output = activation.poly_norm(x, self.weight, self.bias, self.eps)
        return output * mul
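

# For orientation, a pure-PyTorch sketch of what poly_norm is assumed to
# compute: the PolyNorm activation, i.e. a weighted sum of RMS-normalized
# powers of x plus a scalar bias. It mirrors the parameter shapes above but
# is an assumption about the kernel's semantics, not its actual source.
def _poly_norm_reference(x, weight, bias, eps):
    def rms_norm(u):
        return u * torch.rsqrt(u.pow(2).mean(dim=-1, keepdim=True) + eps)

    # Assumed term order: weight[0] scales the cubic term, weight[2] the
    # linear term.
    return (
        weight[0] * rms_norm(x**3)
        + weight[1] * rms_norm(x**2)
        + weight[2] * rms_norm(x)
        + bias
    )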


class MulPoly(DiffCase):
    def build_inputs(self, bs, sl, hidden, dtype, eps):
        # Shared inputs for both implementations; x and mul require grad so
        # the backward pass can be diffed as well (see grad_inputs below).
        return {
            "x": torch.randn(bs, sl, hidden, dtype=dtype, requires_grad=True),
            "mul": torch.randn(bs, sl, hidden, dtype=dtype, requires_grad=True),
            "weight": torch.ones(3, dtype=dtype),
            "bias": torch.ones(1, dtype=dtype),
            "dim": hidden,
            "eps": eps,
            "dtype": dtype,
        }

    def make_naive(self, I):
        # Unfused reference, with parameters copied from the shared inputs.
        m = FusedMulPolyNorm(I["eps"], dtype=I["dtype"])
        m.weight = torch.nn.Parameter(I["weight"].detach().clone())
        m.bias = torch.nn.Parameter(I["bias"].detach().clone())
        return m

    def make_cuda(self, I):
        # Fused CUDA layer from the extension, given the same parameters.
        m = activation.layers.FusedMulPolyNorm(I["eps"], dtype=I["dtype"])
        m.weight = torch.nn.Parameter(I["weight"].detach().clone())
        m.bias = torch.nn.Parameter(I["bias"].detach().clone())
        return m

    def forward(self, obj, I):
        return obj(I["x"], I["mul"])

    def grad_inputs(self, I):
        # The two differentiable inputs whose gradients the harness compares.
        return [I["x"], I["mul"]]


CASE = MulPoly()
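

# A minimal standalone sketch of how this case might be exercised; the real
# driver presumably lives in common.diff_engine and its API is not shown
# here. Shapes, tolerances, and the move to CUDA are illustrative
# assumptions.
if __name__ == "__main__":
    I = CASE.build_inputs(bs=2, sl=16, hidden=64, dtype=torch.float32, eps=1e-6)
    # Assume the fused layer needs CUDA tensors.
    I = {k: v.cuda() if torch.is_tensor(v) else v for k, v in I.items()}
    naive = CASE.make_naive(I)
    fused = CASE.make_cuda(I)
    torch.testing.assert_close(
        CASE.forward(naive, I), CASE.forward(fused, I), rtol=1e-3, atol=1e-3
    )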