Dataset schema (column / type / observed range):

  entry_point                   string   lengths 1 - 65
  original_triton_python_code   string   lengths 208 - 619k
  optimised_triton_code         string   lengths 1.15k - 275k
  repo_name                     string   lengths 7 - 115
  module_name                   string   lengths 1 - 65
  synthetic                     bool     1 class
  uuid                          int64    0 - 18.5k
  licenses                      list     lengths 1 - 6
  stars                         int64    0 - 19.8k
  sha                           string   lengths 40 - 40
  repo_link                     string   lengths 72 - 180
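A minimal sketch of iterating rows with this schema via the Hugging Face `datasets` library; the dataset id "ORG/NAME" is a hypothetical placeholder, since this dump does not state the repository id.

# Sketch: read a few rows of a dataset with the columns listed above.
# "ORG/NAME" is a placeholder; substitute the actual dataset repo id.
from datasets import load_dataset

ds = load_dataset("ORG/NAME", split="train")
for row in ds.select(range(3)):
    # Each row pairs a reference PyTorch module with Inductor-generated Triton code.
    print(row["entry_point"], row["repo_name"], row["stars"], row["sha"])
    print(len(row["original_triton_python_code"]), len(row["optimised_triton_code"]))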
--------------------------------------------------------------------

entry_point: Critic

original_triton_python_code:

import torch
import torch.nn as nn


class Critic(nn.Module):

    def __init__(self, observation_size, action_size):
        super().__init__()
        self.fc1 = nn.Linear(observation_size + action_size, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 1)

    def forward(self, x, action):
        h = torch.relu(self.fc1(torch.cat([x, action], dim=1)))
        h = torch.relu(self.fc2(h))
        return self.fc3(h)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'observation_size': 4, 'action_size': 4}]

optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)


@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (256, 8), (8, 1))
    assert_size_stride(primals_4, (256,), (1,))
    assert_size_stride(primals_5, (256, 256), (256, 1))
    assert_size_stride(primals_6, (256,), (1,))
    assert_size_stride(primals_7, (1, 256), (256, 1))
    assert_size_stride(primals_8, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32, XBLOCK=32, num_warps=1, num_stages=1)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 256), (1, 8), 0), out=buf1)
        del primals_3
        buf2 = buf1
        del buf1
        triton_poi_fused_relu_1[grid(1024)](buf2, primals_4, 1024, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_4
        buf3 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
        extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (256, 256), (1, 256), 0), out=buf3)
        buf4 = buf3
        del buf3
        triton_poi_fused_relu_1[grid(1024)](buf4, primals_6, 1024, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_6
        buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (256, 1), (1, 256), 0), alpha=1, beta=1, out=buf6)
        del primals_8
    return buf6, buf0, buf2, buf4, primals_7, primals_5


class CriticNew(nn.Module):

    def __init__(self, observation_size, action_size):
        super().__init__()
        self.fc1 = nn.Linear(observation_size + action_size, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 1)

    def forward(self, input_0, input_1):
        primals_3 = self.fc1.weight
        primals_4 = self.fc1.bias
        primals_5 = self.fc2.weight
        primals_6 = self.fc2.bias
        primals_7 = self.fc3.weight
        primals_8 = self.fc3.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
        return output[0]

repo_name: SeanNobel/d4rl-pybullet
module_name: Critic
synthetic: false
uuid: 14379
licenses: ["MIT"]
stars: 130
sha: 9f2f56c63bb7a80ebcbc4217cd7689e446aafd41
repo_link: https://github.com/SeanNobel/d4rl-pybullet/tree/9f2f56c63bb7a80ebcbc4217cd7689e446aafd41
--------------------------------------------------------------------

entry_point: HardSwish

original_triton_python_code:

import torch
import torch.nn as nn


class HardSwish(nn.Module):

    def __init__(self, inplace=False):
        super(HardSwish, self).__init__()
        self.act = nn.ReLU6(inplace)
    """forward"""

    def forward(self, x):
        return x * self.act(x + 3) / 6


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]

optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 3.0
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 6.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = tmp0 * tmp6
    tmp8 = 0.16666666666666666
    tmp9 = tmp7 * tmp8
    tl.store(out_ptr0 + x0, tmp9, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class HardSwishNew(nn.Module):

    def __init__(self, inplace=False):
        super(HardSwishNew, self).__init__()
        self.act = nn.ReLU6(inplace)
    """forward"""

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]

repo_name: SegmentationBLWX/sssegmentation
module_name: HardSwish
synthetic: false
uuid: 14380
licenses: ["MIT"]
stars: 411
sha: 0b2e3ff5abd7b97e15ac8daf63ea214688c26541
repo_link: https://github.com/SegmentationBLWX/sssegmentation/tree/0b2e3ff5abd7b97e15ac8daf63ea214688c26541
--------------------------------------------------------------------

entry_point: EqualizedConv2d

original_triton_python_code:

import math
import torch
import torch.nn as nn
import torch.utils.cpp_extension


@torch.no_grad()
def scaling_init(tensor, scale=1, dist='u'):
    fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(tensor)
    scale /= (fan_in + fan_out) / 2
    if dist == 'n':
        std = math.sqrt(scale)
        return tensor.normal_(0.0, std)
    elif dist == 'u':
        bound = math.sqrt(3 * scale)
        return tensor.uniform_(-bound, bound)


def conv(in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True, scale=1.0):
    _conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
    scaling_init(_conv.weight, scale)
    if _conv.bias is not None:
        nn.init.zeros_(_conv.bias)
    return _conv


class EqualizedLR(nn.Module):
    """
    equalized learning rate
    """

    def __init__(self, layer, gain=2):
        super(EqualizedLR, self).__init__()
        self.wscale = (gain / layer.weight[0].numel()) ** 0.5
        self.layer = layer

    def forward(self, x, gain=2):
        x = self.layer(x * self.wscale)
        return x


class EqualizedConv2d(nn.Module):
    """
    equalized convolutional layer
    """

    def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
        super().__init__()
        conv = nn.Conv2d(in_channels, out_channels, kernel_size, **kwargs)
        conv.weight.data.normal_(0, 1)
        conv.bias.data.fill_(0.0)
        self.conv = EqualizedLR(conv)

    def forward(self, x):
        x = self.conv(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]

optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
import torch.utils.cpp_extension

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.1767766952966369
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(16)](buf2, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
    return buf2, primals_2, buf0


@torch.no_grad()
def scaling_init(tensor, scale=1, dist='u'):
    fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(tensor)
    scale /= (fan_in + fan_out) / 2
    if dist == 'n':
        std = math.sqrt(scale)
        return tensor.normal_(0.0, std)
    elif dist == 'u':
        bound = math.sqrt(3 * scale)
        return tensor.uniform_(-bound, bound)


def conv(in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True, scale=1.0):
    _conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
    scaling_init(_conv.weight, scale)
    if _conv.bias is not None:
        nn.init.zeros_(_conv.bias)
    return _conv


class EqualizedLR(nn.Module):
    """
    equalized learning rate
    """

    def __init__(self, layer, gain=2):
        super(EqualizedLR, self).__init__()
        self.wscale = (gain / layer.weight[0].numel()) ** 0.5
        self.layer = layer

    def forward(self, x, gain=2):
        x = self.layer(x * self.wscale)
        return x


class EqualizedConv2dNew(nn.Module):
    """
    equalized convolutional layer
    """

    def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
        super().__init__()
        conv = nn.Conv2d(in_channels, out_channels, kernel_size, **kwargs)
        conv.weight.data.normal_(0, 1)
        conv.bias.data.fill_(0.0)
        self.conv = EqualizedLR(conv)

    def forward(self, input_0):
        primals_1 = self.conv.layer.weight
        primals_3 = self.conv.layer.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]

repo_name: STomoya/animeface
module_name: EqualizedConv2d
synthetic: false
uuid: 14381
licenses: ["MIT"]
stars: 61
sha: 37b3cd26097d7874559d4c152e41e5712b7a1a42
repo_link: https://github.com/STomoya/animeface/tree/37b3cd26097d7874559d4c152e41e5712b7a1a42
--------------------------------------------------------------------

entry_point: MinusRbfHSIC

original_triton_python_code:

import torch
import torch.nn as nn
import torch.utils.data


class HSIC(nn.Module):
    """Base class for the finite sample estimator of Hilbert-Schmidt Independence Criterion (HSIC)

    .. math:: HSIC(X, Y) := || C_{x, y} ||^2_{HS},

    where HSIC(X, Y) = 0 iff X and Y are independent.

    Empirically, we use the finite sample estimator of HSIC (with m observations) by,
    (1) biased estimator (HSIC_0)
        Gretton, Arthur, et al. "Measuring statistical dependence with Hilbert-Schmidt norms." 2005.
        :math:`(m - 1)^2 tr KHLH.`
        where :math:`K_{ij} = kernel_x(x_i, x_j)`, :math:`L_{ij} = kernel_y(y_i, y_j)`,
        :math:`H = 1 - m^{-1} 1 1` (hence, K, L, H are m by m matrices).
    (2) unbiased estimator (HSIC_1)
        Song, Le, et al. "Feature selection via dependence maximization." 2012.
        :math:`\\frac{1}{m (m - 3)} \\bigg[ tr(\\tilde K \\tilde L) + \\frac{1^\\top \\tilde K 1 1^\\top \\tilde L 1}{(m-1)(m-2)} - \\frac{2}{m-2} 1^\\top \\tilde K \\tilde L 1 \\bigg].`
        where :math:`\\tilde K` and :math:`\\tilde L` are related to K and L by setting the
        diagonal entries of :math:`\\tilde K_{ij}` and :math:`\\tilde L_{ij}` to zero.

    Parameters
    ----------
    sigma_x : float
        the kernel size of the kernel function for X.
    sigma_y : float
        the kernel size of the kernel function for Y.
    algorithm : str ('unbiased' / 'biased')
        the algorithm for the finite sample estimator. 'unbiased' is used for our paper.
    reduction : not used (for compatibility with other losses).
    """

    def __init__(self, sigma_x, sigma_y=None, algorithm='unbiased', reduction=None):
        super(HSIC, self).__init__()
        if sigma_y is None:
            sigma_y = sigma_x
        self.sigma_x = sigma_x
        self.sigma_y = sigma_y
        if algorithm == 'biased':
            self.estimator = self.biased_estimator
        elif algorithm == 'unbiased':
            self.estimator = self.unbiased_estimator
        else:
            raise ValueError('invalid estimator: {}'.format(algorithm))

    def _kernel_x(self, X):
        raise NotImplementedError

    def _kernel_y(self, Y):
        raise NotImplementedError

    def biased_estimator(self, input1, input2):
        """Biased estimator of Hilbert-Schmidt Independence Criterion
        Gretton, Arthur, et al. "Measuring statistical dependence with Hilbert-Schmidt norms." 2005.
        """
        K = self._kernel_x(input1)
        L = self._kernel_y(input2)
        KH = K - K.mean(0, keepdim=True)
        LH = L - L.mean(0, keepdim=True)
        N = len(input1)
        return torch.trace(KH @ LH / (N - 1) ** 2)

    def unbiased_estimator(self, input1, input2):
        """Unbiased estimator of Hilbert-Schmidt Independence Criterion
        Song, Le, et al. "Feature selection via dependence maximization." 2012.
        """
        kernel_XX = self._kernel_x(input1)
        kernel_YY = self._kernel_y(input2)
        tK = kernel_XX - torch.diag(kernel_XX)
        tL = kernel_YY - torch.diag(kernel_YY)
        N = len(input1)
        hsic = torch.trace(tK @ tL) + torch.sum(tK) * torch.sum(tL) / (N - 1) / (N - 2) - 2 * torch.sum(tK, 0).dot(torch.sum(tL, 0)) / (N - 2)
        return hsic / (N * (N - 3))

    def forward(self, input1, input2, **kwargs):
        return self.estimator(input1, input2)


class RbfHSIC(HSIC):
    """Radial Basis Function (RBF) kernel HSIC implementation."""

    def _kernel(self, X, sigma):
        X = X.view(len(X), -1)
        XX = X @ X.t()
        X_sqnorms = torch.diag(XX)
        X_L2 = -2 * XX + X_sqnorms.unsqueeze(1) + X_sqnorms.unsqueeze(0)
        gamma = 1 / (2 * sigma ** 2)
        kernel_XX = torch.exp(-gamma * X_L2)
        return kernel_XX

    def _kernel_x(self, X):
        return self._kernel(X, self.sigma_x)

    def _kernel_y(self, Y):
        return self._kernel(Y, self.sigma_y)


class MinusRbfHSIC(RbfHSIC):
    """``Minus'' RbfHSIC for the ``max'' optimization."""

    def forward(self, input1, input2, **kwargs):
        return -self.estimator(input1, input2)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'sigma_x': 4}]

optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_add_diagonal_copy_exp_mul_sub_sum_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r2 = rindex
    r1 = rindex // 4
    r0 = rindex % 4
    tmp0 = tl.load(in_ptr0 + r2, None)
    tmp3 = tl.load(in_ptr0 + 5 * r1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + 5 * r0, None, eviction_policy='evict_last')
    tmp1 = -2.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = -0.03125
    tmp8 = tmp6 * tmp7
    tmp9 = tl_math.exp(tmp8)
    tmp10 = tmp5 * tmp1
    tmp11 = tmp10 + tmp5
    tmp12 = tmp11 + tmp5
    tmp13 = tmp12 * tmp7
    tmp14 = tl_math.exp(tmp13)
    tmp15 = tmp9 - tmp14
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
    tmp18 = tl.sum(tmp16, 1)[:, None]
    tl.store(out_ptr0 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp15, None)
    tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp18, None)


@triton.jit
def triton_per_fused_add_div_dot_mul_neg_sub_sum_trace_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + 5 * r0, None, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + r0, None)
    tmp5 = tl.load(in_ptr1 + (4 + r0), None)
    tmp7 = tl.load(in_ptr1 + (8 + r0), None)
    tmp9 = tl.load(in_ptr1 + (12 + r0), None)
    tmp11 = tl.load(in_ptr2 + r0, None)
    tmp12 = tl.load(in_ptr2 + (4 + r0), None)
    tmp14 = tl.load(in_ptr2 + (8 + r0), None)
    tmp16 = tl.load(in_ptr2 + (12 + r0), None)
    tmp22 = tl.load(in_ptr3 + 0)
    tmp23 = tl.broadcast_to(tmp22, [XBLOCK, 1])
    tmp24 = tl.load(in_ptr4 + 0)
    tmp25 = tl.broadcast_to(tmp24, [XBLOCK, 1])
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.sum(tmp1, 1)[:, None]
    tmp6 = tmp4 + tmp5
    tmp8 = tmp6 + tmp7
    tmp10 = tmp8 + tmp9
    tmp13 = tmp11 + tmp12
    tmp15 = tmp13 + tmp14
    tmp17 = tmp15 + tmp16
    tmp18 = tmp10 * tmp17
    tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
    tmp21 = tl.sum(tmp19, 1)[:, None]
    tmp26 = tmp23 * tmp25
    tmp27 = 0.3333333333333333
    tmp28 = tmp26 * tmp27
    tmp29 = 0.5
    tmp30 = tmp28 * tmp29
    tmp31 = tmp3 + tmp30
    tmp32 = 2.0
    tmp33 = tmp21 * tmp32
    tmp34 = tmp33 * tmp29
    tmp35 = tmp31 - tmp34
    tmp36 = 0.25
    tmp37 = tmp35 * tmp36
    tmp38 = -tmp37
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp38, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(arg0_1, (4, 64), (64, 1), 0), reinterpret_tensor(arg0_1, (64, 4), (1, 64), 0), out=buf0)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf6 = empty_strided_cuda((), (), torch.float32)
        get_raw_stream(0)
        triton_per_fused_add_diagonal_copy_exp_mul_sub_sum_0[grid(1)](buf0, buf1, buf6, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
        buf2 = buf0
        del buf0
        extern_kernels.mm(reinterpret_tensor(arg1_1, (4, 64), (64, 1), 0), reinterpret_tensor(arg1_1, (64, 4), (1, 64), 0), out=buf2)
        del arg1_1
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf7 = empty_strided_cuda((), (), torch.float32)
        triton_per_fused_add_diagonal_copy_exp_mul_sub_sum_0[grid(1)](buf2, buf3, buf7, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
        buf4 = buf2
        del buf2
        extern_kernels.mm(buf1, buf3, out=buf4)
        buf5 = empty_strided_cuda((), (), torch.float32)
        buf9 = buf5
        del buf5
        triton_per_fused_add_div_dot_mul_neg_sub_sum_trace_1[grid(1)](buf9, buf4, buf1, buf3, buf6, buf7, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del buf1
        del buf3
        del buf4
        del buf6
        del buf7
    return buf9,


class HSIC(nn.Module):
    """Base class for the finite sample estimator of Hilbert-Schmidt Independence Criterion (HSIC)

    .. math:: HSIC(X, Y) := || C_{x, y} ||^2_{HS},

    where HSIC(X, Y) = 0 iff X and Y are independent.

    Empirically, we use the finite sample estimator of HSIC (with m observations) by,
    (1) biased estimator (HSIC_0)
        Gretton, Arthur, et al. "Measuring statistical dependence with Hilbert-Schmidt norms." 2005.
        :math:`(m - 1)^2 tr KHLH.`
        where :math:`K_{ij} = kernel_x(x_i, x_j)`, :math:`L_{ij} = kernel_y(y_i, y_j)`,
        :math:`H = 1 - m^{-1} 1 1` (hence, K, L, H are m by m matrices).
    (2) unbiased estimator (HSIC_1)
        Song, Le, et al. "Feature selection via dependence maximization." 2012.
        :math:`\\frac{1}{m (m - 3)} \\bigg[ tr(\\tilde K \\tilde L) + \\frac{1^\\top \\tilde K 1 1^\\top \\tilde L 1}{(m-1)(m-2)} - \\frac{2}{m-2} 1^\\top \\tilde K \\tilde L 1 \\bigg].`
        where :math:`\\tilde K` and :math:`\\tilde L` are related to K and L by setting the
        diagonal entries of :math:`\\tilde K_{ij}` and :math:`\\tilde L_{ij}` to zero.

    Parameters
    ----------
    sigma_x : float
        the kernel size of the kernel function for X.
    sigma_y : float
        the kernel size of the kernel function for Y.
    algorithm : str ('unbiased' / 'biased')
        the algorithm for the finite sample estimator. 'unbiased' is used for our paper.
    reduction : not used (for compatibility with other losses).
    """

    def __init__(self, sigma_x, sigma_y=None, algorithm='unbiased', reduction=None):
        super(HSIC, self).__init__()
        if sigma_y is None:
            sigma_y = sigma_x
        self.sigma_x = sigma_x
        self.sigma_y = sigma_y
        if algorithm == 'biased':
            self.estimator = self.biased_estimator
        elif algorithm == 'unbiased':
            self.estimator = self.unbiased_estimator
        else:
            raise ValueError('invalid estimator: {}'.format(algorithm))

    def _kernel_x(self, X):
        raise NotImplementedError

    def _kernel_y(self, Y):
        raise NotImplementedError

    def biased_estimator(self, input1, input2):
        """Biased estimator of Hilbert-Schmidt Independence Criterion
        Gretton, Arthur, et al. "Measuring statistical dependence with Hilbert-Schmidt norms." 2005.
        """
        K = self._kernel_x(input1)
        L = self._kernel_y(input2)
        KH = K - K.mean(0, keepdim=True)
        LH = L - L.mean(0, keepdim=True)
        N = len(input1)
        return torch.trace(KH @ LH / (N - 1) ** 2)

    def unbiased_estimator(self, input1, input2):
        """Unbiased estimator of Hilbert-Schmidt Independence Criterion
        Song, Le, et al. "Feature selection via dependence maximization." 2012.
        """
        kernel_XX = self._kernel_x(input1)
        kernel_YY = self._kernel_y(input2)
        tK = kernel_XX - torch.diag(kernel_XX)
        tL = kernel_YY - torch.diag(kernel_YY)
        N = len(input1)
        hsic = torch.trace(tK @ tL) + torch.sum(tK) * torch.sum(tL) / (N - 1) / (N - 2) - 2 * torch.sum(tK, 0).dot(torch.sum(tL, 0)) / (N - 2)
        return hsic / (N * (N - 3))

    def forward(self, input1, input2, **kwargs):
        return self.estimator(input1, input2)


class RbfHSIC(HSIC):
    """Radial Basis Function (RBF) kernel HSIC implementation."""

    def _kernel(self, X, sigma):
        X = X.view(len(X), -1)
        XX = X @ X.t()
        X_sqnorms = torch.diag(XX)
        X_L2 = -2 * XX + X_sqnorms.unsqueeze(1) + X_sqnorms.unsqueeze(0)
        gamma = 1 / (2 * sigma ** 2)
        kernel_XX = torch.exp(-gamma * X_L2)
        return kernel_XX

    def _kernel_x(self, X):
        return self._kernel(X, self.sigma_x)

    def _kernel_y(self, Y):
        return self._kernel(Y, self.sigma_y)


class MinusRbfHSICNew(RbfHSIC):
    """``Minus'' RbfHSIC for the ``max'' optimization."""

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]

repo_name: SanghyukChun/rebias
module_name: MinusRbfHSIC
synthetic: false
uuid: 14382
licenses: ["MIT"]
stars: 129
sha: 6a4f6abdd68e080a08737d93a3c4b43e0f0ce055
repo_link: https://github.com/SanghyukChun/rebias/tree/6a4f6abdd68e080a08737d93a3c4b43e0f0ce055
--------------------------------------------------------------------

entry_point: HardSigmoid

original_triton_python_code:

import torch
import torch.nn as nn


class HardSigmoid(nn.Module):

    def __init__(self, bias=1.0, divisor=2.0, min_value=0.0, max_value=1.0):
        super(HardSigmoid, self).__init__()
        assert divisor != 0, 'divisor is not allowed to be equal to zero'
        self.bias = bias
        self.divisor = divisor
        self.min_value = min_value
        self.max_value = max_value
    """forward"""

    def forward(self, x):
        x = (x + self.bias) / self.divisor
        return x.clamp_(self.min_value, self.max_value)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]

optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_clamp_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 + tmp1
    tmp3 = 0.5
    tmp4 = tmp2 * tmp3
    tmp5 = 0.0
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tmp7 = triton_helpers.minimum(tmp6, tmp1)
    tl.store(out_ptr0 + x0, tmp7, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_clamp_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class HardSigmoidNew(nn.Module):

    def __init__(self, bias=1.0, divisor=2.0, min_value=0.0, max_value=1.0):
        super(HardSigmoidNew, self).__init__()
        assert divisor != 0, 'divisor is not allowed to be equal to zero'
        self.bias = bias
        self.divisor = divisor
        self.min_value = min_value
        self.max_value = max_value
    """forward"""

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]

repo_name: SegmentationBLWX/sssegmentation
module_name: HardSigmoid
synthetic: false
uuid: 14383
licenses: ["MIT"]
stars: 411
sha: 0b2e3ff5abd7b97e15ac8daf63ea214688c26541
repo_link: https://github.com/SegmentationBLWX/sssegmentation/tree/0b2e3ff5abd7b97e15ac8daf63ea214688c26541
--------------------------------------------------------------------

entry_point: FromRGB

original_triton_python_code:

import math
import torch
import torch.nn as nn
import torch.utils.cpp_extension


@torch.no_grad()
def scaling_init(tensor, scale=1, dist='u'):
    fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(tensor)
    scale /= (fan_in + fan_out) / 2
    if dist == 'n':
        std = math.sqrt(scale)
        return tensor.normal_(0.0, std)
    elif dist == 'u':
        bound = math.sqrt(3 * scale)
        return tensor.uniform_(-bound, bound)


def conv(in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True, scale=1.0):
    _conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
    scaling_init(_conv.weight, scale)
    if _conv.bias is not None:
        nn.init.zeros_(_conv.bias)
    return _conv


class EqualizedLR(nn.Module):
    """
    equalized learning rate
    """

    def __init__(self, layer, gain=2):
        super(EqualizedLR, self).__init__()
        self.wscale = (gain / layer.weight[0].numel()) ** 0.5
        self.layer = layer

    def forward(self, x, gain=2):
        x = self.layer(x * self.wscale)
        return x


class EqualizedConv2d(nn.Module):
    """
    equalized convolutional layer
    """

    def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
        super().__init__()
        conv = nn.Conv2d(in_channels, out_channels, kernel_size, **kwargs)
        conv.weight.data.normal_(0, 1)
        conv.bias.data.fill_(0.0)
        self.conv = EqualizedLR(conv)

    def forward(self, x):
        x = self.conv(x)
        return x


class FromRGB(nn.Module):
    """
    from rgb
    """

    def __init__(self, out_channels, in_channels=3):
        super(FromRGB, self).__init__()
        self.from_rgb = EqualizedConv2d(in_channels, out_channels, 1)

    def forward(self, x):
        x = self.from_rgb(x)
        return x


def get_inputs():
    return [torch.rand([4, 3, 64, 64])]


def get_init_inputs():
    return [[], {'out_channels': 4}]

optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
import torch.utils.cpp_extension

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, None)
    tmp1 = 0.816496580927726
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, None)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 4096 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, None)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_2, (4, 3, 1, 1), (3, 1, 1, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(49152)](primals_1, buf0, 49152, XBLOCK=512, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 64, 64), (16384, 4096, 64, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(65536)](buf2, primals_3, 65536, XBLOCK=512, num_warps=4, num_stages=1)
        del primals_3
    return buf2, primals_2, buf0


@torch.no_grad()
def scaling_init(tensor, scale=1, dist='u'):
    fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(tensor)
    scale /= (fan_in + fan_out) / 2
    if dist == 'n':
        std = math.sqrt(scale)
        return tensor.normal_(0.0, std)
    elif dist == 'u':
        bound = math.sqrt(3 * scale)
        return tensor.uniform_(-bound, bound)


def conv(in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True, scale=1.0):
    _conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
    scaling_init(_conv.weight, scale)
    if _conv.bias is not None:
        nn.init.zeros_(_conv.bias)
    return _conv


class EqualizedLR(nn.Module):
    """
    equalized learning rate
    """

    def __init__(self, layer, gain=2):
        super(EqualizedLR, self).__init__()
        self.wscale = (gain / layer.weight[0].numel()) ** 0.5
        self.layer = layer

    def forward(self, x, gain=2):
        x = self.layer(x * self.wscale)
        return x


class EqualizedConv2d(nn.Module):
    """
    equalized convolutional layer
    """

    def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
        super().__init__()
        conv = nn.Conv2d(in_channels, out_channels, kernel_size, **kwargs)
        conv.weight.data.normal_(0, 1)
        conv.bias.data.fill_(0.0)
        self.conv = EqualizedLR(conv)

    def forward(self, x):
        x = self.conv(x)
        return x


class FromRGBNew(nn.Module):
    """
    from rgb
    """

    def __init__(self, out_channels, in_channels=3):
        super(FromRGBNew, self).__init__()
        self.from_rgb = EqualizedConv2d(in_channels, out_channels, 1)

    def forward(self, input_0):
        primals_2 = self.from_rgb.conv.layer.weight
        primals_3 = self.from_rgb.conv.layer.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]

repo_name: STomoya/animeface
module_name: FromRGB
synthetic: false
uuid: 14384
licenses: ["MIT"]
stars: 61
sha: 37b3cd26097d7874559d4c152e41e5712b7a1a42
repo_link: https://github.com/STomoya/animeface/tree/37b3cd26097d7874559d4c152e41e5712b7a1a42
--------------------------------------------------------------------

entry_point: Attention

original_triton_python_code:

import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.onnx
import torch.nn.parallel


class Attention(nn.Module):

    def __init__(self, dim):
        super(Attention, self).__init__()
        self.linear_out = nn.Linear(dim * 2, dim)
        self.mask = None

    def set_mask(self, mask):
        """
        Sets indices to be masked

        Args:
            mask (torch.Tensor): tensor containing indices to be masked
        """
        self.mask = mask

    def forward(self, output, context):
        batch_size = output.size(0)
        hidden_size = output.size(2)
        input_size = context.size(1)
        attn = torch.bmm(output, context.transpose(1, 2))
        attn = F.softmax(attn.view(-1, input_size), dim=1).view(batch_size, -1, input_size)
        mix = torch.bmm(attn, context)
        combined = torch.cat((mix, output), dim=2)
        output = torch.tanh(self.linear_out(combined.view(-1, 2 * hidden_size))).view(batch_size, -1, hidden_size)
        return output, attn


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4}]

optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.onnx
import torch.nn.parallel

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)


@triton.jit
def triton_poi_fused_tanh_tanh_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tmp4 = tmp3 * tmp3
    tmp5 = 1.0
    tmp6 = tmp5 - tmp4
    tl.store(in_out_ptr0 + x2, tmp3, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4, 8), (8, 1))
    assert_size_stride(primals_4, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(primals_1, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), out=buf0)
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
        del buf0
        triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
        del buf1
        extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), primals_2, out=buf3)
        del primals_2
        buf4 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
        triton_poi_fused_cat_2[grid(128)](buf3, primals_1, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0)
        del buf3
        extern_kernels.mm(reinterpret_tensor(buf4, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf5)
        del primals_3
        buf6 = buf5
        del buf5
        buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        triton_poi_fused_tanh_tanh_backward_3[grid(64)](buf6, primals_4, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_4
    return reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 8), (8, 1), 0), buf7


class AttentionNew(nn.Module):

    def __init__(self, dim):
        super(AttentionNew, self).__init__()
        self.linear_out = nn.Linear(dim * 2, dim)
        self.mask = None

    def set_mask(self, mask):
        """
        Sets indices to be masked

        Args:
            mask (torch.Tensor): tensor containing indices to be masked
        """
        self.mask = mask

    def forward(self, input_0, input_1):
        primals_3 = self.linear_out.weight
        primals_4 = self.linear_out.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0], output[1]

repo_name: Samteymoori/pepper
module_name: Attention
synthetic: false
uuid: 14385
licenses: ["MIT"]
stars: 155
sha: 734d226de47a855952e3b58145c1fcfbe221d3b4
repo_link: https://github.com/Samteymoori/pepper/tree/734d226de47a855952e3b58145c1fcfbe221d3b4
--------------------------------------------------------------------

entry_point: Mnist_NN

original_triton_python_code:

import torch
import torch.nn as nn
import torch.nn.functional as F


class Mnist_NN(nn.Module):

    def __init__(self):
        super().__init__()
        self.lin1 = nn.Linear(784, 512, bias=True)
        self.lin2 = nn.Linear(512, 256, bias=True)
        self.lin3 = nn.Linear(256, 10, bias=True)

    def forward(self, xb):
        x = xb.view(-1, 784)
        x = F.relu(self.lin1(x))
        x = F.relu(self.lin2(x))
        return self.lin3(x)


def get_inputs():
    return [torch.rand([4, 784])]


def get_init_inputs():
    return [[], {}]

optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, None)


@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 784), (784, 1))
    assert_size_stride(primals_2, (512, 784), (784, 1))
    assert_size_stride(primals_3, (512,), (1,))
    assert_size_stride(primals_4, (256, 512), (512, 1))
    assert_size_stride(primals_5, (256,), (1,))
    assert_size_stride(primals_6, (10, 256), (256, 1))
    assert_size_stride(primals_7, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 512), (1, 784), 0), out=buf0)
        del primals_2
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_relu_0[grid(2048)](buf1, primals_3, 2048, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
        buf2 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
        extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (512, 256), (1, 512), 0), out=buf2)
        buf3 = buf2
        del buf2
        triton_poi_fused_relu_1[grid(1024)](buf3, primals_5, 1024, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (256, 10), (1, 256), 0), alpha=1, beta=1, out=buf4)
        del primals_7
    return buf4, primals_1, buf1, buf3, primals_6, primals_4


class Mnist_NNNew(nn.Module):

    def __init__(self):
        super().__init__()
        self.lin1 = nn.Linear(784, 512, bias=True)
        self.lin2 = nn.Linear(512, 256, bias=True)
        self.lin3 = nn.Linear(256, 10, bias=True)

    def forward(self, input_0):
        primals_2 = self.lin1.weight
        primals_3 = self.lin1.bias
        primals_4 = self.lin2.weight
        primals_5 = self.lin2.bias
        primals_6 = self.lin3.weight
        primals_7 = self.lin3.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
        return output[0]

repo_name: Sara-Rajaee/Deep_learning_explorations
module_name: Mnist_NN
synthetic: false
uuid: 14386
licenses: ["MIT"]
stars: 154
sha: d0c527f1cde61eea90bda01b073c5ac24565ebf1
repo_link: https://github.com/Sara-Rajaee/Deep_learning_explorations/tree/d0c527f1cde61eea90bda01b073c5ac24565ebf1
--------------------------------------------------------------------

entry_point: ResNetBlock

original_triton_python_code:

import torch
from torch import nn
import torch.utils.data
import torch.nn.parallel
import torch.utils.data.distributed


class ResNetBlock(nn.Module):

    def __init__(self, in_channel, out_channel, stride, downsample, pad, dilation):
        super(ResNetBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=1, padding=1)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        out = self.conv1(x)
        out = x + out
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channel': 4, 'out_channel': 4, 'stride': 1, 'downsample': 4, 'pad': 4, 'dilation': 1}]

optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
import torch.nn.parallel
import torch.utils.data.distributed

assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_out_ptr0 + x3, xmask)
    tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_convolution_0[grid(256)](buf1, primals_3, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
    return buf1, primals_1, primals_3


class ResNetBlockNew(nn.Module):

    def __init__(self, in_channel, out_channel, stride, downsample, pad, dilation):
        super(ResNetBlockNew, self).__init__()
        self.conv1 = nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=1, padding=1)
        self.downsample = downsample
        self.stride = stride

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]

repo_name: Sarah20187/X-StereoLab
module_name: ResNetBlock
synthetic: false
uuid: 14387
licenses: ["MIT"]
stars: 192
sha: 9ae8c1413307e7df91b14a7f31e8a95f9e5754f9
repo_link: https://github.com/Sarah20187/X-StereoLab/tree/9ae8c1413307e7df91b14a7f31e8a95f9e5754f9
--------------------------------------------------------------------

entry_point: SpatialGatherModule

original_triton_python_code:

import torch
import torch.nn.functional as F
import torch.nn as nn


class SpatialGatherModule(nn.Module):

    def __init__(self, scale=1, **kwargs):
        super(SpatialGatherModule, self).__init__()
        self.scale = scale
    """forward"""

    def forward(self, features, probs):
        batch_size, num_classes, _h, _w = probs.size()
        probs = probs.view(batch_size, num_classes, -1)
        features = features.view(batch_size, features.size(1), -1)
        features = features.permute(0, 2, 1)
        probs = F.softmax(self.scale * probs, dim=2)
        ocr_context = torch.matmul(probs, features)
        ocr_context = ocr_context.permute(0, 2, 1).contiguous().unsqueeze(3)
        return ocr_context


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]

optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.where(xmask, tmp3, float('-inf'))
    tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
    tmp7 = tmp2 - tmp6
    tmp8 = tmp7 * tmp1
    tmp9 = tl_math.exp(tmp8)
    tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
    tmp12 = tl.where(xmask, tmp10, 0)
    tmp13 = tl.sum(tmp12, 1)[:, None]
    tmp14 = tmp9 / tmp13
    tl.store(out_ptr2 + (r1 + 16 * x0), tmp14, xmask)


@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf2 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        get_raw_stream(0)
        triton_per_fused__softmax_0[grid(16)](arg0_1, buf2, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
        del arg0_1
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf2, reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1, 16), 0), out=buf3)
        del arg1_1
        del buf2
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_clone_1[grid(16, 4)](buf3, buf4, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        del buf3
    return reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0),


class SpatialGatherModuleNew(nn.Module):

    def __init__(self, scale=1, **kwargs):
        super(SpatialGatherModuleNew, self).__init__()
        self.scale = scale
    """forward"""

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]

repo_name: SegmentationBLWX/sssegmentation
module_name: SpatialGatherModule
synthetic: false
uuid: 14388
licenses: ["MIT"]
stars: 411
sha: 0b2e3ff5abd7b97e15ac8daf63ea214688c26541
repo_link: https://github.com/SegmentationBLWX/sssegmentation/tree/0b2e3ff5abd7b97e15ac8daf63ea214688c26541
--------------------------------------------------------------------

entry_point: HardSigmoid

original_triton_python_code:

import torch
import torch.nn as nn


class HardSigmoid(nn.Module):
    """Implements the Hard Mish activation module from `"H-Mish" <https://github.com/digantamisra98/H-Mish>`_

    This activation is computed as follows:

    .. math::
        f(x) = \\frac{x}{2} \\cdot \\min(2, \\max(0, x + 2))
    """

    def __init__(self, inplace: 'bool'=False) -> None:
        super().__init__()
        self.inplace = inplace

    def forward(self, x):
        return 0.5 * (x / (1 + torch.abs(x))) + 0.5


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]

optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_abs_add_div_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl_math.abs(tmp0)
    tmp2 = 1.0
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 / tmp3
    tmp5 = 0.5
    tmp6 = tmp4 * tmp5
    tmp7 = tmp6 + tmp5
    tl.store(out_ptr0 + x0, tmp7, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_abs_add_div_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class HardSigmoidNew(nn.Module):
    """Implements the Hard Mish activation module from `"H-Mish" <https://github.com/digantamisra98/H-Mish>`_

    This activation is computed as follows:

    .. math::
        f(x) = \\frac{x}{2} \\cdot \\min(2, \\max(0, x + 2))
    """

    def __init__(self, inplace: 'bool'=False) -> None:
        super().__init__()
        self.inplace = inplace

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]

repo_name: SevenMoGod/movenet.pytorch
module_name: HardSigmoid
synthetic: false
uuid: 14389
licenses: ["MIT"]
stars: 87
sha: 95ec8535245228aa4335243e68722810e50bcaf8
repo_link: https://github.com/SevenMoGod/movenet.pytorch/tree/95ec8535245228aa4335243e68722810e50bcaf8
ChannelAttentionModule
import torch import torch.nn.functional as F import torch.nn as nn class Scale(nn.Module): def __init__(self, scale=1.0): super(Scale, self).__init__() self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float)) """forward""" def forward(self, x): return x * self.scale class ChannelAttentionModule(nn.Module): def __init__(self): super(ChannelAttentionModule, self).__init__() self.gamma = Scale(scale=0) """forward""" def forward(self, x): batch_size, channels, height, width = x.size() proj_query = x.view(batch_size, channels, -1) proj_key = x.view(batch_size, channels, -1).permute(0, 2, 1) energy = torch.bmm(proj_query, proj_key) energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy ) - energy attention = F.softmax(energy_new, dim=-1) proj_value = x.view(batch_size, channels, -1) out = torch.bmm(attention, proj_value) out = out.view(batch_size, channels, height, width) out = self.gamma(out) + x return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + x2, xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp8 = tmp6 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp3 = tmp0 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (), ()) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) 
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(primals_1, (4, 16, 4), (64, 1, 16), 0), out=buf0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) extern_kernels.bmm(buf3, reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), out=buf4) del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_3[grid(256)](buf4, primals_2, primals_1, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf5, buf4 class Scale(nn.Module): def __init__(self, scale=1.0): super(Scale, self).__init__() self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float)) """forward""" def forward(self, x): return x * self.scale class ChannelAttentionModuleNew(nn.Module): def __init__(self): super(ChannelAttentionModuleNew, self).__init__() self.gamma = Scale(scale=0) """forward""" def forward(self, input_0): primals_2 = self.gamma.scale primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
SegmentationBLWX/sssegmentation
ChannelAttentionModule
false
14390
[ "MIT" ]
411
0b2e3ff5abd7b97e15ac8daf63ea214688c26541
https://github.com/SegmentationBLWX/sssegmentation/tree/0b2e3ff5abd7b97e15ac8daf63ea214688c26541
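A dense-PyTorch sketch of the forward pass this record compiles, reconstructed from the kernel sequence above (bmm, max-subtracted softmax, bmm, gamma-scaled residual); the standalone function form is illustrative, not part of the source repo:

import torch
import torch.nn.functional as F

def channel_attention_reference(x, gamma):
    # x: (B, C, H, W); gamma: scalar tensor (the Scale parameter, initialized to 0)
    b, c, h, w = x.shape
    proj = x.view(b, c, h * w)
    energy = torch.bmm(proj, proj.transpose(1, 2))         # (B, C, C) channel affinities, the first bmm
    energy = energy.max(dim=-1, keepdim=True)[0] - energy  # max subtraction, triton_poi_fused_sub_0
    attn = F.softmax(energy, dim=-1)                       # the two _softmax kernels
    out = torch.bmm(attn, proj).view(b, c, h, w)           # second bmm
    return gamma * out + x                                 # triton_poi_fused_add_mul_3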
FeatureWiseAffine
import torch class BaseModule(torch.nn.Module): def __init__(self): super(BaseModule, self).__init__() @property def nparams(self): return sum(p.numel() for p in self.parameters() if p.requires_grad) class FeatureWiseAffine(BaseModule): def __init__(self): super(FeatureWiseAffine, self).__init__() def forward(self, x, scale, shift): outputs = scale * x + shift return outputs def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp3 = tl.load(in_ptr2 + x0, xmask) tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(256)](arg0_1, arg1_1, arg2_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf0, class BaseModule(torch.nn.Module): def __init__(self): super(BaseModule, self).__init__() @property def nparams(self): return sum(p.numel() for p in self.parameters() if p.requires_grad) class FeatureWiseAffineNew(BaseModule): def __init__(self): super(FeatureWiseAffineNew, self).__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
Seungwoo0326/WaveGrad2-1
FeatureWiseAffine
false
14391
[ "MIT" ]
45
3b202201348449b89353f28bce1596ca7939a810
https://github.com/Seungwoo0326/WaveGrad2-1/tree/3b202201348449b89353f28bce1596ca7939a810
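A quick equivalence check for this record, assuming a CUDA device and that FeatureWiseAffineNew from the field above is in scope:

import torch

m = FeatureWiseAffineNew()
x, scale, shift = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
assert torch.allclose(m(x, scale, shift), scale * x + shift)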
MyLinear
import torch from torch import nn class MyLinear(nn.Module): def __init__(self, inp, outp): super(MyLinear, self).__init__() self.w = nn.Parameter(torch.randn(outp, inp)) self.b = nn.Parameter(torch.randn(outp)) def forward(self, x): x = x @ self.w.t() + self.b return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'inp': 4, 'outp': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf1, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0) class MyLinearNew(nn.Module): def __init__(self, inp, outp): super(MyLinearNew, self).__init__() self.w = nn.Parameter(torch.randn(outp, inp)) self.b = nn.Parameter(torch.randn(outp)) def forward(self, input_0): primals_1 = self.w primals_3 = self.b primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Shadowalker1995/Tutorial-Resource
MyLinear
false
14392
[ "Apache-2.0" ]
362
71fe3d521cf9971f708fa9978e9c685c0dda6ba6
https://github.com/Shadowalker1995/Tutorial-Resource/tree/71fe3d521cf9971f708fa9978e9c685c0dda6ba6
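The module's x @ w.t() + b is exactly F.linear; a one-line sanity check of the identity that the compiled mm/add pair relies on:

import torch
import torch.nn.functional as F

w, b, x = torch.randn(4, 4), torch.randn(4), torch.rand(4, 4, 4, 4)
assert torch.allclose(x @ w.t() + b, F.linear(x, w, b), atol=1e-6)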
GLU
import torch import torch.nn as nn class GLU(nn.Module): def __init__(self): super(GLU, self).__init__() def forward(self, x): nc = x.size(1) assert nc % 2 == 0, 'channels dont divide 2!' nc = int(nc / 2) return x[:, :nc] * torch.sigmoid(x[:, nc:]) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp2 = tl.sigmoid(tmp1) tmp3 = tmp0 * tmp2 tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sigmoid_0[grid(128)](arg0_1, buf0, 128, XBLOCK =128, num_warps=4, num_stages=1) del arg0_1 return buf0, class GLUNew(nn.Module): def __init__(self): super(GLUNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
SeungyounShin/c3-gan
GLU
false
14393
[ "BSD-2-Clause" ]
105
1fae645674c896b4bcb93e910034519f470a6a96
https://github.com/SeungyounShin/c3-gan/tree/1fae645674c896b4bcb93e910034519f470a6a96
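The halved-channel sigmoid gate above matches torch.nn.functional.glu along dim=1; a small check:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
nc = x.size(1) // 2
assert torch.allclose(x[:, :nc] * torch.sigmoid(x[:, nc:]), F.glu(x, dim=1))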
D_UpBlock
import torch import torch.utils.data from torchvision.transforms import * class ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None): super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.conv(x)) else: out = self.conv(x) if self.activation is not None: return self.act(out) else: return out class DeconvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='prelu', norm=None): super(DeconvBlock, self).__init__() self.deconv = torch.nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.deconv(x)) else: out = self.deconv(x) if self.activation is not None: return self.act(out) else: return out class D_UpBlock(torch.nn.Module): def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, num_stages=1, bias=True, activation='prelu', norm=None): super(D_UpBlock, self).__init__() self.conv = ConvBlock(num_filter * num_stages, num_filter, 1, 1, 0, activation, norm=None) self.up_conv1 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) self.up_conv2 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) self.up_conv3 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) def forward(self, x): x = self.conv(x) h0 = self.up_conv1(x) l0 = self.up_conv2(h0) h1 = self.up_conv3(l0 - x) return h1 + h0 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_filter': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 4 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp8, None) @triton.jit def triton_poi_fused__prelu_kernel_convolution_sub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr2 + x3, xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tmp10 = tmp8 - tmp9 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused__prelu_kernel_add_convolution_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 4 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr2 + x3, None) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tmp10 = tmp8 + tmp9 tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp10, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 4, 8, 8), (256, 64, 8, 1)) 
assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (4, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (1,), (1,)) assert_size_stride(primals_11, (4, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf1, primals_2, primals_4, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_5, stride=(4, 4), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 16, 16), (1024, 256, 16, 1)) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch .float32) triton_poi_fused__prelu_kernel_convolution_1[grid(4096)](buf4, primals_6, primals_7, buf5, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(4, 4), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = buf6 del buf6 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_sub_2[grid(256)](buf7, primals_9, primals_10, buf2, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_9 buf9 = extern_kernels.convolution(buf8, primals_11, stride=(4, 4), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 16, 16), (1024, 256, 16, 1)) buf10 = buf9 del buf9 buf11 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32) triton_poi_fused__prelu_kernel_add_convolution_3[grid(4096)](buf10, primals_12, primals_13, buf5, buf11, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_12 return (buf11, primals_1, primals_3, primals_4, primals_5, primals_7, primals_8, primals_10, primals_11, primals_13, buf1, buf2, buf4, buf5, buf7, buf8, buf10) class ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None): super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.conv(x)) else: out = self.conv(x) if self.activation is not None: return self.act(out) else: return out class DeconvBlock(torch.nn.Module): def __init__(self, input_size, output_size, 
kernel_size=4, stride=2, padding=1, bias=True, activation='prelu', norm=None): super(DeconvBlock, self).__init__() self.deconv = torch.nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.deconv(x)) else: out = self.deconv(x) if self.activation is not None: return self.act(out) else: return out class D_UpBlockNew(torch.nn.Module): def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, num_stages=1, bias=True, activation='prelu', norm=None): super(D_UpBlockNew, self).__init__() self.conv = ConvBlock(num_filter * num_stages, num_filter, 1, 1, 0, activation, norm=None) self.up_conv1 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) self.up_conv2 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) self.up_conv3 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) def forward(self, input_0): primals_1 = self.conv.conv.weight primals_2 = self.conv.conv.bias primals_4 = self.conv.act.weight primals_5 = self.up_conv1.deconv.weight primals_6 = self.up_conv1.deconv.bias primals_7 = self.up_conv1.act.weight primals_8 = self.up_conv2.conv.weight primals_9 = self.up_conv2.conv.bias primals_10 = self.up_conv2.act.weight primals_11 = self.up_conv3.deconv.weight primals_12 = self.up_conv3.deconv.bias primals_13 = self.up_conv3.act.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
RyanMoussouni/iSeeBetter
D_UpBlock
false
14394
[ "MIT" ]
327
af193ae0852f8e477fcd6875dce874eb5092a24a
https://github.com/RyanMoussouni/iSeeBetter/tree/af193ae0852f8e477fcd6875dce874eb5092a24a
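A quick shape check of the back-projection block, assuming the record's D_UpBlock class is in scope (a sketch, not part of the dataset):

import torch

blk = D_UpBlock(num_filter=4)
y = blk(torch.rand(4, 4, 4, 4))
# ConvTranspose2d with k=8, s=4, p=2: (4 - 1) * 4 - 2 * 2 + 8 = 16
assert y.shape == (4, 4, 16, 16)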
JointBoneLoss
import torch class JointBoneLoss(torch.nn.Module): def __init__(self, joint_num): super(JointBoneLoss, self).__init__() id_i, id_j = [], [] for i in range(joint_num): for j in range(i + 1, joint_num): id_i.append(i) id_j.append(j) self.id_i = id_i self.id_j = id_j def forward(self, joint_out, joint_gt): J = torch.norm(joint_out[:, self.id_i, :] - joint_out[:, self.id_j, :], p=2, dim=-1, keepdim=False) Y = torch.norm(joint_gt[:, self.id_i, :] - joint_gt[:, self.id_j, : ], p=2, dim=-1, keepdim=False) loss = torch.abs(J - Y) loss = torch.sum(loss) / joint_out.shape[0] / len(self.id_i) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'joint_num': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_red_fused_abs_div_index_linalg_vector_norm_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl. constexpr): rnumel = 96 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rbase = tl.arange(0, RBLOCK)[None, :] _tmp65 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex // 4 % 6 r0 = rindex % 4 r2 = rindex // 24 tmp0 = r1 tmp1 = tl.full([1, 1], 3, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1, 1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1, 1], 2, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tl.full([1, 1], 0, tl.int64) tmp8 = tl.where(tmp6, tmp7, tmp7) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tl.full([1, 1], 4, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.full([1, 1], 5, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.where(tmp13, tmp3, tmp5) tmp15 = tl.where(tmp11, tmp3, tmp14) tmp16 = tl.where(tmp2, tmp9, tmp15) tmp17 = tl.load(in_ptr0 + (4 * r0 + 16 * tmp16 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0) tmp18 = tl.where(tmp6, tmp5, tmp1) tmp19 = tl.where(tmp4, tmp3, tmp18) tmp20 = tl.where(tmp13, tmp1, tmp1) tmp21 = tl.where(tmp11, tmp5, tmp20) tmp22 = tl.where(tmp2, tmp19, tmp21) tmp23 = tl.load(in_ptr0 + (4 * r0 + 16 * tmp22 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0) tmp24 = tmp17 - tmp23 tmp25 = tmp24 * tmp24 tmp26 = tl.load(in_ptr0 + (1 + 4 * r0 + 16 * tmp16 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0) tmp27 = tl.load(in_ptr0 + (1 + 4 * r0 + 16 * tmp22 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0) tmp28 = tmp26 - tmp27 tmp29 = tmp28 * tmp28 tmp30 = tmp25 + tmp29 tmp31 = tl.load(in_ptr0 + (2 + 4 * r0 + 16 * tmp16 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0) tmp32 = tl.load(in_ptr0 + (2 + 4 * r0 + 16 * tmp22 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0) tmp33 = tmp31 - tmp32 tmp34 = tmp33 * tmp33 tmp35 = tmp30 + tmp34 tmp36 = tl.load(in_ptr0 + (3 + 4 * r0 + 16 * tmp16 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0) tmp37 = tl.load(in_ptr0 + (3 + 4 * r0 + 16 * tmp22 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0) tmp38 = tmp36 - tmp37 tmp39 = tmp38 * tmp38 tmp40 = tmp35 + tmp39 tmp41 = tl.load(in_ptr1 + (4 * r0 + 16 * tmp16 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0) tmp42 = tl.load(in_ptr1 + (4 * r0 + 16 * tmp22 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0) tmp43 = tmp41 - tmp42 tmp44 = tmp43 * tmp43 tmp45 = tl.load(in_ptr1 + (1 + 4 * r0 + 16 * tmp16 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0) tmp46 = tl.load(in_ptr1 + (1 + 4 * r0 + 16 * tmp22 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0) tmp47 = tmp45 - tmp46 tmp48 = tmp47 * tmp47 tmp49 = tmp44 + tmp48 tmp50 = tl.load(in_ptr1 + (2 + 4 * r0 + 16 * tmp16 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0) tmp51 = tl.load(in_ptr1 + (2 + 4 * r0 + 16 * tmp22 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0) tmp52 = tmp50 - tmp51 tmp53 = tmp52 * tmp52 tmp54 = tmp49 + 
tmp53 tmp55 = tl.load(in_ptr1 + (3 + 4 * r0 + 16 * tmp16 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0) tmp56 = tl.load(in_ptr1 + (3 + 4 * r0 + 16 * tmp22 + 64 * r2), rmask, eviction_policy='evict_last', other=0.0) tmp57 = tmp55 - tmp56 tmp58 = tmp57 * tmp57 tmp59 = tmp54 + tmp58 tmp60 = libdevice.sqrt(tmp40) tmp61 = libdevice.sqrt(tmp59) tmp62 = tmp60 - tmp61 tmp63 = tl_math.abs(tmp62) tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK]) tmp66 = _tmp65 + tmp64 _tmp65 = tl.where(rmask, tmp66, _tmp65) tmp65 = tl.sum(_tmp65, 1)[:, None] tmp67 = 0.25 tmp68 = tmp65 * tmp67 tmp69 = 0.16666666666666666 tmp70 = tmp68 * tmp69 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp70, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 get_raw_stream(0) triton_red_fused_abs_div_index_linalg_vector_norm_sub_sum_0[grid(1)]( buf3, arg0_1, arg1_1, 1, 96, XBLOCK=1, RBLOCK=64, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf3, class JointBoneLossNew(torch.nn.Module): def __init__(self, joint_num): super(JointBoneLossNew, self).__init__() id_i, id_j = [], [] for i in range(joint_num): for j in range(i + 1, joint_num): id_i.append(i) id_j.append(j) self.id_i = id_i self.id_j = id_j def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
SevenMoGod/movenet.pytorch
JointBoneLoss
false
14395
[ "MIT" ]
87
95ec8535245228aa4335243e68722810e50bcaf8
https://github.com/SevenMoGod/movenet.pytorch/tree/95ec8535245228aa4335243e68722810e50bcaf8
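The constructor's nested loop enumerates unordered joint pairs; the same loss written with itertools.combinations, for joints given as (B, J, 3) coordinates (names illustrative):

import itertools
import torch

joint_num = 4
id_i, id_j = map(list, zip(*itertools.combinations(range(joint_num), 2)))
out, gt = torch.rand(4, joint_num, 3), torch.rand(4, joint_num, 3)
J = (out[:, id_i] - out[:, id_j]).norm(dim=-1)   # predicted bone lengths
Y = (gt[:, id_i] - gt[:, id_j]).norm(dim=-1)     # ground-truth bone lengths
loss = (J - Y).abs().sum() / out.shape[0] / len(id_i)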
GCN
import math import torch from torch import nn from torch.nn import functional as F class GraphConvolution(nn.Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features) ) if bias: self.bias = nn.Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj): """ :param input: :param adj: :return: """ support = torch.mm(input, self.weight) output = torch.spmm(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GCN(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout): super(GCN, self).__init__() self.gc1 = GraphConvolution(nfeat, nhid) self.gc2 = GraphConvolution(nhid, nclass) self.dropout = dropout def forward(self, x, adj): """ :param x: [2708, 1433] :param adj: [2708, 2708] :return: """ x = F.relu(self.gc1(x, adj)) x = F.dropout(x, self.dropout, training=self.training) x = self.gc2(x, adj) return F.log_softmax(x, dim=1) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_3, buf0, out=buf1) buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_add_relu_0[grid(16)](buf2, primals_4, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_4 buf3 = buf0 del buf0 extern_kernels.mm(buf2, primals_5, out=buf3) buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, primals_3, buf3, alpha=1, beta=1, out=buf4) del primals_6 buf5 = buf3 del buf3 triton_poi_fused__log_softmax_1[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = buf4 del buf4 triton_poi_fused__log_softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf5 return buf6, buf2, buf6, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0) class GraphConvolution(nn.Module): """ Simple GCN layer, similar to https://arxiv.org/abs/1609.02907 """ def __init__(self, in_features, out_features, bias=True): super(GraphConvolution, self).__init__() self.in_features = in_features self.out_features = out_features self.weight = nn.Parameter(torch.FloatTensor(in_features, out_features) ) if bias: self.bias = nn.Parameter(torch.FloatTensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj): """ :param input: :param adj: :return: """ support = torch.mm(input, self.weight) output = torch.spmm(adj, support) if self.bias is not None: return output + self.bias else: return output def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GCNNew(nn.Module): def __init__(self, nfeat, nhid, nclass, dropout): super(GCNNew, self).__init__() self.gc1 = GraphConvolution(nfeat, nhid) self.gc2 = GraphConvolution(nhid, nclass) self.dropout = dropout def forward(self, input_0, input_1): primals_1 = self.gc1.weight primals_4 = self.gc1.bias primals_2 = self.gc2.weight primals_6 = self.gc2.bias primals_3 = input_0 primals_5 = 
input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
Shadowalker1995/Tutorial-Resource
GCN
false
14396
[ "Apache-2.0" ]
362
71fe3d521cf9971f708fa9978e9c685c0dda6ba6
https://github.com/Shadowalker1995/Tutorial-Resource/tree/71fe3d521cf9971f708fa9978e9c685c0dda6ba6
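For reference, the dense two-layer GCN forward that the original module computes, written with plain matmuls (random stand-in weights; torch.spmm degenerates to a dense mm for the adj produced by get_inputs):

import torch
import torch.nn.functional as F

x, adj = torch.rand(4, 4), torch.rand(4, 4)
W1, b1 = torch.rand(4, 4), torch.rand(4)
W2, b2 = torch.rand(4, 4), torch.rand(4)
h = F.relu(adj @ (x @ W1) + b1)                  # gc1: support = x W1, output = adj support + bias
out = F.log_softmax(adj @ (h @ W2) + b2, dim=1)  # gc2 followed by log-softmax over classes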
TensorPermute
import torch import torch.utils.data class TensorPermute(torch.nn.Module): """ Convert a torch.FloatTensor of shape (NUM_IMAGES x CHANNELS x HEIGHT x WIDTH) to a torch.FloatTensor of shape (CHANNELS x NUM_IMAGES x HEIGHT x WIDTH). """ def forward(self, tensor): return tensor.permute(1, 0, 2, 3).contiguous() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 % 4 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2 + 64 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class TensorPermuteNew(torch.nn.Module): """ Convert a torch.FloatTensor of shape (NUM_IMAGES x CHANNELS x HEIGHT x WIDTH) to a torch.FloatTensor of shape (CHANNELS x NUM_IMAGES x HEIGHT x WIDTH). """ def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
SheffieldAI/pykale
TensorPermute
false
14397
[ "MIT" ]
324
be7670941fb06835883c80477b26702d407017db
https://github.com/SheffieldAI/pykale/tree/be7670941fb06835883c80477b26702d407017db
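A minimal check of the permute semantics (batch and channel axes swap, so element (n, c) moves to (c, n)):

import torch

x = torch.rand(4, 4, 4, 4)
y = x.permute(1, 0, 2, 3).contiguous()
assert y.is_contiguous() and torch.equal(y[1, 0], x[0, 1])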
PredictionHead
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data class PredictionHead(nn.Module): """ Simple classification prediction-head block to plug on top of the 4D output of a CNN. Args: num_classes: the number of different classes that can be predicted. input_shape: the shape that input to this head will have. Expected to be (batch_size, channels, height, width) """ def __init__(self, num_classes, input_shape): super(PredictionHead, self).__init__() self.avgpool = nn.AvgPool2d(input_shape[2]) self.linear = nn.Linear(input_shape[1], num_classes) def forward(self, x): x = self.avgpool(x) x = torch.flatten(x, 1) x = self.linear(x) return F.log_softmax(x, 1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_classes': 4, 'input_shape': [4, 4, 4]}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x0, tmp32, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](primals_1, buf0, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha =1, beta=1, out=buf1) del primals_2 del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_1[grid(16)](buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused__log_softmax_2[grid(16)](buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf2 return buf3, reinterpret_tensor(buf0, (4, 4), (4, 1), 0), buf3 class PredictionHeadNew(nn.Module): """ Simple classification prediction-head block to plug on top of the 4D output of a CNN. Args: num_classes: the number of different classes that can be predicted. input_shape: the shape that input to this head will have. Expected to be (batch_size, channels, height, width) """ def __init__(self, num_classes, input_shape): super(PredictionHeadNew, self).__init__() self.avgpool = nn.AvgPool2d(input_shape[2]) self.linear = nn.Linear(input_shape[1], num_classes) def forward(self, input_0): primals_2 = self.linear.weight primals_3 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
SheffieldAI/pykale
PredictionHead
false
14398
[ "MIT" ]
324
be7670941fb06835883c80477b26702d407017db
https://github.com/SheffieldAI/pykale/tree/be7670941fb06835883c80477b26702d407017db
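Because input_shape[2] equals the spatial size here, the AvgPool2d above acts as global average pooling, which is where the 0.0625 = 1/16 factor in the fused kernel comes from; a check:

import torch

x = torch.rand(4, 4, 4, 4)
pooled = torch.nn.AvgPool2d(4)(x)   # (4, 4, 1, 1)
assert torch.allclose(pooled.flatten(1), x.mean(dim=(2, 3)), atol=1e-6)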
SReLU
import torch import torch.nn as nn import torch.utils.data class SReLU(nn.Module): """Shifted ReLU""" def __init__(self, nc): super(SReLU, self).__init__() self.srelu_bias = nn.Parameter(torch.Tensor(1, nc, 1, 1)) self.srelu_relu = nn.ReLU(inplace=True) nn.init.constant_(self.srelu_bias, -1.0) def forward(self, x): return self.srelu_relu(x - self.srelu_bias) + self.srelu_bias def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'nc': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_relu_sub_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tmp4 + tmp1 tmp6 = 0.0 tmp7 = tmp4 <= tmp6 tl.store(out_ptr0 + x3, tmp5, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_relu_sub_threshold_backward_0[grid(256)](primals_2 , primals_1, buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1 ) del primals_1 del primals_2 return buf0, buf1 class SReLUNew(nn.Module): """Shifted ReLU""" def __init__(self, nc): super(SReLUNew, self).__init__() self.srelu_bias = nn.Parameter(torch.Tensor(1, nc, 1, 1)) self.srelu_relu = nn.ReLU(inplace=True) nn.init.constant_(self.srelu_bias, -1.0) def forward(self, input_0): primals_1 = self.srelu_bias primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
SheffieldAI/pykale
SReLU
false
14399
[ "MIT" ]
324
be7670941fb06835883c80477b26702d407017db
https://github.com/SheffieldAI/pykale/tree/be7670941fb06835883c80477b26702d407017db
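The shifted ReLU satisfies relu(x - b) + b = max(x, b), which is what the fused kernel computes before it also emits the <= 0 mask for the backward pass; a numerical check:

import torch

x = torch.randn(4, 4, 4, 4)
b = torch.full((1, 4, 1, 1), -1.0)
assert torch.allclose(torch.relu(x - b) + b, torch.maximum(x, b), atol=1e-6)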
GHMIoU
import torch import torch.nn.functional as F import torch.nn as nn class GHMIoU(nn.Module): """GHM IoU prediction loss Details of the theorem can be viewed in the paper "Gradient Harmonized Single-stage Detector". https://arxiv.org/abs/1811.05181 Args: bins (int): Number of the unit regions for distribution calculation. momentum (float): The parameter for moving average. use_sigmoid (bool): Can only be true for BCE based loss now. loss_weight (float): The weight of the total GHM-C loss. """ def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0): super(GHMIoU, self).__init__() self.bins = bins self.momentum = momentum self.edges = torch.arange(bins + 1).float() / bins self.edges[-1] += 1e-06 if momentum > 0: self.acc_sum = torch.zeros(bins) self.use_sigmoid = use_sigmoid if not self.use_sigmoid: raise NotImplementedError self.loss_weight = loss_weight def forward(self, pred, target, label_weight, *args, **kwargs): """Calculate the GHM-C loss. Args: pred (float tensor of size [batch*A*width*height]): IoU prediction for each regression anchor target (float tensor of size [batch*A*width*height]): store the iou between predicted boxes and their corresponding groundtruth boxes for the positives and the iou between the predicted boxes and anchors for negatives. label_weight (float tensor of size [batch*A*width*height]): 1 for positives and 0 for others. Returns: The gradient harmonized loss. """ target, label_weight = target.float(), label_weight.float() target = target.detach() edges = self.edges mmt = self.momentum weights = torch.zeros_like(pred) g = torch.abs(pred.sigmoid().detach() - target) valid = label_weight > 0 tot = max(valid.float().sum().item(), 1.0) n = 0 for i in range(self.bins): inds = (g >= edges[i]) & (g < edges[i + 1]) & valid num_in_bin = inds.sum().item() if num_in_bin > 0: if mmt > 0: self.acc_sum[i] = mmt * self.acc_sum[i] + (1 - mmt ) * num_in_bin weights[inds] = tot / self.acc_sum[i] else: weights[inds] = tot / num_in_bin n += 1 if n > 0: weights = weights / n loss = F.binary_cross_entropy_with_logits(pred, target, weights, reduction='sum') / tot return loss * self.loss_weight def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused__to_copy_gt_sum_0(in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = tmp2.to(tl.float32) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp2, None) tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp6, None) @triton.jit def triton_poi_fused_zeros_like_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_abs_sigmoid_sub_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 - tmp2 tmp4 = tl_math.abs(tmp3) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) buf1 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused__to_copy_gt_sum_0[grid(1)](arg1_1, buf0, buf1, 1, 256, num_warps=2, num_stages=1) del arg1_1 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_zeros_like_1[grid(256)](buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_abs_sigmoid_sub_2[grid(256)](arg2_1, arg0_1, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg2_1 return buf1, arg0_1, buf2, buf3, buf0 class GHMIoUNew(nn.Module): """GHM IoU prediction loss Details of the theorem can be viewed in the paper "Gradient Harmonized Single-stage Detector". https://arxiv.org/abs/1811.05181 Args: bins (int): Number of the unit regions for distribution calculation. momentum (float): The parameter for moving average. use_sigmoid (bool): Can only be true for BCE based loss now. loss_weight (float): The weight of the total GHM-C loss. 
""" def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0): super(GHMIoUNew, self).__init__() self.bins = bins self.momentum = momentum self.edges = torch.arange(bins + 1).float() / bins self.edges[-1] += 1e-06 if momentum > 0: self.acc_sum = torch.zeros(bins) self.use_sigmoid = use_sigmoid if not self.use_sigmoid: raise NotImplementedError self.loss_weight = loss_weight def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
ShegnkaiWu/IoU-aware-single-stage-object-detector-for-accurate-localization
GHMIoU
false
14400
[ "Apache-2.0" ]
62
67b8955eb59137590dbadc6aac45529ae9459e4a
https://github.com/ShegnkaiWu/IoU-aware-single-stage-object-detector-for-accurate-localization/tree/67b8955eb59137590dbadc6aac45529ae9459e4a
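The bin-and-reweight loop stays in eager Python in this record, so the compiled call above covers only the sigmoid/abs and valid-mask prefix; a vectorized sketch of that loop for the default bins=10 (variable names illustrative):

import torch
import torch.nn.functional as F

pred, target, lw = (torch.rand(4, 4, 4, 4) for _ in range(3))
g = (pred.sigmoid() - target).abs()                  # gradient-norm proxy, triton_poi_fused_abs_sigmoid_sub_2
valid = lw > 0
tot = valid.float().sum().clamp(min=1.0)
bins = (g * 10).long().clamp(max=9)                  # unit-width bins, matching edges = arange(11) / 10
counts = torch.bincount(bins[valid], minlength=10).float()
weights = torch.zeros_like(pred)
weights[valid] = tot / counts[bins[valid]]           # elements with rare gradient norms get larger weights
weights = weights / (counts > 0).sum().clamp(min=1)  # divide by the number of non-empty bins
loss = F.binary_cross_entropy_with_logits(pred, target, weights, reduction='sum') / tot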
ZoneOutBiLSTM
import torch import torch.nn as nn class LinearNorm(nn.Module): """ LinearNorm Projection """ def __init__(self, in_features, out_features, bias=False): super(LinearNorm, self).__init__() self.linear = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(self.linear.weight) if bias: nn.init.constant_(self.linear.bias, 0.0) def forward(self, x): x = self.linear(x) return x class ZoneOutCell(nn.Module): """ZoneOut Cell module. This is a module of zoneout described in `Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activations`_. This code is modified from `eladhoffer/seq2seq.pytorch`_. Examples: >>> lstm = torch.nn.LSTMCell(16, 32) >>> lstm = ZoneOutCell(lstm, 0.5) .. _`Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activations`: https://arxiv.org/abs/1606.01305 .. _`eladhoffer/seq2seq.pytorch`: https://github.com/eladhoffer/seq2seq.pytorch """ def __init__(self, cell, zoneout_rate=0.1): """Initialize zone out cell module. Args: cell (torch.nn.Module): Pytorch recurrent cell module e.g. `torch.nn.Module.LSTMCell`. zoneout_rate (float, optional): Probability of zoneout from 0.0 to 1.0. """ super(ZoneOutCell, self).__init__() self.cell = cell self.hidden_size = cell.hidden_size self.zoneout_rate = zoneout_rate if zoneout_rate > 1.0 or zoneout_rate < 0.0: raise ValueError( 'zoneout probability must be in the range from 0.0 to 1.0.') def forward(self, inputs, hidden): """Calculate forward propagation. Args: inputs (Tensor): Batch of input tensor (B, input_size). hidden (tuple): - Tensor: Batch of initial hidden states (B, hidden_size). - Tensor: Batch of initial cell states (B, hidden_size). Returns: tuple: - Tensor: Batch of next hidden states (B, hidden_size). - Tensor: Batch of next cell states (B, hidden_size). """ next_hidden = self.cell(inputs, hidden) next_hidden = self._zoneout(hidden, next_hidden, self.zoneout_rate) return next_hidden def _zoneout(self, h, next_h, prob): if isinstance(h, tuple): num_h = len(h) if not isinstance(prob, tuple): prob = tuple([prob] * num_h) return tuple([self._zoneout(h[i], next_h[i], prob[i]) for i in range(num_h)]) if self.training: mask = h.new(*h.size()).bernoulli_(prob) return mask * h + (1 - mask) * next_h else: return prob * h + (1 - prob) * next_h class ZoneOutBiLSTM(nn.Module): """ ZoneOut Bi-LSTM """ def __init__(self, hidden_dim, zoneout_rate=0.1): super(ZoneOutBiLSTM, self).__init__() self.hidden_dim = hidden_dim self.lstm_cell_forward = ZoneOutCell(nn.LSTMCell(self.hidden_dim, self.hidden_dim), zoneout_rate) self.lstm_cell_backward = ZoneOutCell(nn.LSTMCell(self.hidden_dim, self.hidden_dim), zoneout_rate) self.linear = LinearNorm(self.hidden_dim * 2, self.hidden_dim) def forward(self, x): batch_size, seq_len, device = x.size(0), x.size(1), x.device hs_forward = torch.zeros(batch_size, self.hidden_dim, device=device) cs_forward = torch.zeros(batch_size, self.hidden_dim, device=device) hs_backward = torch.zeros(batch_size, self.hidden_dim, device=device) cs_backward = torch.zeros(batch_size, self.hidden_dim, device=device) torch.nn.init.kaiming_normal_(hs_forward) torch.nn.init.kaiming_normal_(cs_forward) torch.nn.init.kaiming_normal_(hs_backward) torch.nn.init.kaiming_normal_(cs_backward) forward = [] backward = [] x = x.view(seq_len, batch_size, -1) for i in range(seq_len): hs_forward, cs_forward = self.lstm_cell_forward(x[i], ( hs_forward, cs_forward)) forward.append(hs_forward) for i in reversed(range(seq_len)): hs_backward, cs_backward = self.lstm_cell_backward(x[i], ( hs_backward, cs_backward)) 
backward.append(hs_backward) out = torch.cat([torch.stack(forward), torch.stack(backward)], dim=-1) out = self.linear(out.view(batch_size, seq_len, -1)) return out def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hidden_dim': 4}]
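In eval mode, zoneout reduces to a fixed interpolation between the previous and the new hidden state; the 0.1/0.9 constants baked into the triton_poi_fused_add_mul_* kernels below are exactly this blend for zoneout_rate=0.1:

import torch

h, next_h = torch.rand(4, 4), torch.rand(4, 4)
prob = 0.1
mixed = prob * h + (1 - prob) * next_h   # eval-mode zoneout: no Bernoulli mask, deterministic blend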
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_add_mul_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_out_ptr0 + x0, xmask) tmp7 = tl.load(in_ptr1 + x0, xmask) tmp10 = tl.load(in_out_ptr1 + x0, xmask) tmp13 = tl.load(in_ptr2 + x0, xmask) tmp1 = 0.1 tmp2 = tmp0 * tmp1 tmp4 = 0.9 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp8 = tmp7 * tmp4 tmp9 = tmp2 + tmp8 tmp11 = tmp10 * tmp4 tmp12 = tmp2 + tmp11 tmp14 = tmp13 * tmp4 tmp15 = tmp2 + tmp14 tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + x0, tmp9, xmask) tl.store(in_out_ptr1 + x0, tmp12, xmask) tl.store(out_ptr1 + x0, tmp15, xmask) @triton.jit def triton_poi_fused_add_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = 0.1 tmp2 = tmp0 * tmp1 tmp4 = 0.9 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tl.store(in_out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask) tmp1 = 0.1 tmp2 = tmp0 * tmp1 tmp4 = 0.9 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tl.store(out_ptr0 + x0, tmp6, xmask) @triton.jit def triton_poi_fused_stack_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-8 + x1)), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tl.full([1], 16, tl.int64) tmp19 = tl.load(in_ptr2 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0) tmp20 = 0.1 tmp21 = tmp19 * tmp20 tmp22 = tl.load(in_ptr3 + (x0 + 4 * (-12 + x1)), tmp16 & xmask, other=0.0) tmp23 = 0.9 tmp24 = tmp22 * tmp23 tmp25 = tmp21 + tmp24 tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype) tmp27 = tl.where(tmp16, tmp25, tmp26) tmp28 = tl.where(tmp14, tmp15, tmp27) 
tmp29 = tl.where(tmp9, tmp10, tmp28) tmp30 = tl.where(tmp4, tmp5, tmp29) tl.store(out_ptr0 + x2, tmp30, xmask) @triton.jit def triton_poi_fused_cat_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (16, 4), (4, 1)) assert_size_stride(primals_3, (16, 4), (4, 1)) assert_size_stride(primals_4, (16,), (1,)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (16, 4), (4, 1)) assert_size_stride(primals_7, (16, 4), (4, 1)) assert_size_stride(primals_8, (16,), (1,)) assert_size_stride(primals_9, (16,), (1,)) assert_size_stride(primals_10, (4, 8), (8, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = torch.ops.aten.normal_functional.default(buf0, 0.0, 0.7071067811865476) buf2 = buf1 del buf1 buf3 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((4, 16), (16, 1), torch.float32) extern_kernels.mm(buf2, reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf4) buf5 = torch.ops.aten._thnn_fused_lstm_cell.default(buf3, buf4, buf2, primals_4, primals_5) buf6 = buf5[0] buf7 = buf5[1] buf8 = buf5[2] del buf5 buf33 = buf4 del buf4 extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf33) buf34 = buf3 del buf3 extern_kernels.mm(buf2, reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), out=buf34) buf35 = torch.ops.aten._thnn_fused_lstm_cell.default(buf33, buf34, buf2, primals_8, primals_9) buf36 = buf35[0] buf37 = buf35[1] buf9 = buf6 del buf6 buf10 = buf0 del buf0 buf39 = buf36 del buf36 buf40 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_1[grid(16)](buf9, buf39, buf2, buf7, buf37, buf10, buf40, 16, XBLOCK=16, num_warps=1, num_stages=1) buf11 = buf34 del buf34 extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 16), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf11) buf12 = buf33 del buf33 extern_kernels.mm(buf9, reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf12) buf13 = torch.ops.aten._thnn_fused_lstm_cell.default(buf11, buf12, buf10, primals_4, primals_5) buf14 = buf13[0] buf15 = buf13[1] buf16 = buf13[2] del buf13 buf17 = buf14 del buf14 triton_poi_fused_add_mul_2[grid(16)](buf17, buf9, 16, XBLOCK=16, num_warps=1, num_stages=1) buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_3[grid(16)](buf10, buf15, buf18, 16, XBLOCK=16, num_warps=1, num_stages=1) buf19 = buf12 del buf12 
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 32), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf19) buf20 = buf11 del buf11 extern_kernels.mm(buf17, reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf20) buf21 = torch.ops.aten._thnn_fused_lstm_cell.default(buf19, buf20, buf18, primals_4, primals_5) buf22 = buf21[0] buf23 = buf21[1] buf24 = buf21[2] del buf21 buf25 = buf22 del buf22 triton_poi_fused_add_mul_2[grid(16)](buf25, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1) buf26 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_3[grid(16)](buf18, buf23, buf26, 16, XBLOCK=16, num_warps=1, num_stages=1) buf27 = buf20 del buf20 extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf27) del primals_2 buf28 = buf19 del buf19 extern_kernels.mm(buf25, reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), out=buf28) buf29 = torch.ops.aten._thnn_fused_lstm_cell.default(buf27, buf28, buf26, primals_4, primals_5) del primals_4 del primals_5 buf30 = buf29[0] buf31 = buf29[1] buf32 = buf29[2] del buf29 buf38 = buf35[2] del buf35 buf41 = buf28 del buf28 extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 32), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf41) buf42 = buf27 del buf27 extern_kernels.mm(buf39, reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), out=buf42) buf43 = torch.ops.aten._thnn_fused_lstm_cell.default(buf41, buf42, buf40, primals_8, primals_9) buf44 = buf43[0] buf45 = buf43[1] buf46 = buf43[2] del buf43 buf47 = buf44 del buf44 triton_poi_fused_add_mul_2[grid(16)](buf47, buf39, 16, XBLOCK=16, num_warps=1, num_stages=1) buf48 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_3[grid(16)](buf40, buf45, buf48, 16, XBLOCK=16, num_warps=1, num_stages=1) buf49 = buf42 del buf42 extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 16), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf49) buf50 = buf41 del buf41 extern_kernels.mm(buf47, reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), out=buf50) buf51 = torch.ops.aten._thnn_fused_lstm_cell.default(buf49, buf50, buf48, primals_8, primals_9) buf52 = buf51[0] buf53 = buf51[1] buf54 = buf51[2] del buf51 buf55 = buf52 del buf52 triton_poi_fused_add_mul_2[grid(16)](buf55, buf47, 16, XBLOCK=16, num_warps=1, num_stages=1) buf56 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_add_mul_3[grid(16)](buf48, buf53, buf56, 16, XBLOCK=16, num_warps=1, num_stages=1) buf57 = buf50 del buf50 extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf57) del primals_6 buf58 = buf49 del buf49 extern_kernels.mm(buf55, reinterpret_tensor(primals_7, (4, 16), (1, 4), 0), out=buf58) buf59 = torch.ops.aten._thnn_fused_lstm_cell.default(buf57, buf58, buf56, primals_8, primals_9) del primals_8 del primals_9 buf60 = buf59[0] buf61 = buf59[1] buf62 = buf59[2] del buf59 buf63 = reinterpret_tensor(buf58, (16, 4), (4, 1), 0) del buf58 triton_poi_fused_stack_4[grid(64)](buf9, buf17, buf25, buf30, buf63, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf30 buf64 = reinterpret_tensor(buf57, (16, 4), (4, 1), 0) del buf57 triton_poi_fused_stack_4[grid(64)](buf39, buf47, buf55, buf60, buf64, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf60 buf65 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_5[grid(128)](buf63, buf64, buf65, 128, XBLOCK= 128, num_warps=4, 
num_stages=1) del buf63 buf66 = buf64 del buf64 extern_kernels.mm(reinterpret_tensor(buf65, (16, 8), (8, 1), 0), reinterpret_tensor(primals_10, (8, 4), (1, 8), 0), out=buf66) return (reinterpret_tensor(buf66, (4, 4, 4), (16, 4, 1), 0), buf2, reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), buf7, buf8, buf9, buf10, reinterpret_tensor(primals_1, (4, 4), (4, 1), 16), buf15, buf16, buf17, buf18, reinterpret_tensor(primals_1, (4, 4), (4, 1), 32), buf23, buf24, buf25, buf26, reinterpret_tensor(primals_1, (4, 4), (4, 1), 48), buf31, buf32, buf37, buf38, buf39, buf40, buf45, buf46, buf47, buf48, buf53, buf54, buf55, buf56, buf61, buf62, reinterpret_tensor(buf65, (16, 8), (8, 1), 0), primals_10, primals_7, primals_3) class LinearNorm(nn.Module): """ LinearNorm Projection """ def __init__(self, in_features, out_features, bias=False): super(LinearNorm, self).__init__() self.linear = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(self.linear.weight) if bias: nn.init.constant_(self.linear.bias, 0.0) def forward(self, x): x = self.linear(x) return x class ZoneOutCell(nn.Module): """ZoneOut Cell module. This is a module of zoneout described in `Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activations`_. This code is modified from `eladhoffer/seq2seq.pytorch`_. Examples: >>> lstm = torch.nn.LSTMCell(16, 32) >>> lstm = ZoneOutCell(lstm, 0.5) .. _`Zoneout: Regularizing RNNs by Randomly Preserving Hidden Activations`: https://arxiv.org/abs/1606.01305 .. _`eladhoffer/seq2seq.pytorch`: https://github.com/eladhoffer/seq2seq.pytorch """ def __init__(self, cell, zoneout_rate=0.1): """Initialize zone out cell module. Args: cell (torch.nn.Module): Pytorch recurrent cell module e.g. `torch.nn.Module.LSTMCell`. zoneout_rate (float, optional): Probability of zoneout from 0.0 to 1.0. """ super(ZoneOutCell, self).__init__() self.cell = cell self.hidden_size = cell.hidden_size self.zoneout_rate = zoneout_rate if zoneout_rate > 1.0 or zoneout_rate < 0.0: raise ValueError( 'zoneout probability must be in the range from 0.0 to 1.0.') def forward(self, inputs, hidden): """Calculate forward propagation. Args: inputs (Tensor): Batch of input tensor (B, input_size). hidden (tuple): - Tensor: Batch of initial hidden states (B, hidden_size). - Tensor: Batch of initial cell states (B, hidden_size). Returns: tuple: - Tensor: Batch of next hidden states (B, hidden_size). - Tensor: Batch of next cell states (B, hidden_size). 
""" next_hidden = self.cell(inputs, hidden) next_hidden = self._zoneout(hidden, next_hidden, self.zoneout_rate) return next_hidden def _zoneout(self, h, next_h, prob): if isinstance(h, tuple): num_h = len(h) if not isinstance(prob, tuple): prob = tuple([prob] * num_h) return tuple([self._zoneout(h[i], next_h[i], prob[i]) for i in range(num_h)]) if self.training: mask = h.new(*h.size()).bernoulli_(prob) return mask * h + (1 - mask) * next_h else: return prob * h + (1 - prob) * next_h class ZoneOutBiLSTMNew(nn.Module): """ ZoneOut Bi-LSTM """ def __init__(self, hidden_dim, zoneout_rate=0.1): super(ZoneOutBiLSTMNew, self).__init__() self.hidden_dim = hidden_dim self.lstm_cell_forward = ZoneOutCell(nn.LSTMCell(self.hidden_dim, self.hidden_dim), zoneout_rate) self.lstm_cell_backward = ZoneOutCell(nn.LSTMCell(self.hidden_dim, self.hidden_dim), zoneout_rate) self.linear = LinearNorm(self.hidden_dim * 2, self.hidden_dim) def forward(self, input_0): primals_2 = self.lstm_cell_forward.cell.weight_ih primals_3 = self.lstm_cell_forward.cell.weight_hh primals_4 = self.lstm_cell_forward.cell.bias_ih primals_5 = self.lstm_cell_forward.cell.bias_hh primals_6 = self.lstm_cell_backward.cell.weight_ih primals_7 = self.lstm_cell_backward.cell.weight_hh primals_8 = self.lstm_cell_backward.cell.bias_ih primals_9 = self.lstm_cell_backward.cell.bias_hh primals_10 = self.linear.linear.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
Seungwoo0326/WaveGrad2-1
ZoneOutBiLSTM
false
14401
[ "MIT" ]
45
3b202201348449b89353f28bce1596ca7939a810
https://github.com/Seungwoo0326/WaveGrad2-1/tree/3b202201348449b89353f28bce1596ca7939a810
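A minimal sketch of the zoneout update used by ZoneOutCell above (editor illustration, not part of the record; the LSTMCell sizes are arbitrary). At train time a Bernoulli mask preserves each unit of the previous hidden state; at eval time the mask is replaced by its expectation:

import torch
import torch.nn as nn

p = 0.1                                    # zoneout_rate
cell = nn.LSTMCell(4, 4)
x = torch.rand(2, 4)                       # (batch, input_size)
h = torch.zeros(2, 4)
c = torch.zeros(2, 4)
nh, nc = cell(x, (h, c))                   # candidate next states
mask = h.new_empty(h.size()).bernoulli_(p)
h_train = mask * h + (1 - mask) * nh       # training: randomly preserve units
h_eval = p * h + (1 - p) * nh              # inference: expectation of the mask
print(h_train.shape, h_eval.shape)         # torch.Size([2, 4]) twice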
ConvNet
import torch import torch.nn as nn from torch.nn import functional as F class ConvNet(nn.Module): """LeNet++ as described in the Center Loss paper.""" def __init__(self, num_classes): super(ConvNet, self).__init__() self.conv1_1 = nn.Conv2d(1, 32, 5, stride=1, padding=2) self.prelu1_1 = nn.PReLU() self.conv1_2 = nn.Conv2d(32, 32, 5, stride=1, padding=2) self.prelu1_2 = nn.PReLU() self.conv2_1 = nn.Conv2d(32, 64, 5, stride=1, padding=2) self.prelu2_1 = nn.PReLU() self.conv2_2 = nn.Conv2d(64, 64, 5, stride=1, padding=2) self.prelu2_2 = nn.PReLU() self.conv3_1 = nn.Conv2d(64, 128, 5, stride=1, padding=2) self.prelu3_1 = nn.PReLU() self.conv3_2 = nn.Conv2d(128, 128, 5, stride=1, padding=2) self.prelu3_2 = nn.PReLU() self.fc1 = nn.Linear(128 * 3 * 3, 2) self.prelu_fc1 = nn.PReLU() self.fc2 = nn.Linear(2, num_classes) def forward(self, x): x = self.prelu1_1(self.conv1_1(x)) x = self.prelu1_2(self.conv1_2(x)) x = F.max_pool2d(x, 2) x = self.prelu2_1(self.conv2_1(x)) x = self.prelu2_2(self.conv2_2(x)) x = F.max_pool2d(x, 2) x = self.prelu3_1(self.conv3_1(x)) x = self.prelu3_2(self.conv3_2(x)) x = F.max_pool2d(x, 2) x = x.view(-1, 128 * 3 * 3) x = self.prelu_fc1(self.fc1(x)) y = self.fc2(x) return x, y def get_inputs(): return [torch.rand([4, 1, 24, 24])] def get_init_inputs(): return [[], {'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 32 * x2 + 800 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 32 * x2 + 800 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 25 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_5(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 128 xnumel = 576 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 32 y1 = yindex // 32 tmp0 = tl.load(in_ptr0 + (x2 + 576 * y3), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (y0 + 32 * x2 + 18432 * y1), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__prelu_kernel_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = tmp4 * tmp0 tmp6 = tl.where(tmp2, tmp0, tmp5) tl.store(out_ptr0 + x0, tmp6, None) @triton.jit def triton_poi_fused__prelu_kernel_convolution_7(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x2, tmp2, None) tl.store(out_ptr0 + x2, tmp8, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 % 12 x2 = xindex // 384 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 1536 * x2), None) tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 1536 * x2), None) tmp3 = tl.load(in_ptr0 + (768 + x0 + 64 * x1 + 1536 * x2), None) tmp5 = tl.load(in_ptr0 + (800 + x0 + 64 * x1 + 1536 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused__prelu_kernel_convolution_9(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = 
xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x2, tmp2, None) tl.store(out_ptr0 + x2, tmp8, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 9216 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x1 = xindex // 64 % 6 x2 = xindex // 384 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 1536 * x2), xmask) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 1536 * x2), xmask) tmp3 = tl.load(in_ptr0 + (768 + x0 + 128 * x1 + 1536 * x2), xmask) tmp5 = tl.load(in_ptr0 + (832 + x0 + 128 * x1 + 1536 * x2), xmask) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, xmask) tl.store(out_ptr1 + x3, tmp16, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_11(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x2, tmp2, None) tl.store(out_ptr0 + x2, tmp8, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 36 xnumel = 128 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 3 y1 = yindex // 3 y5 = yindex y4 = yindex // 9 y6 = yindex % 9 tmp0 = tl.load(in_ptr0 + (x2 + 256 * y0 + 1536 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (128 + x2 + 256 * y0 + 1536 * y1), xmask & ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (768 + x2 + 256 * y0 + 1536 * y1), xmask & ymask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (896 + x2 + 256 * y0 + 1536 * y1), xmask & ymask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1, 1], 1, tl.int8) tmp4 = tl.full([1, 1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2 + 128 * y5), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + 9 * x2 + 1152 * y4), tmp16, xmask & ymask) @triton.jit def 
triton_poi_fused__prelu_kernel_13(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + 0) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp5 = tmp4 * tmp0 tmp6 = tl.where(tmp2, tmp0, tmp5) tl.store(out_ptr0 + x0, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24) = args args.clear() assert_size_stride(primals_1, (32, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 1, 24, 24), (576, 576, 24, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (32, 32, 5, 5), (800, 25, 5, 1)) assert_size_stride(primals_6, (32,), (1,)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (64, 32, 5, 5), (800, 25, 5, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (1,), (1,)) assert_size_stride(primals_11, (64, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_12, (64,), (1,)) assert_size_stride(primals_13, (1,), (1,)) assert_size_stride(primals_14, (128, 64, 5, 5), (1600, 25, 5, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (1,), (1,)) assert_size_stride(primals_17, (128, 128, 5, 5), (3200, 25, 5, 1)) assert_size_stride(primals_18, (128,), (1,)) assert_size_stride(primals_19, (1,), (1,)) assert_size_stride(primals_20, (2, 1152), (1152, 1)) assert_size_stride(primals_21, (2,), (1,)) assert_size_stride(primals_22, (1,), (1,)) assert_size_stride(primals_23, (4, 2), (2, 1)) assert_size_stride(primals_24, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((32, 32, 5, 5), (800, 1, 160, 32), torch. float32) get_raw_stream(0) triton_poi_fused_0[grid(1024, 25)](primals_5, buf0, 1024, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_5 buf1 = empty_strided_cuda((64, 32, 5, 5), (800, 1, 160, 32), torch. 
float32) triton_poi_fused_1[grid(2048, 25)](primals_8, buf1, 2048, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_8 buf2 = empty_strided_cuda((64, 64, 5, 5), (1600, 1, 320, 64), torch .float32) triton_poi_fused_2[grid(4096, 25)](primals_11, buf2, 4096, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_11 buf3 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32) triton_poi_fused_3[grid(8192, 25)](primals_14, buf3, 8192, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_14 buf4 = empty_strided_cuda((128, 128, 5, 5), (3200, 1, 640, 128), torch.float32) triton_poi_fused_4[grid(16384, 25)](primals_17, buf4, 16384, 25, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_17 buf5 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 32, 24, 24), (18432, 576, 24, 1)) buf6 = empty_strided_cuda((4, 32, 24, 24), (18432, 1, 768, 32), torch.float32) triton_poi_fused_convolution_5[grid(128, 576)](buf5, primals_2, buf6, 128, 576, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_2 buf7 = reinterpret_tensor(buf5, (4, 32, 24, 24), (18432, 1, 768, 32), 0 ) del buf5 triton_poi_fused__prelu_kernel_6[grid(73728)](buf6, primals_4, buf7, 73728, XBLOCK=512, num_warps=8, num_stages=1) buf8 = extern_kernels.convolution(buf7, buf0, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 32, 24, 24), (18432, 1, 768, 32)) buf9 = buf8 del buf8 buf10 = empty_strided_cuda((4, 32, 24, 24), (18432, 1, 768, 32), torch.float32) triton_poi_fused__prelu_kernel_convolution_7[grid(73728)](buf9, primals_6, primals_7, buf10, 73728, XBLOCK=1024, num_warps=4, num_stages=1) del primals_6 buf11 = empty_strided_cuda((4, 32, 12, 12), (4608, 1, 384, 32), torch.float32) buf12 = empty_strided_cuda((4, 32, 12, 12), (4608, 1, 384, 32), torch.int8) triton_poi_fused_max_pool2d_with_indices_8[grid(18432)](buf10, buf11, buf12, 18432, XBLOCK=256, num_warps=4, num_stages=1) buf13 = extern_kernels.convolution(buf11, buf1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf13, (4, 64, 12, 12), (9216, 1, 768, 64)) buf14 = buf13 del buf13 buf15 = empty_strided_cuda((4, 64, 12, 12), (9216, 1, 768, 64), torch.float32) triton_poi_fused__prelu_kernel_convolution_9[grid(36864)](buf14, primals_9, primals_10, buf15, 36864, XBLOCK=512, num_warps=4, num_stages=1) del primals_9 buf16 = extern_kernels.convolution(buf15, buf2, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf16, (4, 64, 12, 12), (9216, 1, 768, 64)) buf17 = buf16 del buf16 buf18 = empty_strided_cuda((4, 64, 12, 12), (9216, 1, 768, 64), torch.float32) triton_poi_fused__prelu_kernel_convolution_9[grid(36864)](buf17, primals_12, primals_13, buf18, 36864, XBLOCK=512, num_warps=4, num_stages=1) del primals_12 buf19 = empty_strided_cuda((4, 64, 6, 6), (2304, 1, 384, 64), torch .float32) buf20 = empty_strided_cuda((4, 64, 6, 6), (2304, 1, 384, 64), torch .int8) triton_poi_fused_max_pool2d_with_indices_10[grid(9216)](buf18, buf19, buf20, 9216, XBLOCK=256, num_warps=4, num_stages=1) buf21 = extern_kernels.convolution(buf19, buf3, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf21, (4, 128, 6, 6), (4608, 1, 768, 128)) buf22 = buf21 del buf21 buf23 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128), torch.float32) triton_poi_fused__prelu_kernel_convolution_11[grid(18432)](buf22, primals_15, primals_16, buf23, 18432, XBLOCK=128, num_warps=4, num_stages=1) del primals_15 buf24 = extern_kernels.convolution(buf23, buf4, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf24, (4, 128, 6, 6), (4608, 1, 768, 128)) buf25 = buf24 del buf24 buf26 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128), torch.float32) triton_poi_fused__prelu_kernel_convolution_11[grid(18432)](buf25, primals_18, primals_19, buf26, 18432, XBLOCK=128, num_warps=4, num_stages=1) del primals_18 buf27 = empty_strided_cuda((4, 128, 3, 3), (1152, 1, 384, 128), torch.int8) buf28 = empty_strided_cuda((4, 128, 3, 3), (1152, 9, 3, 1), torch. float32) triton_poi_fused_max_pool2d_with_indices_12[grid(36, 128)](buf26, buf27, buf28, 36, 128, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1) buf29 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_21, reinterpret_tensor(buf28, (4, 1152 ), (1152, 1), 0), reinterpret_tensor(primals_20, (1152, 2), (1, 1152), 0), alpha=1, beta=1, out=buf29) del primals_21 buf30 = empty_strided_cuda((4, 2), (2, 1), torch.float32) triton_poi_fused__prelu_kernel_13[grid(8)](buf29, primals_22, buf30, 8, XBLOCK=8, num_warps=1, num_stages=1) buf31 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_24, buf30, reinterpret_tensor( primals_23, (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf31) del primals_24 return (buf30, buf31, primals_1, primals_3, primals_4, buf0, primals_7, buf1, primals_10, buf2, primals_13, buf3, primals_16, buf4, primals_19, primals_22, buf6, buf7, buf9, buf10, buf11, buf12, buf14, buf15, buf17, buf18, buf19, buf20, buf22, buf23, buf25, buf26, buf27, reinterpret_tensor(buf28, (4, 1152), (1152, 1), 0), buf29, buf30, primals_23, primals_20) class ConvNetNew(nn.Module): """LeNet++ as described in the Center Loss paper.""" def __init__(self, num_classes): super(ConvNetNew, self).__init__() self.conv1_1 = nn.Conv2d(1, 32, 5, stride=1, padding=2) self.prelu1_1 = nn.PReLU() self.conv1_2 = nn.Conv2d(32, 32, 5, stride=1, padding=2) self.prelu1_2 = nn.PReLU() self.conv2_1 = nn.Conv2d(32, 64, 5, stride=1, padding=2) self.prelu2_1 = nn.PReLU() self.conv2_2 = nn.Conv2d(64, 64, 5, stride=1, padding=2) self.prelu2_2 = nn.PReLU() self.conv3_1 = nn.Conv2d(64, 128, 5, stride=1, padding=2) self.prelu3_1 = nn.PReLU() self.conv3_2 = nn.Conv2d(128, 128, 5, stride=1, padding=2) self.prelu3_2 = nn.PReLU() self.fc1 = nn.Linear(128 * 3 * 3, 2) self.prelu_fc1 = nn.PReLU() self.fc2 = nn.Linear(2, num_classes) def forward(self, input_0): primals_1 = self.conv1_1.weight primals_2 = self.conv1_1.bias primals_4 = self.prelu1_1.weight primals_5 = self.conv1_2.weight primals_6 = self.conv1_2.bias primals_7 = self.prelu1_2.weight primals_8 = self.conv2_1.weight primals_9 = self.conv2_1.bias primals_10 = self.prelu2_1.weight primals_11 = self.conv2_2.weight primals_12 = self.conv2_2.bias primals_13 = self.prelu2_2.weight primals_14 = self.conv3_1.weight primals_15 = self.conv3_1.bias primals_16 = self.prelu3_1.weight primals_17 = self.conv3_2.weight primals_18 = self.conv3_2.bias primals_19 = self.prelu3_2.weight primals_20 = self.fc1.weight primals_21 = self.fc1.bias primals_22 = self.prelu_fc1.weight primals_23 = self.fc2.weight 
primals_24 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24]) return output[0], output[1]
SJHBXShub/Center_Loss
ConvNet
false
14402
[ "MIT" ]
813
4097709144cf4cfc04d91ac1462ebf346b9f0448
https://github.com/SJHBXShub/Center_Loss/tree/4097709144cf4cfc04d91ac1462ebf346b9f0448
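A smoke test for this record (a sketch; it assumes the eager ConvNet class defined above is in scope, and runs on CPU since the compiled Triton version requires CUDA):

import torch

model = ConvNet(num_classes=4).eval()
x = torch.rand(4, 1, 24, 24)               # matches get_inputs()
with torch.no_grad():
    feat, logits = model(x)
print(feat.shape)                          # torch.Size([4, 2]): 2-D embedding used by center loss
print(logits.shape)                        # torch.Size([4, 4])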
VideoBoringModel
import torch import torch.nn as nn import torch.utils.data class VideoBoringModel(nn.Module): def __init__(self, in_channel): super().__init__() self.avg_pool3d = nn.AdaptiveAvgPool3d(1) self.fc = nn.Linear(in_channel, 1024) def forward(self, x): x = self.avg_pool3d(x).squeeze() x = self.fc(x) return x def output_size(self): return self.fc.in_features def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 64.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1024, 4), (4, 1)) assert_size_stride(primals_3, (1024,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(4)](buf1, primals_1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 buf2 = empty_strided_cuda((1, 1024), (1024, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf1, (1, 4), (0, 1), 0), reinterpret_tensor(primals_2, (4, 1024), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_2 del primals_3 return reinterpret_tensor(buf2, (1024,), (1,), 0), reinterpret_tensor(buf1, (1, 4), (4, 1), 0) class VideoBoringModelNew(nn.Module): def __init__(self, in_channel): super().__init__() self.avg_pool3d = nn.AdaptiveAvgPool3d(1) self.fc = nn.Linear(in_channel, 1024) def output_size(self): return self.fc.in_features def forward(self, input_0): primals_2 = self.fc.weight primals_3 = self.fc.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
SheffieldAI/pykale
VideoBoringModel
false
14403
[ "MIT" ]
324
be7670941fb06835883c80477b26702d407017db
https://github.com/SheffieldAI/pykale/tree/be7670941fb06835883c80477b26702d407017db
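Note that .squeeze() in the eager forward drops every singleton dimension, so the record's 4-D input is pooled and projected as a single unbatched example; a standalone shape sketch (layer sizes copied from the record):

import torch
import torch.nn as nn

pool = nn.AdaptiveAvgPool3d(1)
fc = nn.Linear(4, 1024)
x = torch.rand(4, 4, 4, 4)      # treated as (C, D, H, W): no batch dimension
pooled = pool(x)                # -> (4, 1, 1, 1)
squeezed = pooled.squeeze()     # -> (4,): all singleton dims removed
out = fc(squeezed)              # -> (1024,), matching the compiled output shape
print(pooled.shape, squeezed.shape, out.shape)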
Discriminator2
import torch import torch.nn as nn import torch.utils.data class Discriminator2(nn.Module): def __init__(self, n_h): super(Discriminator2, self).__init__() self.f_k = nn.Bilinear(n_h, n_h, 1) for m in self.modules(): self.weights_init(m) def weights_init(self, m): if isinstance(m, nn.Bilinear): torch.nn.init.xavier_uniform_(m.weight.data) if m.bias is not None: m.bias.data.fill_(0.0) def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None): c_x = c sc_1 = torch.squeeze(self.f_k(h_pl, c_x), 2) sc_2 = torch.squeeze(self.f_k(h_mi, c_x), 2) if s_bias1 is not None: sc_1 += s_bias1 if s_bias2 is not None: sc_2 += s_bias2 logits = torch.cat((sc_1, sc_2), 1) return logits def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_h': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 8 x0 = xindex % 4 x2 = xindex // 32 x3 = xindex tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp8 = tmp5 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp14 = tl.load(in_ptr2 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp11 & xmask, other=0.0) tmp15 = tmp14 + tmp7 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp11, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp10, tmp17) tl.store(out_ptr0 + x3, tmp18, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor( primals_4, (64, 4), (4, 1), 0), primals_2, reinterpret_tensor( primals_1, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) buf1 = buf0 del buf0 buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor( primals_5, (64, 4), (4, 1), 0), primals_2, reinterpret_tensor( primals_1, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_2 buf3 = buf2 del buf2 buf4 = empty_strided_cuda((4, 8, 4, 1), (32, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](buf1, primals_3, buf3, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del buf3 del primals_3 return buf4, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_5, (64, 4), (4, 1), 0) class Discriminator2New(nn.Module): def __init__(self, n_h): super(Discriminator2New, self).__init__() self.f_k = nn.Bilinear(n_h, n_h, 1) for m in self.modules(): self.weights_init(m) def weights_init(self, m): if isinstance(m, nn.Bilinear): torch.nn.init.xavier_uniform_(m.weight.data) if m.bias is not None: m.bias.data.fill_(0.0) def forward(self, input_0, input_1, input_2): primals_2 = self.f_k.weight primals_3 = self.f_k.bias primals_1 = input_0 primals_4 = input_1 primals_5 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Shen-Lab/GraphCL
Discriminator2
false
14404
[ "MIT" ]
275
1d43f79d7f33f8133f9d4b4b8254d8aaeb09a615
https://github.com/Shen-Lab/GraphCL/tree/1d43f79d7f33f8133f9d4b4b8254d8aaeb09a615
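The bilinear score computed by f_k (lowered to aten._trilinear in the compiled graph above) is h^T W c + b per example; a sketch checking that on random data (shapes here are arbitrary, not from the record):

import torch
import torch.nn as nn

f_k = nn.Bilinear(4, 4, 1)
h = torch.rand(8, 4)
c = torch.rand(8, 4)
ref = f_k(h, c)                                        # (8, 1)
W = f_k.weight[0]                                      # weight is (out=1, in1=4, in2=4) -> take (4, 4)
manual = (h @ W * c).sum(dim=1, keepdim=True) + f_k.bias
print(torch.allclose(ref, manual, atol=1e-6))          # True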
rec_attention
from _paritybench_helpers import _mock_config import torch import torch.nn as nn def batch_product(iput, mat2): result = None for i in range(iput.size()[0]): op = torch.mm(iput[i], mat2) op = op.unsqueeze(0) if result is None: result = op else: result = torch.cat((result, op), 0) return result.squeeze(2) class rec_attention(nn.Module): def __init__(self, hm, args): super(rec_attention, self).__init__() self.num_directions = 2 if args.bidirectional else 1 if hm is False: self.bin_rep_size = args.bin_rnn_size * self.num_directions else: self.bin_rep_size = args.bin_rnn_size self.bin_context_vector = nn.Parameter(torch.Tensor(self. bin_rep_size, 1), requires_grad=True) self.softmax = nn.Softmax(dim=1) self.bin_context_vector.data.uniform_(-0.1, 0.1) def forward(self, iput): alpha = self.softmax(batch_product(iput, self.bin_context_vector)) [batch_size, source_length, _bin_rep_size2] = iput.size() repres = torch.bmm(alpha.unsqueeze(2).view(batch_size, -1, source_length), iput) return repres, alpha def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hm': 4, 'args': _mock_config(bidirectional=4, bin_rnn_size=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 3, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 2, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tmp6 & tmp4 tmp8 = tl.full([1], 1, tl.int64) tmp9 = tmp0 < tmp8 tmp10 = tmp9 & tmp7 tmp11 = tl.load(in_ptr0 + x0, tmp10 & xmask, eviction_policy= 'evict_last', other=0.0) tmp12 = tmp0 >= tmp8 tmp13 = tmp12 & tmp7 tmp14 = tl.load(in_ptr1 + x0, tmp13 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp11, tmp14) tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp7, tmp15, tmp16) tmp18 = tmp0 >= tmp5 tmp19 = tmp18 & tmp4 tmp20 = tl.load(in_ptr2 + x0, tmp19 & xmask, eviction_policy= 'evict_last', other=0.0) tmp21 = tl.where(tmp6, tmp17, tmp20) tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype) tmp23 = tl.where(tmp4, tmp21, tmp22) tmp24 = tmp0 >= tmp3 tl.full([1], 4, tl.int64) tmp27 = tl.load(in_ptr3 + x0, tmp24 & xmask, eviction_policy= 'evict_last', other=0.0) tmp28 = tl.where(tmp4, tmp23, tmp27) tl.store(out_ptr0 + x2, tmp28, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 1), (1, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (4, 4), (4, 1), 0), primals_1, out=buf0) buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (4, 4), (4, 1), 16), primals_1, out=buf1) buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (4, 4), (4, 1), 32), primals_1, out=buf2) buf3 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (4, 4), (4, 1), 48), primals_1, out=buf3) del primals_1 buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(16)](buf0, buf1, buf2, buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf0 del buf1 del buf2 del buf3 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_1[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = reinterpret_tensor(buf4, (4, 4), (4, 1), 0) del buf4 triton_poi_fused__softmax_2[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 1, 4), (4, 4, 1), 0) del buf5 extern_kernels.bmm(reinterpret_tensor(buf6, (4, 1, 4), (4, 4, 1), 0 ), primals_2, out=buf7) return buf7, buf6, buf6, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 48 ), reinterpret_tensor(primals_2, (4, 4), (1, 4), 32 ), reinterpret_tensor(primals_2, (4, 4), (1, 4), 16 ), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0) def batch_product(iput, mat2): result = None for i in range(iput.size()[0]): op = torch.mm(iput[i], mat2) op = op.unsqueeze(0) if result is None: result = op else: result = torch.cat((result, op), 0) return result.squeeze(2) class rec_attentionNew(nn.Module): def __init__(self, hm, args): super(rec_attentionNew, self).__init__() self.num_directions = 2 if args.bidirectional else 1 if hm is False: self.bin_rep_size = args.bin_rnn_size * self.num_directions else: self.bin_rep_size = args.bin_rnn_size self.bin_context_vector = nn.Parameter(torch.Tensor(self. bin_rep_size, 1), requires_grad=True) self.softmax = nn.Softmax(dim=1) self.bin_context_vector.data.uniform_(-0.1, 0.1) def forward(self, input_0): primals_1 = self.bin_context_vector primals_2 = input_0 output = call([primals_1, primals_2]) return output[0], output[1]
Luma-1994/lama
rec_attention
false
14405
[ "MIT" ]
137
60d802e2e4cce789f03eea11b038212ba5f7fd1b
https://github.com/Luma-1994/lama/tree/60d802e2e4cce789f03eea11b038212ba5f7fd1b
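batch_product above is a Python loop of per-slice torch.mm calls; torch.matmul broadcasts the same computation in one call, which is what the compiled graph effectively unrolls into four mm launches. A sketch on random data:

import torch

iput = torch.rand(4, 4, 4)     # (batch, seq_len, dim)
vec = torch.rand(4, 1)         # context vector (dim, 1)
loop = torch.cat([torch.mm(iput[i], vec).unsqueeze(0) for i in range(iput.size(0))], 0).squeeze(2)
batched = torch.matmul(iput, vec).squeeze(2)
print(torch.allclose(loop, batched))   # True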
SpanClassifier
import math import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import init class SpanClassifier(nn.Module): def __init__(self, hidden_size: 'int', dropout_rate: 'float'): super(SpanClassifier, self).__init__() self.start_proj = nn.Linear(hidden_size, hidden_size) self.end_proj = nn.Linear(hidden_size, hidden_size) self.biaffine = nn.Parameter(torch.Tensor(hidden_size, hidden_size)) self.concat_proj = nn.Linear(hidden_size * 2, 1) self.dropout = nn.Dropout(dropout_rate) self.reset_parameters() def forward(self, input_features): _bsz, seq_len, _dim = input_features.size() start_feature = self.dropout(F.gelu(self.start_proj(input_features))) end_feature = self.dropout(F.gelu(self.end_proj(input_features))) biaffine_logits = torch.bmm(torch.matmul(start_feature, self. biaffine), end_feature.transpose(1, 2)) start_extend = start_feature.unsqueeze(2).expand(-1, -1, seq_len, -1) end_extend = end_feature.unsqueeze(1).expand(-1, seq_len, -1, -1) span_matrix = torch.cat([start_extend, end_extend], 3) concat_logits = self.concat_proj(span_matrix).squeeze(-1) return biaffine_logits + concat_logits def reset_parameters(self) ->None: init.kaiming_uniform_(self.biaffine, a=math.sqrt(5)) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4, 'dropout_rate': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.nn as nn from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_gelu_view_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x4 = xindex // 32 x1 = xindex // 8 % 4 x3 = xindex // 128 x5 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x4 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = 0.5 tmp7 = tmp5 * tmp6 tmp8 = 0.7071067811865476 tmp9 = tmp5 * tmp8 tmp10 = libdevice.erf(tmp9) tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp7 * tmp12 tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp4, tmp13, tmp14) tmp16 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp19 = tl.load(in_ptr1 + (4 * x1 + 16 * x3 + (-4 + x0)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp4, tmp15, tmp19) tl.store(out_ptr0 + x5, tmp20, xmask) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + 0) tmp3 = tl.broadcast_to(tmp2, [XBLOCK]) tmp4 = tmp1 + tmp3 tmp5 = tmp0 + tmp4 tl.store(in_out_ptr0 + x0, tmp5, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (1, 8), (8, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) get_raw_stream(0) 
triton_poi_fused_gelu_view_0[grid(64)](buf0, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(buf2, primals_6, out=buf3) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_gelu_view_0[grid(64)](buf1, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (4, 4, 4), (16, 1, 4), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) triton_poi_fused_cat_1[grid(512)](buf0, buf4, buf6, 512, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf6, (64, 8), (8, 1), 0), reinterpret_tensor(primals_7, (8, 1), (1, 8), 0), out=buf7) buf8 = buf5 del buf5 triton_poi_fused_add_2[grid(64)](buf8, buf7, primals_8, 64, XBLOCK= 64, num_warps=1, num_stages=1) del buf7 del primals_8 return buf8, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf0, buf1, reinterpret_tensor(buf6, (64, 8), (8, 1), 0 ), primals_7, reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0 ), buf4, reinterpret_tensor(buf2, (4, 16), (1, 4), 0 ), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0) class SpanClassifierNew(nn.Module): def __init__(self, hidden_size: 'int', dropout_rate: 'float'): super(SpanClassifierNew, self).__init__() self.start_proj = nn.Linear(hidden_size, hidden_size) self.end_proj = nn.Linear(hidden_size, hidden_size) self.biaffine = nn.Parameter(torch.Tensor(hidden_size, hidden_size)) self.concat_proj = nn.Linear(hidden_size * 2, 1) self.dropout = nn.Dropout(dropout_rate) self.reset_parameters() def reset_parameters(self) ->None: init.kaiming_uniform_(self.biaffine, a=math.sqrt(5)) def forward(self, input_0): primals_2 = self.biaffine primals_4 = self.start_proj.weight primals_3 = self.start_proj.bias primals_6 = self.end_proj.weight primals_5 = self.end_proj.bias primals_7 = self.concat_proj.weight primals_8 = self.concat_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
ShannonAI/dice_loss_for_NLP
SpanClassifier
false
14406
[ "Apache-2.0" ]
143
d437bb999185535df46fdb74d1f2f57161331b44
https://github.com/ShannonAI/dice_loss_for_NLP/tree/d437bb999185535df46fdb74d1f2f57161331b44
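The biaffine term above scores every (start, end) position pair as start_i^T U end_j; the bmm form is just that double loop vectorized (the concat_proj term is added separately). A sketch checking only the bilinear part on small random shapes:

import torch

B, L, D = 2, 3, 4
start = torch.rand(B, L, D)
end = torch.rand(B, L, D)
U = torch.rand(D, D)
fused = torch.bmm(start @ U, end.transpose(1, 2))      # (B, L, L)
manual = torch.empty(B, L, L)
for b in range(B):
    for i in range(L):
        for j in range(L):
            manual[b, i, j] = start[b, i] @ U @ end[b, j]
print(torch.allclose(fused, manual, atol=1e-5))        # True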
QuadricLinearLoss
import torch import torch.nn as nn class QuadricLinearLoss(nn.Module): def __init__(self, clip_delta): super(QuadricLinearLoss, self).__init__() self.clip_delta = clip_delta def forward(self, y_pred, y_true, weights): td_error = y_true - y_pred td_error_abs = torch.abs(td_error) quadratic_part = torch.clamp(td_error_abs, max=self.clip_delta) linear_part = td_error_abs - quadratic_part loss = 0.5 * quadratic_part ** 2 + self.clip_delta * linear_part loss = torch.mean(loss * weights) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'clip_delta': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_clamp_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp12 = tl.load(in_ptr2 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = 4.0 tmp5 = triton_helpers.minimum(tmp3, tmp4) tmp6 = tmp5 * tmp5 tmp7 = 0.5 tmp8 = tmp6 * tmp7 tmp9 = tmp3 - tmp5 tmp10 = tmp9 * tmp4 tmp11 = tmp8 + tmp10 tmp13 = tmp11 * tmp12 tmp14 = tl.broadcast_to(tmp13, [RBLOCK]) tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0)) tmp17 = 256.0 tmp18 = tmp16 / tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_add_clamp_mean_mul_pow_sub_0[grid(1)](buf1, arg0_1, arg1_1, arg2_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf1, class QuadricLinearLossNew(nn.Module): def __init__(self, clip_delta): super(QuadricLinearLossNew, self).__init__() self.clip_delta = clip_delta def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
Shmuma/Run-Skeleton-Run
QuadricLinearLoss
false
14,407
[ "MIT" ]
92
a953e6c524a444b6a99a54ef5b2886a57de0d185
https://github.com/Shmuma/Run-Skeleton-Run/tree/a953e6c524a444b6a99a54ef5b2886a57de0d185
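As a sanity-check sketch (not part of the record): the eager QuadricLinearLoss above is a weighted Huber-style loss, quadratic up to clip_delta and linear beyond it, so it can be verified against the closed form on CPU; only the compiled QuadricLinearLossNew requires a CUDA device.

import torch

delta = 4.0
loss_fn = QuadricLinearLoss(clip_delta=delta)
y_pred, y_true, w = (torch.rand(4, 4, 4, 4) for _ in range(3))

err = (y_true - y_pred).abs()
quad = err.clamp(max=delta)                        # quadratic region, |e| <= delta
ref = (0.5 * quad ** 2 + delta * (err - quad)) * w  # linear tail beyond delta
assert torch.allclose(loss_fn(y_pred, y_true, w), ref.mean())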
Discriminator
import torch import torch.nn as nn import torch.utils.data class Discriminator(nn.Module): def __init__(self, n_h): super(Discriminator, self).__init__() self.f_k = nn.Bilinear(n_h, n_h, 1) for m in self.modules(): self.weights_init(m) def weights_init(self, m): if isinstance(m, nn.Bilinear): torch.nn.init.xavier_uniform_(m.weight.data) if m.bias is not None: m.bias.data.fill_(0.0) def forward(self, c, h_pl, h_mi, s_bias1=None, s_bias2=None): c_x = torch.unsqueeze(c, 1) c_x = c_x.expand_as(h_pl) sc_1 = torch.squeeze(self.f_k(h_pl, c_x), 2) sc_2 = torch.squeeze(self.f_k(h_mi, c_x), 2) if s_bias1 is not None: sc_1 += s_bias1 if s_bias2 is not None: sc_2 += s_bias2 logits = torch.cat((sc_1, sc_2), 1) return logits def get_inputs(): return [torch.rand([4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'n_h': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK]) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp8 = tmp5 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tmp14 + tmp7 tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype) tmp17 = tl.where(tmp11, tmp15, tmp16) tmp18 = tl.where(tmp4, tmp10, tmp17) tl.store(out_ptr0 + x2, tmp18, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4,), (1,)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = torch.ops.aten._trilinear.default(reinterpret_tensor( primals_2, (16, 4), (4, 1), 0), primals_3, reinterpret_tensor( buf0, (16, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) buf2 = buf1 del buf1 buf3 = torch.ops.aten._trilinear.default(reinterpret_tensor( primals_5, (16, 4), (4, 1), 0), primals_3, reinterpret_tensor( buf0, (16, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_3 buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32) triton_poi_fused_cat_1[grid(32)](buf2, primals_4, buf4, buf5, 32, XBLOCK=32, num_warps=1, num_stages=1) del buf2 del buf4 del primals_4 return buf5, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0 ), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor( primals_5, (16, 4), (4, 1), 0) class DiscriminatorNew(nn.Module): def __init__(self, n_h): super(DiscriminatorNew, self).__init__() self.f_k = nn.Bilinear(n_h, n_h, 1) for m in self.modules(): self.weights_init(m) def weights_init(self, m): if isinstance(m, nn.Bilinear): torch.nn.init.xavier_uniform_(m.weight.data) if m.bias is not None: m.bias.data.fill_(0.0) def forward(self, input_0, input_1, input_2): primals_3 = self.f_k.weight primals_4 = self.f_k.bias primals_1 = input_0 primals_2 = input_1 primals_5 = input_2 
output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Shen-Lab/GraphCL
Discriminator
false
14,408
[ "MIT" ]
275
1d43f79d7f33f8133f9d4b4b8254d8aaeb09a615
https://github.com/Shen-Lab/GraphCL/tree/1d43f79d7f33f8133f9d4b4b8254d8aaeb09a615
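Usage sketch for the bilinear discriminator above (a DGI/GraphCL-style readout discriminator, not part of the record): the summary vector c is broadcast against positive and corrupted node embeddings, and the bilinear layer scores each pair. The eager class runs on CPU; shapes follow get_inputs().

import torch

disc = Discriminator(n_h=4)
c = torch.rand(4)              # graph summary, expanded to match h_pl
h_pl = torch.rand(4, 4, 4)     # positive node embeddings
h_mi = torch.rand(4, 4, 4)     # corrupted (negative) node embeddings
logits = disc(c, h_pl, h_mi)   # (4, 8): positive scores concatenated with negative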
MNISTDecoder
import torch
import torch.nn as nn
import torch.nn.functional as F


class MNISTDecoder(nn.Module):
    """
    MNIST decoder used in the Counterfactual with Reinforcement Learning
    experiments. The model consists of a fully connected layer of 128 units
    with ReLU activation followed by a convolutional block. The convolutional
    block consists of 4 convolutional layers having 8, 8, 16 and 1 channels
    and a kernel size of 3. Each convolutional layer, except the last one, has
    ReLU nonlinearities and is followed by an upsampling layer of size 2. The
    final layer uses a sigmoid activation to clip the output values in [0, 1].
    """

    def __init__(self, latent_dim: 'int'):
        """
        Constructor.

        Parameters
        ----------
        latent_dim
            Latent dimension.
        """
        super().__init__()
        self.fc1 = nn.Linear(latent_dim, 128)
        self.conv1 = nn.Conv2d(8, 8, kernel_size=(3, 3), padding=1)
        self.up1 = nn.Upsample(scale_factor=2)
        self.conv2 = nn.Conv2d(8, 8, kernel_size=(3, 3), padding=1)
        self.up2 = nn.Upsample(scale_factor=2)
        self.conv3 = nn.Conv2d(8, 16, kernel_size=(3, 3))
        self.up3 = nn.Upsample(scale_factor=2)
        self.conv4 = nn.Conv2d(16, 1, kernel_size=(3, 3), padding=1)

    def forward(self, x: 'torch.Tensor') ->torch.Tensor:
        x = F.relu(self.fc1(x))
        x = x.view(x.shape[0], 8, 4, 4)
        x = self.up1(F.relu(self.conv1(x)))
        x = self.up2(F.relu(self.conv2(x)))
        x = self.up3(F.relu(self.conv3(x)))
        x = torch.sigmoid(self.conv4(x))
        return x


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'latent_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 8 % 8 x0 = xindex % 8 x5 = xindex // 64 x2 = xindex // 64 % 8 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 4 * tmp4 + 16 * x5), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x6, tmp13, None) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_3(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_relu_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 16 % 16 x0 = xindex % 16 x5 = xindex // 256 x2 = xindex // 256 % 8 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 8, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 8 * tmp4 + 64 * x5), None, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 
tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x6, tmp13, None) @triton.jit def triton_poi_fused__to_copy_add_arange_mul_5(out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 28 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = tmp3.to(tl.int32) tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused__unsafe_index_convolution_relu_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 50176 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 28 % 28 x0 = xindex % 28 x5 = xindex // 784 x2 = xindex // 784 % 16 x6 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 14, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr1 + (tmp8 + 14 * tmp4 + 196 * x5), xmask, eviction_policy='evict_last') tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + x6, tmp13, xmask) @triton.jit def triton_poi_fused_convolution_sigmoid_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 3136 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12544 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 196 % 16 x2 = xindex // 3136 x4 = xindex % 3136 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x4 + 3200 * x2), tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 64 % 8 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 8 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): 
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_5, (8,), (1,)) assert_size_stride(primals_6, (8, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_7, (8,), (1,)) assert_size_stride(primals_8, (16, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_9, (16,), (1,)) assert_size_stride(primals_10, (1, 16, 3, 3), (144, 9, 3, 1)) assert_size_stride(primals_11, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = buf0 del buf0 buf16 = empty_strided_cuda((4, 128), (128, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(512)](buf1, primals_2, buf16, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (4, 8, 4, 4), (128, 16, 4, 1), 0), primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 8, 4, 4), (128, 16, 4, 1)) buf3 = empty_strided_cuda((8,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_1[grid(8)](buf3, 8, XBLOCK =8, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 8, 8, 8), (512, 64, 8, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_relu_2[grid(2048)](buf3, buf2, primals_5, buf4, 2048, XBLOCK=256, num_warps=4, num_stages=1) buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 8, 8, 8), (512, 64, 8, 1)) buf6 = empty_strided_cuda((16,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_3[grid(16)](buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 8, 16, 16), (2048, 256, 16, 1), torch .float32) triton_poi_fused__unsafe_index_convolution_relu_4[grid(8192)](buf6, buf5, primals_7, buf7, 8192, XBLOCK=128, num_warps=4, num_stages=1) buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 16, 14, 14), (3136, 196, 14, 1)) buf9 = empty_strided_cuda((28,), (1,), torch.int64) triton_poi_fused__to_copy_add_arange_mul_5[grid(28)](buf9, 28, XBLOCK=32, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 16, 28, 28), (12544, 784, 28, 1), torch.float32) triton_poi_fused__unsafe_index_convolution_relu_6[grid(50176)](buf9, buf8, primals_9, buf10, 50176, XBLOCK=512, num_warps=4, num_stages=1) buf11 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 1, 28, 28), (784, 784, 28, 1)) buf12 = buf11 del buf11 triton_poi_fused_convolution_sigmoid_7[grid(3136)](buf12, primals_11, 3136, XBLOCK=128, num_warps=4, num_stages=1) del primals_11 buf13 = empty_strided_cuda((4, 16, 14, 14), (3200, 196, 14, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_8[grid(12544)]( buf8, primals_9, buf13, 12544, XBLOCK=128, num_warps=4, num_stages=1) 
        del buf8
        del primals_9
        buf14 = empty_strided_cuda((4, 8, 8, 8), (512, 64, 8, 1), torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_9[grid(2048)](
            buf5, primals_7, buf14, 2048, XBLOCK=128, num_warps=4,
            num_stages=1)
        del buf5
        del primals_7
        buf15 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_10[grid(512)](
            buf2, primals_5, buf15, 512, XBLOCK=128, num_warps=4,
            num_stages=1)
        del buf2
        del primals_5
    return (buf12, primals_3, primals_4, primals_6, primals_8, primals_10,
        reinterpret_tensor(buf1, (4, 8, 4, 4), (128, 16, 4, 1), 0), buf3,
        buf4, buf6, buf7, buf9, buf10, buf12, buf13, buf14, buf15, buf16)


class MNISTDecoderNew(nn.Module):
    """
    MNIST decoder used in the Counterfactual with Reinforcement Learning
    experiments. The model consists of a fully connected layer of 128 units
    with ReLU activation followed by a convolutional block. The convolutional
    block consists of 4 convolutional layers having 8, 8, 16 and 1 channels
    and a kernel size of 3. Each convolutional layer, except the last one, has
    ReLU nonlinearities and is followed by an upsampling layer of size 2. The
    final layer uses a sigmoid activation to clip the output values in [0, 1].
    """

    def __init__(self, latent_dim: 'int'):
        """
        Constructor.

        Parameters
        ----------
        latent_dim
            Latent dimension.
        """
        super().__init__()
        self.fc1 = nn.Linear(latent_dim, 128)
        self.conv1 = nn.Conv2d(8, 8, kernel_size=(3, 3), padding=1)
        self.up1 = nn.Upsample(scale_factor=2)
        self.conv2 = nn.Conv2d(8, 8, kernel_size=(3, 3), padding=1)
        self.up2 = nn.Upsample(scale_factor=2)
        self.conv3 = nn.Conv2d(8, 16, kernel_size=(3, 3))
        self.up3 = nn.Upsample(scale_factor=2)
        self.conv4 = nn.Conv2d(16, 1, kernel_size=(3, 3), padding=1)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.conv1.weight
        primals_5 = self.conv1.bias
        primals_6 = self.conv2.weight
        primals_7 = self.conv2.bias
        primals_8 = self.conv3.weight
        primals_9 = self.conv3.bias
        primals_10 = self.conv4.weight
        primals_11 = self.conv4.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
SeldonIO/alibi
MNISTDecoder
false
14,409
[ "ECL-2.0", "Apache-2.0" ]
1,570
a94b6e3cf6f47aaca560f6d4841e91a62439fa3b
https://github.com/SeldonIO/alibi/tree/a94b6e3cf6f47aaca560f6d4841e91a62439fa3b
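Decoding sketch for the record above (not part of the record): a latent batch is projected to 128 units, reshaped to (8, 4, 4) feature maps, and upsampled through the conv block to 28x28 sigmoid images. The eager decoder runs on CPU; the compiled MNISTDecoderNew additionally assumes a CUDA device.

import torch

decoder = MNISTDecoder(latent_dim=4)
z = torch.rand(4, 4)     # (batch, latent_dim)
images = decoder(z)      # (4, 1, 28, 28), values in [0, 1]
assert images.shape == (4, 1, 28, 28)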
CumulativeMagSpectralNorm
import torch
import torch.nn as nn


class CumulativeMagSpectralNorm(nn.Module):

    def __init__(self, cumulative=False, use_mid_freq_mu=False):
        """
        Args:
            cumulative: whether to compute mu cumulatively over frames
            use_mid_freq_mu: use only the mu of the center frequency bin in
                place of the global mu

        Notes:
            Computing the mean first and then accumulating is equivalent to
            accumulating first and then computing the mean.
        """
        super().__init__()
        self.eps = 1e-06
        self.cumulative = cumulative
        self.use_mid_freq_mu = use_mid_freq_mu

    def forward(self, input):
        assert input.ndim == 4, f'{type(self).__name__} only supports 4D input.'
        batch_size, n_channels, n_freqs, n_frames = input.size()
        device = input.device
        data_type = input.dtype
        input = input.reshape(batch_size * n_channels, n_freqs, n_frames)
        if self.use_mid_freq_mu:
            step_sum = input[:, int(n_freqs // 2 - 1), :]
        else:
            step_sum = torch.mean(input, dim=1)
        if self.cumulative:
            cumulative_sum = torch.cumsum(step_sum, dim=-1)
            entry_count = torch.arange(1, n_frames + 1, dtype=data_type,
                device=device)
            entry_count = entry_count.reshape(1, n_frames)
            entry_count = entry_count.expand_as(cumulative_sum)
            mu = cumulative_sum / entry_count
            mu = mu.reshape(batch_size * n_channels, 1, n_frames)
        else:
            mu = torch.mean(step_sum, dim=-1)
            mu = mu.reshape(batch_size * n_channels, 1, 1)
        input_normed = input / (mu + self.eps)
        input_normed = input_normed.reshape(batch_size, n_channels,
            n_freqs, n_frames)
        return input_normed


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp23 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp11 = tmp9 + tmp10
    tmp13 = tmp11 + tmp12
    tmp15 = tmp13 + tmp14
    tmp16 = tmp15 / tmp7
    tmp17 = tmp8 + tmp16
    tmp20 = tmp18 + tmp19
    tmp22 = tmp20 + tmp21
    tmp24 = tmp22 + tmp23
    tmp25 = tmp24 / tmp7
    tmp26 = tmp17 + tmp25
    tmp29 = tmp27 + tmp28
    tmp31 = tmp29 + tmp30
    tmp33 = tmp31 + tmp32
    tmp34 = tmp33 / tmp7
    tmp35 = tmp26 + tmp34
    tmp36 = tmp35 / tmp7
    tl.store(out_ptr0 + x0, tmp36, xmask)


@triton.jit
def triton_poi_fused_add_div_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = 1e-06
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 / tmp3
    tl.store(out_ptr0 + x2, tmp4, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16,), (1,), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mean_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_div_1[grid(256)](arg0_1, buf0, buf1, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        del buf0
    return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0),


class CumulativeMagSpectralNormNew(nn.Module):

    def __init__(self, cumulative=False, use_mid_freq_mu=False):
        """
        Args:
            cumulative: whether to compute mu cumulatively over frames
            use_mid_freq_mu: use only the mu of the center frequency bin in
                place of the global mu

        Notes:
            Computing the mean first and then accumulating is equivalent to
            accumulating first and then computing the mean.
        """
        super().__init__()
        self.eps = 1e-06
        self.cumulative = cumulative
        self.use_mid_freq_mu = use_mid_freq_mu

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
ShkarupaDC/FullSubNet
CumulativeMagSpectralNorm
false
14,410
[ "MIT" ]
219
2aef8b656376a42fbf519e0020636a893b56c4f8
https://github.com/ShkarupaDC/FullSubNet/tree/2aef8b656376a42fbf519e0020636a893b56c4f8
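A property-check sketch (not part of the record) for the default non-cumulative, global-mu path that the compiled kernels above implement: each (batch, channel) slice is divided by its scalar mean plus eps, so every normalized slice has mean close to 1. Eager module, CPU-friendly.

import torch

norm = CumulativeMagSpectralNorm(cumulative=False, use_mid_freq_mu=False)
spec = torch.rand(4, 4, 4, 4)            # (batch, channels, n_freqs, n_frames)
out = norm(spec)
per_slice_mean = out.reshape(16, -1).mean(dim=1)
assert torch.allclose(per_slice_mean, torch.ones(16), atol=1e-4)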
My_loss
import torch import torch.utils.data import torch._utils import torch.nn.parallel import torch.optim from torch.autograd import Variable as Variable class My_loss(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x, y): cccs = 0 for i in range(x.size(-1)): x_i = x[:, i] y_i = y[:, i] if len(x_i.size()) == 2 or len(y_i.size()) == 2: x_i = x_i.contiguous() y_i = y_i.contiguous() x_i = x_i.view(-1) y_i = y_i.view(-1) vx = x_i - torch.mean(x_i) vy = y_i - torch.mean(y_i) rho = torch.sum(vx * vy) / (torch.sqrt(torch.sum(torch.pow(vx, 2))) * torch.sqrt(torch.sum(torch.pow(vy, 2)))) x_m = torch.mean(x_i) y_m = torch.mean(y_i) x_s = torch.std(x_i) y_s = torch.std(y_i) ccc = 2 * rho * x_s * y_s / (torch.pow(x_s, 2) + torch.pow(y_s, 2) + torch.pow(x_m - y_m, 2)) cccs += ccc return -cccs def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch._utils import torch.nn.parallel import torch.optim from torch.autograd import Variable as Variable assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mean_mul_neg_pow_sqrt_std_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp4 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None) tmp47 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp51 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None) tmp91 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp95 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None) tmp135 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp139 = tl.load(in_ptr1 + (r0 + 64 * r1), None) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.sum(tmp1, 1)[:, None] tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.sum(tmp5, 1)[:, None] tmp8 = 64.0 tmp9 = tmp3 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp7 / tmp8 tmp12 = tmp4 - tmp11 tmp13 = tmp10 * tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.sum(tmp14, 1)[:, None] tmp17 = tmp10 * tmp10 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp21 = tmp12 * tmp12 tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK]) tmp24 = tl.sum(tmp22, 1)[:, None] tmp26 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp28 = tl.sum(tmp26, 1)[:, None] tmp29 = tl.full([XBLOCK, 1], 64, tl.int32) tmp30 = tmp29.to(tl.float32) tmp31 = tmp28 / tmp30 tmp32 = tmp1 - tmp31 tmp33 = tmp32 * tmp32 tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK]) tmp36 = tl.sum(tmp34, 1)[:, None] tmp38 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp40 = tl.sum(tmp38, 1)[:, None] tmp41 = tmp40 / tmp30 tmp42 = tmp5 - tmp41 tmp43 = tmp42 * tmp42 tmp44 = tl.broadcast_to(tmp43, [XBLOCK, RBLOCK]) tmp46 = tl.sum(tmp44, 1)[:, None] tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK]) tmp50 = tl.sum(tmp48, 1)[:, None] tmp52 = tl.broadcast_to(tmp51, [XBLOCK, RBLOCK]) tmp54 = tl.sum(tmp52, 1)[:, None] tmp55 = tmp50 / tmp8 tmp56 = tmp47 - tmp55 tmp57 = tmp54 / tmp8 tmp58 = tmp51 - tmp57 tmp59 = tmp56 * tmp58 tmp60 = tl.broadcast_to(tmp59, [XBLOCK, RBLOCK]) tmp62 = tl.sum(tmp60, 1)[:, None] tmp63 = tmp56 * tmp56 tmp64 = tl.broadcast_to(tmp63, [XBLOCK, RBLOCK]) tmp66 = tl.sum(tmp64, 1)[:, None] tmp67 = tmp58 * tmp58 tmp68 = tl.broadcast_to(tmp67, [XBLOCK, RBLOCK]) tmp70 = tl.sum(tmp68, 1)[:, None] tmp72 = tl.broadcast_to(tmp48, [XBLOCK, RBLOCK]) tmp74 = tl.sum(tmp72, 1)[:, None] tmp75 = tmp74 / tmp30 tmp76 = tmp48 - tmp75 tmp77 = tmp76 * tmp76 tmp78 = tl.broadcast_to(tmp77, [XBLOCK, RBLOCK]) tmp80 = tl.sum(tmp78, 1)[:, None] tmp82 = tl.broadcast_to(tmp52, [XBLOCK, RBLOCK]) tmp84 = tl.sum(tmp82, 1)[:, None] tmp85 = tmp84 / tmp30 tmp86 = tmp52 - tmp85 tmp87 = tmp86 * tmp86 tmp88 = tl.broadcast_to(tmp87, [XBLOCK, RBLOCK]) tmp90 = tl.sum(tmp88, 1)[:, None] tmp92 = tl.broadcast_to(tmp91, [XBLOCK, RBLOCK]) tmp94 = tl.sum(tmp92, 1)[:, None] tmp96 = 
tl.broadcast_to(tmp95, [XBLOCK, RBLOCK]) tmp98 = tl.sum(tmp96, 1)[:, None] tmp99 = tmp94 / tmp8 tmp100 = tmp91 - tmp99 tmp101 = tmp98 / tmp8 tmp102 = tmp95 - tmp101 tmp103 = tmp100 * tmp102 tmp104 = tl.broadcast_to(tmp103, [XBLOCK, RBLOCK]) tmp106 = tl.sum(tmp104, 1)[:, None] tmp107 = tmp100 * tmp100 tmp108 = tl.broadcast_to(tmp107, [XBLOCK, RBLOCK]) tmp110 = tl.sum(tmp108, 1)[:, None] tmp111 = tmp102 * tmp102 tmp112 = tl.broadcast_to(tmp111, [XBLOCK, RBLOCK]) tmp114 = tl.sum(tmp112, 1)[:, None] tmp116 = tl.broadcast_to(tmp92, [XBLOCK, RBLOCK]) tmp118 = tl.sum(tmp116, 1)[:, None] tmp119 = tmp118 / tmp30 tmp120 = tmp92 - tmp119 tmp121 = tmp120 * tmp120 tmp122 = tl.broadcast_to(tmp121, [XBLOCK, RBLOCK]) tmp124 = tl.sum(tmp122, 1)[:, None] tmp126 = tl.broadcast_to(tmp96, [XBLOCK, RBLOCK]) tmp128 = tl.sum(tmp126, 1)[:, None] tmp129 = tmp128 / tmp30 tmp130 = tmp96 - tmp129 tmp131 = tmp130 * tmp130 tmp132 = tl.broadcast_to(tmp131, [XBLOCK, RBLOCK]) tmp134 = tl.sum(tmp132, 1)[:, None] tmp136 = tl.broadcast_to(tmp135, [XBLOCK, RBLOCK]) tmp138 = tl.sum(tmp136, 1)[:, None] tmp140 = tl.broadcast_to(tmp139, [XBLOCK, RBLOCK]) tmp142 = tl.sum(tmp140, 1)[:, None] tmp143 = tmp138 / tmp8 tmp144 = tmp135 - tmp143 tmp145 = tmp142 / tmp8 tmp146 = tmp139 - tmp145 tmp147 = tmp144 * tmp146 tmp148 = tl.broadcast_to(tmp147, [XBLOCK, RBLOCK]) tmp150 = tl.sum(tmp148, 1)[:, None] tmp151 = tmp144 * tmp144 tmp152 = tl.broadcast_to(tmp151, [XBLOCK, RBLOCK]) tmp154 = tl.sum(tmp152, 1)[:, None] tmp155 = tmp146 * tmp146 tmp156 = tl.broadcast_to(tmp155, [XBLOCK, RBLOCK]) tmp158 = tl.sum(tmp156, 1)[:, None] tmp160 = tl.broadcast_to(tmp136, [XBLOCK, RBLOCK]) tmp162 = tl.sum(tmp160, 1)[:, None] tmp163 = tmp162 / tmp30 tmp164 = tmp136 - tmp163 tmp165 = tmp164 * tmp164 tmp166 = tl.broadcast_to(tmp165, [XBLOCK, RBLOCK]) tmp168 = tl.sum(tmp166, 1)[:, None] tmp170 = tl.broadcast_to(tmp140, [XBLOCK, RBLOCK]) tmp172 = tl.sum(tmp170, 1)[:, None] tmp173 = tmp172 / tmp30 tmp174 = tmp140 - tmp173 tmp175 = tmp174 * tmp174 tmp176 = tl.broadcast_to(tmp175, [XBLOCK, RBLOCK]) tmp178 = tl.sum(tmp176, 1)[:, None] tmp179 = libdevice.sqrt(tmp154) tmp180 = libdevice.sqrt(tmp158) tmp181 = tmp179 * tmp180 tmp182 = tmp150 / tmp181 tmp183 = 2.0 tmp184 = tmp182 * tmp183 tmp185 = 63.0 tmp186 = tmp168 / tmp185 tmp187 = libdevice.sqrt(tmp186) tmp188 = tmp184 * tmp187 tmp189 = tmp178 / tmp185 tmp190 = libdevice.sqrt(tmp189) tmp191 = tmp188 * tmp190 tmp192 = tmp187 * tmp187 tmp193 = tmp190 * tmp190 tmp194 = tmp192 + tmp193 tmp195 = tmp143 - tmp145 tmp196 = tmp195 * tmp195 tmp197 = tmp194 + tmp196 tmp198 = tmp191 / tmp197 tmp199 = libdevice.sqrt(tmp110) tmp200 = libdevice.sqrt(tmp114) tmp201 = tmp199 * tmp200 tmp202 = tmp106 / tmp201 tmp203 = tmp202 * tmp183 tmp204 = tmp124 / tmp185 tmp205 = libdevice.sqrt(tmp204) tmp206 = tmp203 * tmp205 tmp207 = tmp134 / tmp185 tmp208 = libdevice.sqrt(tmp207) tmp209 = tmp206 * tmp208 tmp210 = tmp205 * tmp205 tmp211 = tmp208 * tmp208 tmp212 = tmp210 + tmp211 tmp213 = tmp99 - tmp101 tmp214 = tmp213 * tmp213 tmp215 = tmp212 + tmp214 tmp216 = tmp209 / tmp215 tmp217 = libdevice.sqrt(tmp66) tmp218 = libdevice.sqrt(tmp70) tmp219 = tmp217 * tmp218 tmp220 = tmp62 / tmp219 tmp221 = tmp220 * tmp183 tmp222 = tmp80 / tmp185 tmp223 = libdevice.sqrt(tmp222) tmp224 = tmp221 * tmp223 tmp225 = tmp90 / tmp185 tmp226 = libdevice.sqrt(tmp225) tmp227 = tmp224 * tmp226 tmp228 = tmp223 * tmp223 tmp229 = tmp226 * tmp226 tmp230 = tmp228 + tmp229 tmp231 = tmp55 - tmp57 tmp232 = tmp231 * tmp231 tmp233 = tmp230 + tmp232 tmp234 = tmp227 / tmp233 tmp235 
= libdevice.sqrt(tmp20) tmp236 = libdevice.sqrt(tmp24) tmp237 = tmp235 * tmp236 tmp238 = tmp16 / tmp237 tmp239 = tmp238 * tmp183 tmp240 = tmp36 / tmp185 tmp241 = libdevice.sqrt(tmp240) tmp242 = tmp239 * tmp241 tmp243 = tmp46 / tmp185 tmp244 = libdevice.sqrt(tmp243) tmp245 = tmp242 * tmp244 tmp246 = tmp241 * tmp241 tmp247 = tmp244 * tmp244 tmp248 = tmp246 + tmp247 tmp249 = tmp9 - tmp11 tmp250 = tmp249 * tmp249 tmp251 = tmp248 + tmp250 tmp252 = tmp245 / tmp251 tmp253 = 0.0 tmp254 = tmp198 + tmp253 tmp255 = tmp254 + tmp216 tmp256 = tmp255 + tmp234 tmp257 = tmp256 + tmp252 tmp258 = -tmp257 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp258, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf11 = empty_strided_cuda((), (), torch.float32) buf13 = buf11 del buf11 buf56 = buf13 del buf13 get_raw_stream(0) triton_per_fused_add_div_mean_mul_neg_pow_sqrt_std_sub_sum_0[grid(1)]( buf56, arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf56, class My_lossNew(torch.nn.Module): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Shelly-Lee/ICCV-2021-Competition-Valence-Arousal-Challenge
My_loss
false
14,411
[ "MIT" ]
58
b3816ef4d4ba7b98c2f9ddd0dd3942d7a666777a
https://github.com/Shelly-Lee/ICCV-2021-Competition-Valence-Arousal-Challenge/tree/b3816ef4d4ba7b98c2f9ddd0dd3942d7a666777a
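For reference (not part of the record): the undocumented loss above is the negative sum, over the second dimension, of Lin's concordance correlation coefficient (CCC); torch.std's Bessel correction is what appears as the 63.0 divisor in the fused kernel (64 elements per flattened column). Per column,

\rho = \frac{\sum_i (x_i - \mu_x)(y_i - \mu_y)}
            {\sqrt{\sum_i (x_i - \mu_x)^2}\,\sqrt{\sum_i (y_i - \mu_y)^2}},
\qquad
\mathrm{CCC}(x, y) = \frac{2\,\rho\,\sigma_x \sigma_y}
                          {\sigma_x^2 + \sigma_y^2 + (\mu_x - \mu_y)^2},
\qquad
\mathcal{L} = -\sum_c \mathrm{CCC}(x_{:,c},\, y_{:,c}).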
UnaryBlock
import torch import torch.utils.data import torch.nn as nn from torch.nn.parameter import Parameter class BatchNormBlock(nn.Module): def __init__(self, in_dim, use_bn, bn_momentum): """ Initialize a batch normalization block. If network does not use batch normalization, replace with biases. :param in_dim: dimension input features :param use_bn: boolean indicating if we use Batch Norm :param bn_momentum: Batch norm momentum """ super(BatchNormBlock, self).__init__() self.bn_momentum = bn_momentum self.use_bn = use_bn self.in_dim = in_dim if self.use_bn: self.batch_norm = nn.InstanceNorm1d(in_dim, momentum=bn_momentum) else: self.bias = Parameter(torch.zeros(in_dim, dtype=torch.float32), requires_grad=True) return def reset_parameters(self): nn.init.zeros_(self.bias) def forward(self, x): if self.use_bn: x = x.unsqueeze(2) x = x.transpose(0, 2) x = self.batch_norm(x) x = x.transpose(0, 2) return x.squeeze() else: return x + self.bias def __repr__(self): return ( 'BatchNormBlock(in_feat: {:d}, momentum: {:.3f}, only_bias: {:s})' .format(self.in_dim, self.bn_momentum, str(not self.use_bn))) class UnaryBlock(nn.Module): def __init__(self, in_dim, out_dim, use_bn, bn_momentum, no_relu=False): """ Initialize a standard unary block with its ReLU and BatchNorm. :param in_dim: dimension input features :param out_dim: dimension input features :param use_bn: boolean indicating if we use Batch Norm :param bn_momentum: Batch norm momentum """ super(UnaryBlock, self).__init__() self.bn_momentum = bn_momentum self.use_bn = use_bn self.no_relu = no_relu self.in_dim = in_dim self.out_dim = out_dim self.mlp = nn.Linear(in_dim, out_dim, bias=False) self.batch_norm = BatchNormBlock(out_dim, self.use_bn, self.bn_momentum ) if not no_relu: self.leaky_relu = nn.LeakyReLU(0.1) return def forward(self, x, batch=None): x = self.mlp(x) x = self.batch_norm(x) if not self.no_relu: x = self.leaky_relu(x) return x def __repr__(self): return ( 'UnaryBlock(in_feat: {:d}, out_feat: {:d}, BN: {:s}, ReLU: {:s})' .format(self.in_dim, self.out_dim, str(self.use_bn), str(not self.no_relu))) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4, 'use_bn': 4, 'bn_momentum': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch.nn as nn from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__native_batch_norm_legit_clone_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + (4 + x0), xmask) tmp3 = tl.load(in_ptr0 + (8 + x0), xmask) tmp5 = tl.load(in_ptr0 + (12 + x0), xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + x0, tmp8, xmask) tl.store(out_ptr1 + x0, tmp23, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (x1 + 4 * y0), xmask & ymask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.1 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tl.store(out_ptr0 + (y0 + 4 * x1), tmp9, xmask & ymask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32) buf2 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32) get_raw_stream(0) triton_poi_fused__native_batch_norm_legit_clone_0[grid(4)](buf0, buf1, buf2, 4, XBLOCK=4, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4), (1, 4), torch.float32) triton_poi_fused_leaky_relu_1[grid(4, 4)](buf0, buf1, buf2, buf3, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1) del buf1 del buf2 return buf3, primals_2, buf0 class BatchNormBlock(nn.Module): def __init__(self, in_dim, use_bn, bn_momentum): """ Initialize a batch normalization block. If network does not use batch normalization, replace with biases. 
:param in_dim: dimension input features :param use_bn: boolean indicating if we use Batch Norm :param bn_momentum: Batch norm momentum """ super(BatchNormBlock, self).__init__() self.bn_momentum = bn_momentum self.use_bn = use_bn self.in_dim = in_dim if self.use_bn: self.batch_norm = nn.InstanceNorm1d(in_dim, momentum=bn_momentum) else: self.bias = Parameter(torch.zeros(in_dim, dtype=torch.float32), requires_grad=True) return def reset_parameters(self): nn.init.zeros_(self.bias) def forward(self, x): if self.use_bn: x = x.unsqueeze(2) x = x.transpose(0, 2) x = self.batch_norm(x) x = x.transpose(0, 2) return x.squeeze() else: return x + self.bias def __repr__(self): return ( 'BatchNormBlock(in_feat: {:d}, momentum: {:.3f}, only_bias: {:s})' .format(self.in_dim, self.bn_momentum, str(not self.use_bn))) class UnaryBlockNew(nn.Module): def __init__(self, in_dim, out_dim, use_bn, bn_momentum, no_relu=False): """ Initialize a standard unary block with its ReLU and BatchNorm. :param in_dim: dimension input features :param out_dim: dimension input features :param use_bn: boolean indicating if we use Batch Norm :param bn_momentum: Batch norm momentum """ super(UnaryBlockNew, self).__init__() self.bn_momentum = bn_momentum self.use_bn = use_bn self.no_relu = no_relu self.in_dim = in_dim self.out_dim = out_dim self.mlp = nn.Linear(in_dim, out_dim, bias=False) self.batch_norm = BatchNormBlock(out_dim, self.use_bn, self.bn_momentum ) if not no_relu: self.leaky_relu = nn.LeakyReLU(0.1) return def __repr__(self): return ( 'UnaryBlock(in_feat: {:d}, out_feat: {:d}, BN: {:s}, ReLU: {:s})' .format(self.in_dim, self.out_dim, str(self.use_bn), str(not self.no_relu))) def forward(self, input_0): primals_1 = self.mlp.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
ShengyuH/PredateOverlap
UnaryBlock
false
14,412
[ "MIT" ]
153
770c3063399f08b3836935212ab4c84d355b4704
https://github.com/ShengyuH/PredateOverlap/tree/770c3063399f08b3836935212ab4c84d355b4704
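Usage sketch for the record above (not part of the record): a bias-free Linear followed by the instance-norm wrapper (when use_bn is truthy) and LeakyReLU(0.1). The eager block runs on CPU with 2D point-feature input; the compiled UnaryBlockNew expects CUDA tensors.

import torch

block = UnaryBlock(in_dim=4, out_dim=4, use_bn=True, bn_momentum=0.1)
feats = torch.rand(4, 4)   # (num_points, in_dim)
out = block(feats)         # (num_points, out_dim), normalized then LeakyReLU
print(block)               # UnaryBlock(in_feat: 4, out_feat: 4, BN: True, ReLU: True)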
LinearNet
import torch import torch.nn as nn from collections import OrderedDict from itertools import tee def pairwise(iterable): """s -> (s0,s1), (s1,s2), (s2, s3), ...""" a, b = tee(iterable) next(b, None) return zip(a, b) class LayerNorm(nn.Module): def __init__(self, features, eps=1e-06): super().__init__() self.gamma = nn.Parameter(torch.ones(features)) self.beta = nn.Parameter(torch.zeros(features)) self.eps = eps def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) return self.gamma * (x - mean) / (std + self.eps) + self.beta class LinearNet(nn.Module): def __init__(self, layers, activation=torch.nn.ELU, layer_norm=False, linear_layer=nn.Linear): super(LinearNet, self).__init__() self.input_shape = layers[0] self.output_shape = layers[-1] if layer_norm: def layer_fn(layer): return [('linear_{}'.format(layer[0]), linear_layer(layer[1 ][0], layer[1][1])), ('layer_norm_{}'.format(layer[0]), LayerNorm(layer[1][1])), ('act_{}'.format(layer[0]), activation())] else: def layer_fn(layer): return [('linear_{}'.format(layer[0]), linear_layer(layer[1 ][0], layer[1][1])), ('act_{}'.format(layer[0]), activation())] self.net = torch.nn.Sequential(OrderedDict([x for y in map(lambda layer: layer_fn(layer), enumerate(pairwise(layers))) for x in y])) def forward(self, x): x = self.net.forward(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'layers': [4, 4]}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from collections import OrderedDict from itertools import tee assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_elu_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0 def pairwise(iterable): """s -> (s0,s1), (s1,s2), (s2, s3), ...""" a, b = tee(iterable) next(b, None) return zip(a, b) class LayerNorm(nn.Module): def __init__(self, features, eps=1e-06): super().__init__() self.gamma = nn.Parameter(torch.ones(features)) self.beta = nn.Parameter(torch.zeros(features)) self.eps = eps def forward(self, x): mean = x.mean(-1, keepdim=True) std = x.std(-1, keepdim=True) return self.gamma * (x - mean) / (std + self.eps) + self.beta class LinearNetNew(nn.Module): def __init__(self, layers, activation=torch.nn.ELU, layer_norm=False, linear_layer=nn.Linear): super(LinearNetNew, self).__init__() self.input_shape = layers[0] self.output_shape = layers[-1] if layer_norm: def layer_fn(layer): return [('linear_{}'.format(layer[0]), linear_layer(layer[1 ][0], layer[1][1])), ('layer_norm_{}'.format(layer[0]), LayerNorm(layer[1][1])), ('act_{}'.format(layer[0]), activation())] else: def layer_fn(layer): return [('linear_{}'.format(layer[0]), linear_layer(layer[1 ][0], layer[1][1])), ('act_{}'.format(layer[0]), activation())] self.net = torch.nn.Sequential(OrderedDict([x for y in map(lambda layer: layer_fn(layer), enumerate(pairwise(layers))) for x in y])) def forward(self, input_0): primals_1 = self.net.linear_0.weight primals_2 = self.net.linear_0.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Shmuma/Run-Skeleton-Run
LinearNet
false
14,413
[ "MIT" ]
92
a953e6c524a444b6a99a54ef5b2886a57de0d185
https://github.com/Shmuma/Run-Skeleton-Run/tree/a953e6c524a444b6a99a54ef5b2886a57de0d185
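Sketch of how pairwise drives construction in the record above (not part of the record): a layer-size list becomes consecutive (in, out) pairs, each wrapped as Linear + ELU (with an extra LayerNorm when layer_norm=True). Eager module, CPU-friendly.

import torch

net = LinearNet(layers=[4, 8, 2])   # builds linear_0: 4->8, linear_1: 8->2
y = net(torch.rand(4, 4, 4, 4))     # ELU after every linear layer
assert y.shape == (4, 4, 4, 2)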
FastRNNCell
import torch
import torch.nn as nn
import torch.onnx
from itertools import product as product


def gen_nonlinearity(A, nonlinearity):
    """
    Returns required activation for a tensor based on the inputs

    nonlinearity is either a callable or a value in
        ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
    """
    if nonlinearity == 'tanh':
        return torch.tanh(A)
    elif nonlinearity == 'sigmoid':
        return torch.sigmoid(A)
    elif nonlinearity == 'relu':
        return torch.relu(A)
    elif nonlinearity == 'quantTanh':
        return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
            ones_like(A))
    elif nonlinearity == 'quantSigm':
        A = (A + 1.0) / 2.0
        return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
    elif nonlinearity == 'quantSigm4':
        A = (A + 2.0) / 4.0
        return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
    else:
        if not callable(nonlinearity):
            raise ValueError(
                'nonlinearity is either a callable or a value in ' +
                "['tanh', 'sigmoid', 'relu', 'quantTanh', " +
                "'quantSigm', 'quantSigm4']")
        return nonlinearity(A)


class RNNCell(nn.Module):

    def __init__(self, input_size, hidden_size, gate_nonlinearity,
        update_nonlinearity, num_W_matrices, num_U_matrices, num_biases,
        wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0):
        super(RNNCell, self).__init__()
        self._input_size = input_size
        self._hidden_size = hidden_size
        self._gate_nonlinearity = gate_nonlinearity
        self._update_nonlinearity = update_nonlinearity
        self._num_W_matrices = num_W_matrices
        self._num_U_matrices = num_U_matrices
        self._num_biases = num_biases
        self._num_weight_matrices = [self._num_W_matrices, self.
            _num_U_matrices, self._num_biases]
        self._wRank = wRank
        self._uRank = uRank
        self._wSparsity = wSparsity
        self._uSparsity = uSparsity
        self.oldmats = []

    @property
    def state_size(self):
        return self._hidden_size

    @property
    def input_size(self):
        return self._input_size

    @property
    def output_size(self):
        return self._hidden_size

    @property
    def gate_nonlinearity(self):
        return self._gate_nonlinearity

    @property
    def update_nonlinearity(self):
        return self._update_nonlinearity

    @property
    def wRank(self):
        return self._wRank

    @property
    def uRank(self):
        return self._uRank

    @property
    def num_W_matrices(self):
        return self._num_W_matrices

    @property
    def num_U_matrices(self):
        return self._num_U_matrices

    @property
    def num_weight_matrices(self):
        return self._num_weight_matrices

    @property
    def name(self):
        raise NotImplementedError()

    def forward(self, input, state):
        raise NotImplementedError()

    def getVars(self):
        raise NotImplementedError()

    def get_model_size(self):
        """
        Function to get aimed model size
        """
        mats = self.getVars()
        endW = self._num_W_matrices
        endU = endW + self._num_U_matrices
        totalnnz = 2
        for i in range(0, endW):
            mats[i].device
            totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
            mats[i]
        for i in range(endW, endU):
            mats[i].device
            totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
            mats[i]
        for i in range(endU, len(mats)):
            mats[i].device
            totalnnz += utils.countNNZ(mats[i].cpu(), False)
            mats[i]
        return totalnnz * 4

    def copy_previous_UW(self):
        mats = self.getVars()
        num_mats = self._num_W_matrices + self._num_U_matrices
        if len(self.oldmats) != num_mats:
            for i in range(num_mats):
                self.oldmats.append(torch.FloatTensor())
        for i in range(num_mats):
            self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone())

    def sparsify(self):
        mats = self.getVars()
        endW = self._num_W_matrices
        endU = endW + self._num_U_matrices
        for i in range(0, endW):
            mats[i] = utils.hardThreshold(mats[i], self._wSparsity)
        for i in range(endW, endU):
            mats[i] = 
utils.hardThreshold(mats[i], self._uSparsity) self.W.data.copy_(mats[0]) self.U.data.copy_(mats[1]) def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class FastRNNCell(RNNCell): """ FastRNN Cell with Both Full Rank and Low Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units update_nonlinearity = nonlinearity for final rnn update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates two matrices if not None) uRank = rank of U matrix (creates two matrices if not None) wSparsity = intended sparsity of W matrix(ces) uSparsity = intended sparsity of U matrix(ces) Warning: The Cell will not automatically sparsify. The user must invoke .sparsify to hard threshold. alphaInit = init for alpha, the update scalar betaInit = init for beta, the weight for previous state FastRNN architecture and compression techniques are found in FastGRNN(LINK) paper Basic architecture is like: h_t^ = update_nl(Wx_t + Uh_{t-1} + B_h) h_t = sigmoid(beta)*h_{t-1} + sigmoid(alpha)*h_t^ W and U can further parameterised into low rank version by W = matmul(W_1, W_2) and U = matmul(U_1, U_2) """ def __init__(self, input_size, hidden_size, update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, alphaInit=- 3.0, betaInit=3.0, name='FastRNN'): super(FastRNNCell, self).__init__(input_size, hidden_size, None, update_nonlinearity, 1, 1, 1, wRank, uRank, wSparsity, uSparsity) self._alphaInit = alphaInit self._betaInit = betaInit if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])) else: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size]) ) else: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self.alpha = nn.Parameter(self._alphaInit * torch.ones([1, 1])) self.beta = nn.Parameter(self._betaInit * torch.ones([1, 1])) @property def name(self): return self._name @property def cellType(self): return 'FastRNN' def forward(self, input, state): if self._wRank is None: wComp = torch.matmul(input, self.W) else: wComp = torch.matmul(torch.matmul(input, self.W1), self.W2) if self._uRank is None: uComp = torch.matmul(state, self.U) else: uComp = torch.matmul(torch.matmul(state, self.U1), self.U2) pre_comp = wComp + uComp c = gen_nonlinearity(pre_comp + self.bias_update, self. _update_nonlinearity) new_h = torch.sigmoid(self.beta) * state + torch.sigmoid(self.alpha ) * c return new_h def getVars(self): Vars = [] if self._num_W_matrices == 1: Vars.append(self.W) else: Vars.extend([self.W1, self.W2]) if self._num_U_matrices == 1: Vars.append(self.U) else: Vars.extend([self.U1, self.U2]) Vars.extend([self.bias_update]) Vars.extend([self.alpha, self.beta]) return Vars def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.onnx
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_0(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr2 + 0)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
    tmp9 = tl.load(in_ptr3 + x2, xmask)
    tmp11 = tl.load(in_ptr4 + 0)
    tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp5 = libdevice.tanh(tmp4)
    tmp8 = tl.sigmoid(tmp7)
    tmp10 = tmp8 * tmp9
    tmp13 = tl.sigmoid(tmp12)
    tmp14 = tmp13 * tmp5
    tmp15 = tmp10 + tmp14
    tl.store(in_out_ptr0 + x2, tmp5, xmask)
    tl.store(out_ptr0 + x2, tmp15, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_5, (1, 4), (4, 1))
    assert_size_stride(primals_6, (1, 1), (1, 1))
    assert_size_stride(primals_7, (1, 1), (1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
            primals_1, out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
            primals_3, out=buf1)
        del primals_3
        buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_sigmoid_tanh_0[grid(256)](buf2, buf1,
            primals_5, primals_6, primals_4, primals_7, buf3, 256, XBLOCK=
            128, num_warps=4, num_stages=1)
        del buf1
        del primals_5
    return buf3, primals_4, primals_6, primals_7, buf2, reinterpret_tensor(
        primals_2, (4, 64), (1, 4), 0)


def gen_nonlinearity(A, nonlinearity):
    """
    Returns required activation for a tensor based on the inputs

    nonlinearity is either a callable or a value in
        ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
    """
    if nonlinearity == 'tanh':
        return torch.tanh(A)
    elif nonlinearity == 'sigmoid':
        return torch.sigmoid(A)
    elif nonlinearity == 'relu':
        return torch.relu(A)
    elif nonlinearity == 'quantTanh':
        return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] = utils.hardThreshold(mats[i], self._uSparsity) self.W.data.copy_(mats[0]) self.U.data.copy_(mats[1]) def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class FastRNNCellNew(RNNCell): """ FastRNN Cell with Both Full Rank and Low Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units update_nonlinearity = nonlinearity for final rnn update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank 
of W matrix (creates two matrices if not None) uRank = rank of U matrix (creates two matrices if not None) wSparsity = intended sparsity of W matrix(ces) uSparsity = intended sparsity of U matrix(ces) Warning: The Cell will not automatically sparsify. The user must invoke .sparsify to hard threshold. alphaInit = init for alpha, the update scalar betaInit = init for beta, the weight for previous state FastRNN architecture and compression techniques are found in FastGRNN(LINK) paper Basic architecture is like: h_t^ = update_nl(Wx_t + Uh_{t-1} + B_h) h_t = sigmoid(beta)*h_{t-1} + sigmoid(alpha)*h_t^ W and U can further parameterised into low rank version by W = matmul(W_1, W_2) and U = matmul(U_1, U_2) """ def __init__(self, input_size, hidden_size, update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, alphaInit=- 3.0, betaInit=3.0, name='FastRNN'): super(FastRNNCellNew, self).__init__(input_size, hidden_size, None, update_nonlinearity, 1, 1, 1, wRank, uRank, wSparsity, uSparsity) self._alphaInit = alphaInit self._betaInit = betaInit if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])) else: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size]) ) else: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self.alpha = nn.Parameter(self._alphaInit * torch.ones([1, 1])) self.beta = nn.Parameter(self._betaInit * torch.ones([1, 1])) @property def name(self): return self._name @property def cellType(self): return 'FastRNN' def getVars(self): Vars = [] if self._num_W_matrices == 1: Vars.append(self.W) else: Vars.extend([self.W1, self.W2]) if self._num_U_matrices == 1: Vars.append(self.U) else: Vars.extend([self.U1, self.U2]) Vars.extend([self.bias_update]) Vars.extend([self.alpha, self.beta]) return Vars def forward(self, input_0, input_1): primals_1 = self.W primals_3 = self.U primals_5 = self.bias_update primals_6 = self.alpha primals_7 = self.beta primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
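Throughout the optimised modules in this section the pattern is the same: the matmuls are dispatched to cuBLAS via extern_kernels.mm, and only the elementwise tail (bias add, tanh, sigmoid-scaled blend) is fused into a single Triton kernel, so the pre-activation never round-trips through global memory between ops. Below is a minimal sketch of that fusion idiom for the FastRNN tail, assuming a recent Triton release; it is a standalone illustration, not the generated kernel, and fused_blend and the block size are made-up names/choices.

import torch
import triton
import triton.language as tl


@triton.jit
def fused_blend_kernel(pre_ptr, h_ptr, out_ptr, alpha, beta, n_elements,
                       BLOCK: tl.constexpr):
    # Each program instance handles BLOCK contiguous elements.
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n_elements
    pre = tl.load(pre_ptr + offs, mask=mask)
    h = tl.load(h_ptr + offs, mask=mask)
    # tanh(x) = 2*sigmoid(2x) - 1, avoiding a libdevice dependency here
    c = 2.0 * tl.sigmoid(2.0 * pre) - 1.0
    # h_t = sigmoid(beta) * h_{t-1} + sigmoid(alpha) * h_t^
    out = tl.sigmoid(beta) * h + tl.sigmoid(alpha) * c
    tl.store(out_ptr + offs, out, mask=mask)


def fused_blend(pre, h, alpha, beta, BLOCK=1024):
    # pre = Wx + Uh + b, already produced by the two cuBLAS matmuls
    out = torch.empty_like(pre)
    n = pre.numel()
    grid = (triton.cdiv(n, BLOCK),)
    fused_blend_kernel[grid](pre, h, out, float(alpha), float(beta), n,
                             BLOCK=BLOCK)
    return out

The generated triton_poi_fused_add_mul_sigmoid_tanh_0 above does the same thing, except it also writes the tanh activation back in place (in_out_ptr0) because the backward pass reuses it.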
Shenzhen-Cloudatawalk-Technology-Co-Ltd/EdgeML
FastRNNCell
false
14414
[ "MIT" ]
719
ef9f8a77f096acbdeb941014791f8eda1c1bc35b
https://github.com/Shenzhen-Cloudatawalk-Technology-Co-Ltd/EdgeML/tree/ef9f8a77f096acbdeb941014791f8eda1c1bc35b
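For readers skimming the FastRNN record above: ignoring the low-rank branches, the whole cell is two matmuls, a bias add, and a gated blend. A plain-PyTorch reference step is sketched below; fastrnn_step is an illustrative helper, not part of the repository, and assumes the default tanh update nonlinearity.

import torch


def fastrnn_step(x, h, W, U, b, alpha, beta):
    # Candidate state: h_t^ = tanh(W x_t + U h_{t-1} + b_h)
    c = torch.tanh(x @ W + h @ U + b)
    # Blend: h_t = sigmoid(beta) * h_{t-1} + sigmoid(alpha) * h_t^
    return torch.sigmoid(beta) * h + torch.sigmoid(alpha) * c

With the default alphaInit=-3.0 and betaInit=3.0, sigmoid(alpha) is about 0.05 and sigmoid(beta) about 0.95, so the cell starts as a near-identity map across time steps; that residual-style initialisation is the stability argument made in the FastGRNN paper.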
ProtoNN
import torch
import numpy as np
import torch.nn as nn
import torch.onnx
from itertools import product as product


class ProtoNN(nn.Module):

    def __init__(self, inputDimension, projectionDimension, numPrototypes,
                 numOutputLabels, gamma, W=None, B=None, Z=None):
        """
        Forward computation graph for ProtoNN.

        inputDimension: Input data dimension or feature dimension.
        projectionDimension: hyperparameter
        numPrototypes: hyperparameter
        numOutputLabels: The number of output labels or classes
        W, B, Z: Numpy matrices that can be used to initialize
            projection matrix (W), prototype matrix (B) and prototype labels
            matrix (Z).
            Expected Dimensions:
                W   inputDimension (d) x projectionDimension (d_cap)
                B   projectionDimension (d_cap) x numPrototypes (m)
                Z   numOutputLabels (L) x numPrototypes (m)
        """
        super(ProtoNN, self).__init__()
        self.__d = inputDimension
        self.__d_cap = projectionDimension
        self.__m = numPrototypes
        self.__L = numOutputLabels
        self.W, self.B, self.Z = None, None, None
        self.gamma = gamma
        self.__validInit = False
        self.__initWBZ(W, B, Z)
        self.__validateInit()

    def __validateInit(self):
        self.__validInit = False
        errmsg = 'Dimensions mismatch! Should be W[d, d_cap]'
        errmsg += ', B[d_cap, m] and Z[L, m]'
        d, d_cap, m, L, _ = self.getHyperParams()
        assert self.W.shape[0] == d, errmsg
        assert self.W.shape[1] == d_cap, errmsg
        assert self.B.shape[0] == d_cap, errmsg
        assert self.B.shape[1] == m, errmsg
        assert self.Z.shape[0] == L, errmsg
        assert self.Z.shape[1] == m, errmsg
        self.__validInit = True

    def __initWBZ(self, inW, inB, inZ):
        if inW is None:
            self.W = torch.randn([self.__d, self.__d_cap])
            self.W = nn.Parameter(self.W)
        else:
            self.W = nn.Parameter(torch.from_numpy(inW.astype(np.float32)))
        if inB is None:
            self.B = torch.randn([self.__d_cap, self.__m])
            self.B = nn.Parameter(self.B)
        else:
            self.B = nn.Parameter(torch.from_numpy(inB.astype(np.float32)))
        if inZ is None:
            self.Z = torch.randn([self.__L, self.__m])
            self.Z = nn.Parameter(self.Z)
        else:
            self.Z = nn.Parameter(torch.from_numpy(inZ.astype(np.float32)))

    def getHyperParams(self):
        """
        Returns the model hyperparameters:
        [inputDimension, projectionDimension, numPrototypes,
        numOutputLabels, gamma]
        """
        d = self.__d
        dcap = self.__d_cap
        m = self.__m
        L = self.__L
        return d, dcap, m, L, self.gamma

    def getModelMatrices(self):
        """
        Returns model matrices, which can then be evaluated to obtain
        corresponding numpy arrays. These can then be exported as part of
        other implementations of ProtoNN, for instance a C++ implementation
        or pure python implementation.
        Returns
            [ProjectionMatrix (W), prototypeMatrix (B),
            prototypeLabelsMatrix (Z), gamma]
        """
        return self.W, self.B, self.Z, self.gamma

    def forward(self, X):
        """
        This method is responsible for construction of the forward computation
        graph. The end point of the computation graph, or in other words the
        output operator for the forward computation is returned.

        X: Input of shape [-1, inputDimension]
        returns: The forward computation outputs, self.protoNNOut
        """
        assert self.__validInit is True, 'Initialization failed!'
        W, B, Z, gamma = self.W, self.B, self.Z, self.gamma
        WX = torch.matmul(X, W)
        dim = [-1, WX.shape[1], 1]
        WX = torch.reshape(WX, dim)
        dim = [1, B.shape[0], -1]
        B_ = torch.reshape(B, dim)
        l2sim = B_ - WX
        l2sim = torch.pow(l2sim, 2)
        l2sim = torch.sum(l2sim, dim=1, keepdim=True)
        self.l2sim = l2sim
        gammal2sim = -1 * gamma * gamma * l2sim
        M = torch.exp(gammal2sim)
        dim = [1] + list(Z.shape)
        Z_ = torch.reshape(Z, dim)
        y = Z_ * M
        y = torch.sum(y, dim=2)
        return y


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'inputDimension': 4, 'projectionDimension': 4,
        'numPrototypes': 4, 'numOutputLabels': 4, 'gamma': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn as nn import torch.onnx from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + x2, tmp18, xmask) @triton.jit def triton_poi_fused_exp_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp13 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp2 = -16.0 tmp3 = tmp1 * tmp2 tmp4 = tl_math.exp(tmp3) tmp5 = tmp0 * tmp4 tmp8 = tmp7 * tmp2 tmp9 = tl_math.exp(tmp8) tmp10 = tmp6 * tmp9 tmp11 = tmp5 + tmp10 tmp14 = tmp13 * tmp2 tmp15 = tl_math.exp(tmp14) tmp16 = tmp12 * tmp15 tmp17 = tmp11 + tmp16 tmp20 = tmp19 * tmp2 tmp21 = tl_math.exp(tmp20) tmp22 = tmp18 * tmp21 tmp23 = tmp17 + tmp22 tl.store(out_ptr0 + x2, tmp23, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 1, 4), (4, 4, 1), torch.float32) get_raw_stream(0) 
triton_poi_fused_pow_sub_sum_0[grid(256)](primals_2, buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused_exp_mul_sum_1[grid(256)](primals_3, buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf2, buf1, primals_2, primals_3, buf0, buf1, reinterpret_tensor( primals_4, (4, 64), (1, 4), 0) class ProtoNNNew(nn.Module): def __init__(self, inputDimension, projectionDimension, numPrototypes, numOutputLabels, gamma, W=None, B=None, Z=None): """ Forward computation graph for ProtoNN. inputDimension: Input data dimension or feature dimension. projectionDimension: hyperparameter numPrototypes: hyperparameter numOutputLabels: The number of output labels or classes W, B, Z: Numpy matrices that can be used to initialize projection matrix(W), prototype matrix (B) and prototype labels matrix (B). Expected Dimensions: W inputDimension (d) x projectionDimension (d_cap) B projectionDimension (d_cap) x numPrototypes (m) Z numOutputLabels (L) x numPrototypes (m) """ super(ProtoNNNew, self).__init__() self.__d = inputDimension self.__d_cap = projectionDimension self.__m = numPrototypes self.__L = numOutputLabels self.W, self.B, self.Z = None, None, None self.gamma = gamma self.__validInit = False self.__initWBZ(W, B, Z) self.__validateInit() def __validateInit(self): self.__validinit = False errmsg = 'Dimensions mismatch! Should be W[d, d_cap]' errmsg += ', B[d_cap, m] and Z[L, m]' d, d_cap, m, L, _ = self.getHyperParams() assert self.W.shape[0] == d, errmsg assert self.W.shape[1] == d_cap, errmsg assert self.B.shape[0] == d_cap, errmsg assert self.B.shape[1] == m, errmsg assert self.Z.shape[0] == L, errmsg assert self.Z.shape[1] == m, errmsg self.__validInit = True def __initWBZ(self, inW, inB, inZ): if inW is None: self.W = torch.randn([self.__d, self.__d_cap]) self.W = nn.Parameter(self.W) else: self.W = nn.Parameter(torch.from_numpy(inW.astype(np.float32))) if inB is None: self.B = torch.randn([self.__d_cap, self.__m]) self.B = nn.Parameter(self.B) else: self.B = nn.Parameter(torch.from_numpy(inB.astype(np.float32))) if inZ is None: self.Z = torch.randn([self.__L, self.__m]) self.Z = nn.Parameter(self.Z) else: self.Z = nn.Parameter(torch.from_numpy(inZ.astype(np.float32))) def getHyperParams(self): """ Returns the model hyperparameters: [inputDimension, projectionDimension, numPrototypes, numOutputLabels, gamma] """ d = self.__d dcap = self.__d_cap m = self.__m L = self.__L return d, dcap, m, L, self.gamma def getModelMatrices(self): """ Returns model matrices, which can then be evaluated to obtain corresponding numpy arrays. These can then be exported as part of other implementations of ProtonNN, for instance a C++ implementation or pure python implementation. Returns [ProjectionMatrix (W), prototypeMatrix (B), prototypeLabelsMatrix (Z), gamma] """ return self.W, self.B, self.Z, self.gamma def forward(self, input_0): primals_1 = self.W primals_2 = self.B primals_3 = self.Z primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Shenzhen-Cloudatawalk-Technology-Co-Ltd/EdgeML
ProtoNN
false
14415
[ "MIT" ]
719
ef9f8a77f096acbdeb941014791f8eda1c1bc35b
https://github.com/Shenzhen-Cloudatawalk-Technology-Co-Ltd/EdgeML/tree/ef9f8a77f096acbdeb941014791f8eda1c1bc35b
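The ProtoNN record above scores an input by its RBF similarity to m learned prototypes: project with W, measure squared L2 distance to each column of B, apply exp(-gamma^2 * d^2), and take a Z-weighted sum to get per-label scores. A compact restatement for flat [n, d] inputs is sketched below; protonn_score is an illustrative helper that should agree with ProtoNN.forward for 2-D inputs.

import torch


def protonn_score(X, W, B, Z, gamma):
    # Project: [n, d] @ [d, d_cap] -> [n, d_cap]
    WX = X @ W
    # Squared L2 distance to each prototype column of B: [n, m]
    d2 = ((WX.unsqueeze(2) - B.unsqueeze(0)) ** 2).sum(dim=1)
    # RBF similarities, then label-weighted sum: [n, m] @ [m, L] -> [n, L]
    M = torch.exp(-gamma * gamma * d2)
    return M @ Z.t()

The fused Triton kernels above split this into exactly the same two stages: triton_poi_fused_pow_sub_sum_0 computes d2 and triton_poi_fused_exp_mul_sum_1 folds the constant -gamma^2 = -16.0 for gamma=4 into the exp and reduces over prototypes.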
FastGRNNCell
import torch
import torch.nn as nn
import torch.onnx
from itertools import product as product


def gen_nonlinearity(A, nonlinearity):
    """
    Returns required activation for a tensor based on the inputs

    nonlinearity is either a callable or a value in
        ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
    """
    if nonlinearity == 'tanh':
        return torch.tanh(A)
    elif nonlinearity == 'sigmoid':
        return torch.sigmoid(A)
    elif nonlinearity == 'relu':
        return torch.relu(A)
    elif nonlinearity == 'quantTanh':
        return torch.max(torch.min(A, torch.ones_like(A)), -1.0 *
            torch.ones_like(A))
    elif nonlinearity == 'quantSigm':
        A = (A + 1.0) / 2.0
        return torch.max(torch.min(A, torch.ones_like(A)),
            torch.zeros_like(A))
    elif nonlinearity == 'quantSigm4':
        A = (A + 2.0) / 4.0
        return torch.max(torch.min(A, torch.ones_like(A)),
            torch.zeros_like(A))
    else:
        if not callable(nonlinearity):
            raise ValueError(
                'nonlinearity is either a callable or a value ' +
                "['tanh', 'sigmoid', 'relu', 'quantTanh', " +
                "'quantSigm']")
        return nonlinearity(A)


class RNNCell(nn.Module):

    def __init__(self, input_size, hidden_size, gate_nonlinearity,
                 update_nonlinearity, num_W_matrices, num_U_matrices,
                 num_biases, wRank=None, uRank=None, wSparsity=1.0,
                 uSparsity=1.0):
        super(RNNCell, self).__init__()
        self._input_size = input_size
        self._hidden_size = hidden_size
        self._gate_nonlinearity = gate_nonlinearity
        self._update_nonlinearity = update_nonlinearity
        self._num_W_matrices = num_W_matrices
        self._num_U_matrices = num_U_matrices
        self._num_biases = num_biases
        self._num_weight_matrices = [self._num_W_matrices,
                                     self._num_U_matrices, self._num_biases]
        self._wRank = wRank
        self._uRank = uRank
        self._wSparsity = wSparsity
        self._uSparsity = uSparsity
        self.oldmats = []

    @property
    def state_size(self):
        return self._hidden_size

    @property
    def input_size(self):
        return self._input_size

    @property
    def output_size(self):
        return self._hidden_size

    @property
    def gate_nonlinearity(self):
        return self._gate_nonlinearity

    @property
    def update_nonlinearity(self):
        return self._update_nonlinearity

    @property
    def wRank(self):
        return self._wRank

    @property
    def uRank(self):
        return self._uRank

    @property
    def num_W_matrices(self):
        return self._num_W_matrices

    @property
    def num_U_matrices(self):
        return self._num_U_matrices

    @property
    def num_weight_matrices(self):
        return self._num_weight_matrices

    @property
    def name(self):
        raise NotImplementedError()

    def forward(self, input, state):
        raise NotImplementedError()

    def getVars(self):
        raise NotImplementedError()

    def get_model_size(self):
        """
        Function to get aimed model size
        """
        mats = self.getVars()
        endW = self._num_W_matrices
        endU = endW + self._num_U_matrices
        totalnnz = 2
        for i in range(0, endW):
            mats[i].device
            totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
            mats[i]
        for i in range(endW, endU):
            mats[i].device
            totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
            mats[i]
        for i in range(endU, len(mats)):
            mats[i].device
            totalnnz += utils.countNNZ(mats[i].cpu(), False)
            mats[i]
        return totalnnz * 4

    def copy_previous_UW(self):
        mats = self.getVars()
        num_mats = self._num_W_matrices + self._num_U_matrices
        if len(self.oldmats) != num_mats:
            for i in range(num_mats):
                self.oldmats.append(torch.FloatTensor())
        for i in range(num_mats):
            self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone())

    def sparsify(self):
        mats = self.getVars()
        endW = self._num_W_matrices
        endU = endW + self._num_U_matrices
        for i in range(0, endW):
            mats[i] = utils.hardThreshold(mats[i], self._wSparsity)
        for i in range(endW, endU):
            mats[i] = utils.hardThreshold(mats[i], self._uSparsity)
        self.W.data.copy_(mats[0])
        self.U.data.copy_(mats[1])

    def sparsifyWithSupport(self):
        mats = self.getVars()
        endU = self._num_W_matrices + self._num_U_matrices
        for i in range(0, endU):
            mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i])


class FastGRNNCell(RNNCell):
    """
    FastGRNN Cell with Both Full Rank and Low Rank Formulations
    Has multiple activation functions for the gates
    hidden_size = # hidden units

    gate_nonlinearity = nonlinearity for the gate
    can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
    update_nonlinearity = nonlinearity for final rnn update
    can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]

    wRank = rank of W matrix (creates two matrices if not None)
    uRank = rank of U matrix (creates two matrices if not None)

    wSparsity = intended sparsity of W matrix(ces)
    uSparsity = intended sparsity of U matrix(ces)
    Warning: The Cell will not automatically sparsify.
    The user must invoke .sparsify to hard threshold.

    zetaInit = init for zeta, the scale param
    nuInit = init for nu, the translation param

    FastGRNN architecture and compression techniques are found in
    FastGRNN(LINK) paper

    Basic architecture is like:

    z_t = gate_nl(Wx_t + Uh_{t-1} + B_g)
    h_t^ = update_nl(Wx_t + Uh_{t-1} + B_h)
    h_t = z_t*h_{t-1} + (sigmoid(zeta)(1-z_t) + sigmoid(nu))*h_t^

    W and U can further be parameterised into low rank version by
    W = matmul(W_1, W_2) and U = matmul(U_1, U_2)
    """

    def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid',
                 update_nonlinearity='tanh', wRank=None, uRank=None,
                 wSparsity=1.0, uSparsity=1.0, zetaInit=1.0, nuInit=-4.0,
                 name='FastGRNN'):
        super(FastGRNNCell, self).__init__(input_size, hidden_size,
                                           gate_nonlinearity,
                                           update_nonlinearity, 1, 1, 2,
                                           wRank, uRank, wSparsity, uSparsity)
        self._zetaInit = zetaInit
        self._nuInit = nuInit
        if wRank is not None:
            self._num_W_matrices += 1
            self._num_weight_matrices[0] = self._num_W_matrices
        if uRank is not None:
            self._num_U_matrices += 1
            self._num_weight_matrices[1] = self._num_U_matrices
        self._name = name
        if wRank is None:
            self.W = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]))
        else:
            self.W1 = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
            self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
        if uRank is None:
            self.U = nn.Parameter(0.1 * torch.randn([hidden_size,
                hidden_size]))
        else:
            self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
            self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
        self.bias_gate = nn.Parameter(torch.ones([1, hidden_size]))
        self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))
        self.zeta = nn.Parameter(self._zetaInit * torch.ones([1, 1]))
        self.nu = nn.Parameter(self._nuInit * torch.ones([1, 1]))

    @property
    def name(self):
        return self._name

    @property
    def cellType(self):
        return 'FastGRNN'

    def forward(self, input, state):
        if self._wRank is None:
            wComp = torch.matmul(input, self.W)
        else:
            wComp = torch.matmul(torch.matmul(input, self.W1), self.W2)
        if self._uRank is None:
            uComp = torch.matmul(state, self.U)
        else:
            uComp = torch.matmul(torch.matmul(state, self.U1), self.U2)
        pre_comp = wComp + uComp
        z = gen_nonlinearity(pre_comp + self.bias_gate,
                             self._gate_nonlinearity)
        c = gen_nonlinearity(pre_comp + self.bias_update,
                             self._update_nonlinearity)
        new_h = z * state + (torch.sigmoid(self.zeta) * (1.0 - z) +
                             torch.sigmoid(self.nu)) * c
        return new_h

    def getVars(self):
        Vars = []
        if self._num_W_matrices == 1:
            Vars.append(self.W)
        else:
            Vars.extend([self.W1, self.W2])
        if self._num_U_matrices == 1:
            Vars.append(self.U)
        else:
            Vars.extend([self.U1, self.U2])
        Vars.extend([self.bias_gate, self.bias_update])
        Vars.extend([self.zeta, self.nu])
        return Vars


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.onnx from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex x1 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x0, xmask) tmp8 = tl.load(in_ptr3 + 0) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp14 = tl.load(in_ptr4 + 0) tmp15 = tl.broadcast_to(tmp14, [XBLOCK]) tmp18 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp7 = tmp5 * tmp6 tmp10 = tl.sigmoid(tmp9) tmp11 = 1.0 tmp12 = tmp11 - tmp5 tmp13 = tmp10 * tmp12 tmp16 = tl.sigmoid(tmp15) tmp17 = tmp13 + tmp16 tmp19 = tmp2 + tmp18 tmp20 = libdevice.tanh(tmp19) tmp21 = tmp17 * tmp20 tmp22 = tmp7 + tmp21 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp22, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_5, (1, 4), (4, 1)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1, 1), (1, 1)) assert_size_stride(primals_8, (1, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), primals_3, out=buf1) del primals_3 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf2, buf1, primals_5, primals_4, primals_7, primals_8, primals_6, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 return (buf3, primals_4, primals_5, primals_6, primals_7, primals_8, buf2, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0)) def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A, 0.0) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. 
ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] = utils.hardThreshold(mats[i], self._uSparsity) self.W.data.copy_(mats[0]) self.U.data.copy_(mats[1]) def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class FastGRNNCellNew(RNNCell): """ FastGRNN Cell with Both Full Rank and Low Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units gate_nonlinearity = nonlinearity for the gate can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] update_nonlinearity 
= nonlinearity for final rnn update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates two matrices if not None) uRank = rank of U matrix (creates two matrices if not None) wSparsity = intended sparsity of W matrix(ces) uSparsity = intended sparsity of U matrix(ces) Warning: The Cell will not automatically sparsify. The user must invoke .sparsify to hard threshold. zetaInit = init for zeta, the scale param nuInit = init for nu, the translation param FastGRNN architecture and compression techniques are found in FastGRNN(LINK) paper Basic architecture is like: z_t = gate_nl(Wx_t + Uh_{t-1} + B_g) h_t^ = update_nl(Wx_t + Uh_{t-1} + B_h) h_t = z_t*h_{t-1} + (sigmoid(zeta)(1-z_t) + sigmoid(nu))*h_t^ W and U can further parameterised into low rank version by W = matmul(W_1, W_2) and U = matmul(U_1, U_2) """ def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid', update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, zetaInit=1.0, nuInit=-4.0, name='FastGRNN'): super(FastGRNNCellNew, self).__init__(input_size, hidden_size, gate_nonlinearity, update_nonlinearity, 1, 1, 2, wRank, uRank, wSparsity, uSparsity) self._zetaInit = zetaInit self._nuInit = nuInit if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])) else: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size]) ) else: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_gate = nn.Parameter(torch.ones([1, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self.zeta = nn.Parameter(self._zetaInit * torch.ones([1, 1])) self.nu = nn.Parameter(self._nuInit * torch.ones([1, 1])) @property def name(self): return self._name @property def cellType(self): return 'FastGRNN' def getVars(self): Vars = [] if self._num_W_matrices == 1: Vars.append(self.W) else: Vars.extend([self.W1, self.W2]) if self._num_U_matrices == 1: Vars.append(self.U) else: Vars.extend([self.U1, self.U2]) Vars.extend([self.bias_gate, self.bias_update]) Vars.extend([self.zeta, self.nu]) return Vars def forward(self, input_0, input_1): primals_1 = self.W primals_3 = self.U primals_5 = self.bias_gate primals_6 = self.bias_update primals_7 = self.zeta primals_8 = self.nu primals_2 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
Shenzhen-Cloudatawalk-Technology-Co-Ltd/EdgeML
FastGRNNCell
false
14416
[ "MIT" ]
719
ef9f8a77f096acbdeb941014791f8eda1c1bc35b
https://github.com/Shenzhen-Cloudatawalk-Technology-Co-Ltd/EdgeML/tree/ef9f8a77f096acbdeb941014791f8eda1c1bc35b
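The FastGRNN record above differs from a GRU mainly in that the gate z_t and the candidate h_t^ share a single pre-activation Wx_t + Uh_{t-1}; the fused kernel exploits this by materialising pre_comp once (tmp2, written back through in_out_ptr0) and reusing it for both the sigmoid and tanh branches. A plain-PyTorch reference step is sketched below; fastgrnn_step is an illustrative helper assuming the default sigmoid gate and tanh update.

import torch


def fastgrnn_step(x, h, W, U, b_g, b_h, zeta, nu):
    pre = x @ W + h @ U                 # shared pre-activation
    z = torch.sigmoid(pre + b_g)        # gate
    c = torch.tanh(pre + b_h)           # candidate state
    # h_t = z*h_{t-1} + (sigmoid(zeta)*(1 - z) + sigmoid(nu)) * c
    return z * h + (torch.sigmoid(zeta) * (1.0 - z) + torch.sigmoid(nu)) * c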
UGRNNLRCell
import torch
import torch.nn as nn
import torch.onnx
from itertools import product as product


def gen_nonlinearity(A, nonlinearity):
    """
    Returns required activation for a tensor based on the inputs

    nonlinearity is either a callable or a value in
        ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
    """
    if nonlinearity == 'tanh':
        return torch.tanh(A)
    elif nonlinearity == 'sigmoid':
        return torch.sigmoid(A)
    elif nonlinearity == 'relu':
        return torch.relu(A)
    elif nonlinearity == 'quantTanh':
        return torch.max(torch.min(A, torch.ones_like(A)), -1.0 *
            torch.ones_like(A))
    elif nonlinearity == 'quantSigm':
        A = (A + 1.0) / 2.0
        return torch.max(torch.min(A, torch.ones_like(A)),
            torch.zeros_like(A))
    elif nonlinearity == 'quantSigm4':
        A = (A + 2.0) / 4.0
        return torch.max(torch.min(A, torch.ones_like(A)),
            torch.zeros_like(A))
    else:
        if not callable(nonlinearity):
            raise ValueError(
                'nonlinearity is either a callable or a value ' +
                "['tanh', 'sigmoid', 'relu', 'quantTanh', " +
                "'quantSigm']")
        return nonlinearity(A)


class RNNCell(nn.Module):

    def __init__(self, input_size, hidden_size, gate_nonlinearity,
                 update_nonlinearity, num_W_matrices, num_U_matrices,
                 num_biases, wRank=None, uRank=None, wSparsity=1.0,
                 uSparsity=1.0):
        super(RNNCell, self).__init__()
        self._input_size = input_size
        self._hidden_size = hidden_size
        self._gate_nonlinearity = gate_nonlinearity
        self._update_nonlinearity = update_nonlinearity
        self._num_W_matrices = num_W_matrices
        self._num_U_matrices = num_U_matrices
        self._num_biases = num_biases
        self._num_weight_matrices = [self._num_W_matrices,
                                     self._num_U_matrices, self._num_biases]
        self._wRank = wRank
        self._uRank = uRank
        self._wSparsity = wSparsity
        self._uSparsity = uSparsity
        self.oldmats = []

    @property
    def state_size(self):
        return self._hidden_size

    @property
    def input_size(self):
        return self._input_size

    @property
    def output_size(self):
        return self._hidden_size

    @property
    def gate_nonlinearity(self):
        return self._gate_nonlinearity

    @property
    def update_nonlinearity(self):
        return self._update_nonlinearity

    @property
    def wRank(self):
        return self._wRank

    @property
    def uRank(self):
        return self._uRank

    @property
    def num_W_matrices(self):
        return self._num_W_matrices

    @property
    def num_U_matrices(self):
        return self._num_U_matrices

    @property
    def num_weight_matrices(self):
        return self._num_weight_matrices

    @property
    def name(self):
        raise NotImplementedError()

    def forward(self, input, state):
        raise NotImplementedError()

    def getVars(self):
        raise NotImplementedError()

    def get_model_size(self):
        """
        Function to get aimed model size
        """
        mats = self.getVars()
        endW = self._num_W_matrices
        endU = endW + self._num_U_matrices
        totalnnz = 2
        for i in range(0, endW):
            mats[i].device
            totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
            mats[i]
        for i in range(endW, endU):
            mats[i].device
            totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
            mats[i]
        for i in range(endU, len(mats)):
            mats[i].device
            totalnnz += utils.countNNZ(mats[i].cpu(), False)
            mats[i]
        return totalnnz * 4

    def copy_previous_UW(self):
        mats = self.getVars()
        num_mats = self._num_W_matrices + self._num_U_matrices
        if len(self.oldmats) != num_mats:
            for i in range(num_mats):
                self.oldmats.append(torch.FloatTensor())
        for i in range(num_mats):
            self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone())

    def sparsify(self):
        mats = self.getVars()
        endW = self._num_W_matrices
        endU = endW + self._num_U_matrices
        for i in range(0, endW):
            mats[i] = utils.hardThreshold(mats[i], self._wSparsity)
        for i in range(endW, endU):
            mats[i] = utils.hardThreshold(mats[i], self._uSparsity)
        self.W.data.copy_(mats[0])
        self.U.data.copy_(mats[1])

    def sparsifyWithSupport(self):
        mats = self.getVars()
        endU = self._num_W_matrices + self._num_U_matrices
        for i in range(0, endU):
            mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i])


class UGRNNLRCell(RNNCell):
    """
    UGRNN LR Cell with Both Full Rank and Low Rank Formulations
    Has multiple activation functions for the gates
    hidden_size = # hidden units

    gate_nonlinearity = nonlinearity for the gate
    can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
    update_nonlinearity = nonlinearity for final rnn update
    can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]

    wRank = rank of W matrix
    (creates 3 matrices if not None else creates 2 matrices)
    uRank = rank of U matrix
    (creates 3 matrices if not None else creates 2 matrices)

    UGRNN architecture and compression techniques are found in
    UGRNN(LINK) paper

    Basic architecture is like:

    z_t = gate_nl(W1x_t + U1h_{t-1} + B_g)
    h_t^ = update_nl(W1x_t + U1h_{t-1} + B_h)
    h_t = z_t*h_{t-1} + (1-z_t)*h_t^

    Wi and Ui can further be parameterised into low rank version by
    Wi = matmul(W, W_i) and Ui = matmul(U, U_i)
    """

    def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid',
                 update_nonlinearity='tanh', wRank=None, uRank=None,
                 wSparsity=1.0, uSparsity=1.0, name='UGRNNLR'):
        super(UGRNNLRCell, self).__init__(input_size, hidden_size,
                                          gate_nonlinearity,
                                          update_nonlinearity, 2, 2, 2,
                                          wRank, uRank, wSparsity, uSparsity)
        if wRank is not None:
            self._num_W_matrices += 1
            self._num_weight_matrices[0] = self._num_W_matrices
        if uRank is not None:
            self._num_U_matrices += 1
            self._num_weight_matrices[1] = self._num_U_matrices
        self._name = name
        if wRank is None:
            self.W1 = nn.Parameter(0.1 * torch.randn([input_size,
                hidden_size]))
            self.W2 = nn.Parameter(0.1 * torch.randn([input_size,
                hidden_size]))
        else:
            self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
            self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
            self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
        if uRank is None:
            self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size,
                hidden_size]))
            self.U2 = nn.Parameter(0.1 * torch.randn([hidden_size,
                hidden_size]))
        else:
            self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
            self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
            self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
        self.bias_gate = nn.Parameter(torch.ones([1, hidden_size]))
        self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))
        self._device = self.bias_update.device

    @property
    def name(self):
        return self._name

    @property
    def cellType(self):
        return 'UGRNNLR'

    def forward(self, input, state):
        if self._wRank is None:
            wComp1 = torch.matmul(input, self.W1)
            wComp2 = torch.matmul(input, self.W2)
        else:
            wComp1 = torch.matmul(torch.matmul(input, self.W), self.W1)
            wComp2 = torch.matmul(torch.matmul(input, self.W), self.W2)
        if self._uRank is None:
            uComp1 = torch.matmul(state, self.U1)
            uComp2 = torch.matmul(state, self.U2)
        else:
            uComp1 = torch.matmul(torch.matmul(state, self.U), self.U1)
            uComp2 = torch.matmul(torch.matmul(state, self.U), self.U2)
        pre_comp1 = wComp1 + uComp1
        pre_comp2 = wComp2 + uComp2
        z = gen_nonlinearity(pre_comp1 + self.bias_gate,
                             self._gate_nonlinearity)
        c = gen_nonlinearity(pre_comp2 + self.bias_update,
                             self._update_nonlinearity)
        new_h = z * state + (1.0 - z) * c
        return new_h

    def getVars(self):
        Vars = []
        if self._num_W_matrices == 2:
            Vars.extend([self.W1, self.W2])
        else:
            Vars.extend([self.W, self.W1, self.W2])
        if self._num_U_matrices == 2:
            Vars.extend([self.U1, self.U2])
        else:
            Vars.extend([self.U, self.U1, self.U2])
        Vars.extend([self.bias_gate, self.bias_update])
        return Vars


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.onnx from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_out_ptr1 + x2, xmask) tmp7 = tl.load(in_ptr2 + x2, xmask) tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = libdevice.tanh(tmp10) tmp13 = tmp5 * tmp12 tmp14 = 1.0 tmp15 = tmp14 - tmp5 tmp16 = tmp15 * tmp11 tmp17 = tmp13 + tmp16 tl.store(in_out_ptr0 + x2, tmp5, xmask) tl.store(in_out_ptr1 + x2, tmp11, xmask) tl.store(out_ptr0 + x2, tmp17, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (1, 4), (4, 1)) assert_size_stride(primals_8, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_3, out=buf1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), primals_4, out=buf2) del primals_4 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), primals_6, out=buf3) del primals_6 buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf5 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf4, buf5, buf2, primals_7, buf3, primals_8, primals_5, buf6, 256, XBLOCK= 256, num_warps=4, num_stages=1) del buf2 del buf3 del primals_7 del primals_8 return buf6, primals_5, buf4, buf5, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if 
nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A, 0.0) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] = utils.hardThreshold(mats[i], self._uSparsity) self.W.data.copy_(mats[0]) self.U.data.copy_(mats[1]) def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class UGRNNLRCellNew(RNNCell): """ UGRNN LR 
Cell with Both Full Rank and Low Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units gate_nonlinearity = nonlinearity for the gate can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] update_nonlinearity = nonlinearity for final rnn update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates 3 matrices if not None else creates 2 matrices) uRank = rank of U matrix (creates 3 matrices if not None else creates 2 matrices) UGRNN architecture and compression techniques are found in UGRNN(LINK) paper Basic architecture is like: z_t = gate_nl(W1x_t + U1h_{t-1} + B_g) h_t^ = update_nl(W1x_t + U1h_{t-1} + B_h) h_t = z_t*h_{t-1} + (1-z_t)*h_t^ Wi and Ui can further parameterised into low rank version by Wi = matmul(W, W_i) and Ui = matmul(U, U_i) """ def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid', update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, name='UGRNNLR'): super(UGRNNLRCellNew, self).__init__(input_size, hidden_size, gate_nonlinearity, update_nonlinearity, 2, 2, 2, wRank, uRank, wSparsity, uSparsity) if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) self.W2 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) else: self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) else: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_gate = nn.Parameter(torch.ones([1, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self._device = self.bias_update.device @property def name(self): return self._name @property def cellType(self): return 'UGRNNLR' def getVars(self): Vars = [] if self._num_W_matrices == 2: Vars.extend([self.W1, self.W2]) else: Vars.extend([self.W, self.W1, self.W2]) if self._num_U_matrices == 2: Vars.extend([self.U1, self.U2]) else: Vars.extend([self.U, self.U1, self.U2]) Vars.extend([self.bias_gate, self.bias_update]) return Vars def forward(self, input_0, input_1): primals_1 = self.W1 primals_3 = self.W2 primals_4 = self.U1 primals_6 = self.U2 primals_7 = self.bias_gate primals_8 = self.bias_update primals_2 = input_0 primals_5 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
Shenzhen-Cloudatawalk-Technology-Co-Ltd/EdgeML
UGRNNLRCell
false
14417
[ "MIT" ]
719
ef9f8a77f096acbdeb941014791f8eda1c1bc35b
https://github.com/Shenzhen-Cloudatawalk-Technology-Co-Ltd/EdgeML/tree/ef9f8a77f096acbdeb941014791f8eda1c1bc35b
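The wRank/uRank options in these cells trade accuracy for parameters by sharing one low-rank factor across both gates. A rough parameter count for the UGRNN case is sketched below (ugrnn_param_count is an illustrative helper; d = input_size, h = hidden_size, r = rank), matching the shapes declared in UGRNNLRCell.__init__.

def ugrnn_param_count(d, h, r=None):
    # Full rank: W1, W2 are [d, h]; U1, U2 are [h, h]; two [1, h] bias rows.
    if r is None:
        return 2 * d * h + 2 * h * h + 2 * h
    # Low rank: shared W is [d, r] with W1, W2 both [r, h];
    # shared U is [h, r] with U1, U2 both [r, h].
    return (d * r + 2 * r * h) + (h * r + 2 * r * h) + 2 * h


# e.g. d=32, h=64: full rank -> 12416 parameters, rank r=8 -> 2944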
SSIM
import torch
import torch.nn as nn


class SSIM(nn.Module):
    """Layer to compute the SSIM loss between a pair of images
    """

    def __init__(self):
        super(SSIM, self).__init__()
        self.mu_x_pool = nn.AvgPool2d(3, 1)
        self.mu_y_pool = nn.AvgPool2d(3, 1)
        self.sig_x_pool = nn.AvgPool2d(3, 1)
        self.sig_y_pool = nn.AvgPool2d(3, 1)
        self.sig_xy_pool = nn.AvgPool2d(3, 1)
        self.refl = nn.ReflectionPad2d(1)
        self.C1 = 0.01 ** 2
        self.C2 = 0.03 ** 2

    def forward(self, x, y):
        x = self.refl(x)
        y = self.refl(y)
        mu_x = self.mu_x_pool(x)
        mu_y = self.mu_y_pool(y)
        sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2
        sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2
        sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y
        SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2)
        SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y +
            self.C2)
        return torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_reflection_pad2d_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 6 * x1 + 36 * x2), xmask) tmp1 = tl.load(in_ptr0 + (1 + x0 + 6 * x1 + 36 * x2), xmask) tmp3 = tl.load(in_ptr0 + (2 + x0 + 6 * x1 + 36 * x2), xmask) tmp5 = tl.load(in_ptr0 + (6 + x0 + 6 * x1 + 36 * x2), xmask) tmp7 = tl.load(in_ptr0 + (7 + x0 + 6 * x1 + 36 * x2), xmask) tmp9 = tl.load(in_ptr0 + (8 + x0 + 6 * x1 + 36 * x2), xmask) tmp11 = tl.load(in_ptr0 + (12 + x0 + 6 * x1 + 36 * x2), xmask) tmp13 = tl.load(in_ptr0 + (13 + x0 + 6 * x1 + 36 * x2), xmask) tmp15 = tl.load(in_ptr0 + (14 + x0 + 6 * x1 + 36 * x2), xmask) tmp19 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask) tmp22 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask) tmp24 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask, eviction_policy ='evict_last') tmp26 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask) tmp28 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask) tmp30 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask, eviction_policy ='evict_last') tmp32 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask) tmp34 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask) tmp55 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tmp56 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask) tmp58 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * 
tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask) tmp60 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask, eviction_policy ='evict_last') tmp62 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask) tmp64 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask) tmp66 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask, eviction_policy ='evict_last') tmp68 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask) tmp70 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp17 = 0.1111111111111111 tmp18 = tmp16 * tmp17 tmp21 = tmp20 + tmp19 tmp23 = tmp22 + tmp21 tmp25 = tmp24 + tmp23 tmp27 = tmp26 + tmp25 tmp29 = tmp28 + tmp27 tmp31 = tmp30 + tmp29 tmp33 = tmp32 + tmp31 tmp35 = tmp34 + tmp33 tmp36 = tmp35 * tmp17 tmp37 = tmp19 * tmp19 tmp38 = tmp20 * tmp20 tmp39 = tmp38 + tmp37 tmp40 = tmp22 * tmp22 tmp41 = tmp40 + tmp39 tmp42 = tmp24 * tmp24 tmp43 = tmp42 + tmp41 tmp44 = tmp26 * tmp26 tmp45 = tmp44 + tmp43 tmp46 = tmp28 * tmp28 tmp47 = tmp46 + tmp45 tmp48 = tmp30 * tmp30 tmp49 = tmp48 + tmp47 tmp50 = tmp32 * tmp32 tmp51 = tmp50 + tmp49 tmp52 = tmp34 * tmp34 tmp53 = tmp52 + tmp51 tmp54 = tmp53 * tmp17 tmp57 = tmp56 + tmp55 tmp59 = tmp58 + tmp57 tmp61 = tmp60 + tmp59 tmp63 = tmp62 + tmp61 tmp65 = tmp64 + tmp63 tmp67 = tmp66 + tmp65 tmp69 = tmp68 + tmp67 tmp71 = tmp70 + tmp69 tmp72 = tmp71 * tmp17 tmp73 = tmp55 * tmp55 tmp74 = tmp56 * tmp56 tmp75 = tmp74 + tmp73 tmp76 = tmp58 * tmp58 tmp77 = tmp76 + tmp75 tmp78 = tmp60 * tmp60 tmp79 = tmp78 + tmp77 tmp80 = tmp62 * tmp62 tmp81 = tmp80 + tmp79 tmp82 = tmp64 * tmp64 tmp83 = tmp82 + tmp81 tmp84 = tmp66 * tmp66 tmp85 = tmp84 + tmp83 tmp86 = tmp68 * tmp68 tmp87 = tmp86 + tmp85 tmp88 = tmp70 * tmp70 tmp89 = tmp88 + tmp87 tmp90 = tmp89 * tmp17 tmp91 = 2.0 tmp92 = tmp36 * tmp91 tmp93 = tmp92 * tmp72 tmp94 = 0.0001 tmp95 = tmp93 + tmp94 tmp96 = tmp36 * tmp72 tmp97 = tmp18 - tmp96 tmp98 = tmp97 * tmp91 tmp99 = 0.0009 tmp100 = tmp98 + tmp99 tmp101 = tmp95 * tmp100 tmp102 = tmp36 * tmp36 tmp103 = tmp72 * tmp72 tmp104 = tmp102 + tmp103 tmp105 = tmp104 + tmp94 tmp106 = tmp54 - tmp102 tmp107 = tmp90 - tmp103 tmp108 = tmp106 + tmp107 tmp109 = tmp108 + tmp99 tmp110 = tmp105 * tmp109 tmp111 = tmp101 / tmp110 tmp112 = 1.0 tmp113 = tmp112 - tmp111 tmp114 = 0.5 tmp115 = tmp113 * tmp114 tmp116 = 0.0 tmp117 = triton_helpers.maximum(tmp115, tmp116) tmp118 = triton_helpers.minimum(tmp117, tmp112) tl.store(in_out_ptr0 + x3, tmp118, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_reflection_pad2d_0[grid(576)](arg0_1, arg1_1, buf2, 576, XBLOCK=256, num_warps=4, num_stages=1) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf6 = buf0 del buf0 buf7 = buf6 del buf6 triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1[ grid(256)](buf7, buf2, arg0_1, arg1_1, 256, XBLOCK=128, 
num_warps=4, num_stages=1) del arg0_1 del arg1_1 del buf2 return buf7, class SSIMNew(nn.Module): """Layer to compute the SSIM loss between a pair of images """ def __init__(self): super(SSIMNew, self).__init__() self.mu_x_pool = nn.AvgPool2d(3, 1) self.mu_y_pool = nn.AvgPool2d(3, 1) self.sig_x_pool = nn.AvgPool2d(3, 1) self.sig_y_pool = nn.AvgPool2d(3, 1) self.sig_xy_pool = nn.AvgPool2d(3, 1) self.refl = nn.ReflectionPad2d(1) self.C1 = 0.01 ** 2 self.C2 = 0.03 ** 2 def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Siddharth-Shrivastava7/DANNet
SSIM
false
14,418
[ "Apache-2.0" ]
61
8db10056a4e445d24fc899505923615457cae5b7
https://github.com/Siddharth-Shrivastava7/DANNet/tree/8db10056a4e445d24fc899505923615457cae5b7
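A quick cross-check for the fused SSIM row above: the kernel constants 0.0001, 0.0009 and 0.1111111111111111 are exactly C1 = 0.01 ** 2, C2 = 0.03 ** 2 and the 1/9 weight of the 3x3 average pool. The following is a minimal sketch, not DANNet code, assuming a CUDA device and that SSIMNew from this row is in scope:
import torch
import torch.nn as nn

def eager_ssim(x, y, C1=0.01 ** 2, C2=0.03 ** 2):
    # reflection-pad once, then pool the moments, mirroring the module's refl + AvgPool2d(3, 1)
    refl, pool = nn.ReflectionPad2d(1), nn.AvgPool2d(3, 1)
    x, y = refl(x), refl(y)
    mu_x, mu_y = pool(x), pool(y)
    sigma_x = pool(x * x) - mu_x * mu_x          # E[x^2] - E[x]^2
    sigma_y = pool(y * y) - mu_y * mu_y
    sigma_xy = pool(x * y) - mu_x * mu_y
    n = (2 * mu_x * mu_y + C1) * (2 * sigma_xy + C2)
    d = (mu_x ** 2 + mu_y ** 2 + C1) * (sigma_x + sigma_y + C2)
    return torch.clamp((1 - n / d) / 2, 0, 1)

x = torch.rand(4, 4, 4, 4, device='cuda')
y = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(SSIMNew()(x, y), eager_ssim(x, y))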
LanguageModelCriterion
import torch import torch.nn as nn from torch.autograd import * class LanguageModelCriterion(nn.Module): def __init__(self): super(LanguageModelCriterion, self).__init__() def forward(self, input, target, mask): target = target[:, :input.size(1)] mask = mask[:, :input.size(1)] output = -input.gather(2, target.unsqueeze(2)).squeeze(2) * mask output = torch.sum(output) / torch.sum(mask) return output def get_inputs(): return [torch.ones([4, 4, 4], dtype=torch.int64), torch.ones([4, 4], dtype=torch.int64), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from torch.autograd import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp9 = tl.load(in_ptr2 + r0, None) tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tl.device_assert((0 <= tmp4) & (tmp4 < 4), 'index out of bounds: 0 <= tmp4 < 4') tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r0), None, eviction_policy= 'evict_last') tmp7 = -tmp6 tmp8 = tmp7.to(tl.float32) tmp10 = tmp8 * tmp9 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tmp14 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK]) tmp16 = tl.sum(tmp14, 1)[:, None] tmp17 = tmp13 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_div_mul_neg_sum_0[grid(1)](buf2, arg1_1, arg0_1, arg2_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, class LanguageModelCriterionNew(nn.Module): def __init__(self): super(LanguageModelCriterionNew, self).__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
SikandarBakht/Sub-GC
LanguageModelCriterion
false
14,419
[ "MIT" ]
71
5b89aff766df0b11446cf970fb285004ebfef672
https://github.com/SikandarBakht/Sub-GC/tree/5b89aff766df0b11446cf970fb285004ebfef672
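For reference, the single fused reduction above is just a mask-normalised negative log-likelihood. The sketch below restates the arithmetic in eager mode; it is hedged, not Sub-GC code, and it uses float log-probs, whereas the traced kernel was specialised to the harness's int64 inputs, so it illustrates the math rather than serving as a drop-in comparison:
import torch
logp = torch.randn(4, 4, 4)               # (batch, time, vocab) log-probabilities
target = torch.randint(0, 4, (4, 4))
mask = torch.ones(4, 4)                   # keep mask.sum() > 0 so the division is defined
nll = -logp.gather(2, target.unsqueeze(2)).squeeze(2) * mask
loss = nll.sum() / mask.sum()             # the scalar the kernel's two tl.sum reductions produce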
PairwiseRankingLoss
import torch import torch.nn as nn class PairwiseRankingLoss(nn.Module): """ Pairwise ranking loss """ def __init__(self, margin): super(PairwiseRankingLoss, self).__init__() self.margin = margin def forward(self, anchor1, anchor2, img_sentc, sent_imgc): cost_sent = torch.clamp(self.margin - anchor1 + img_sentc, min=0.0 ).sum() cost_img = torch.clamp(self.margin - anchor2 + sent_imgc, min=0.0).sum( ) loss = cost_sent + cost_img return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'margin': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_clamp_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp10 = tl.load(in_ptr2 + r0, None) tmp12 = tl.load(in_ptr3 + r0, None) tmp1 = 4.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp7 = tl.broadcast_to(tmp6, [RBLOCK]) tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0)) tmp11 = tmp1 - tmp10 tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp13, tmp5) tmp15 = tl.broadcast_to(tmp14, [RBLOCK]) tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0)) tmp18 = tmp9 + tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None) def call(args): arg0_1, arg1_1, arg2_1, arg3_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_clamp_rsub_sum_0[grid(1)](buf2, arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 return buf2, class PairwiseRankingLossNew(nn.Module): """ Pairwise ranking loss """ def __init__(self, margin): super(PairwiseRankingLossNew, self).__init__() self.margin = margin def forward(self, input_0, input_1, input_2, input_3): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 arg3_1 = input_3 output = call([arg0_1, arg1_1, arg2_1, arg3_1]) return output[0]
SilanHe/e-SNLI
PairwiseRankingLoss
false
14,420
[ "MIT" ]
125
1c38981f50f931e45cf06146e693c588bc89b78d
https://github.com/SilanHe/e-SNLI/tree/1c38981f50f931e45cf06146e693c588bc89b78d
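Note that the construction-time margin is baked into the kernel as the literal 4.0 (tmp1), so the trace is only valid for margin=4. An eager restatement of the two hinge terms (a hedged sketch, not e-SNLI code):
import torch
margin = 4.0
anchor1, anchor2, img_sentc, sent_imgc = (torch.rand(4, 4, 4, 4) for _ in range(4))
cost_sent = torch.clamp(margin - anchor1 + img_sentc, min=0.0).sum()
cost_img = torch.clamp(margin - anchor2 + sent_imgc, min=0.0).sum()
loss = cost_sent + cost_img               # penalises pairs whose score gap falls inside the margin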
SPPModule
import torch import torch.nn as nn import torch.nn.functional as F class SPPModule(nn.Module): def __init__(self, num_levels, pool_type='max_pool'): super(SPPModule, self).__init__() self.num_levels = num_levels self.pool_type = pool_type def forward(self, x): _bs, _c, _h, _w = x.size() pooling_layers = [x] for i in range(self.num_levels): kernel_size = 4 * (i + 1) + 1 padding = (kernel_size - 1) // 2 if self.pool_type == 'max_pool': tensor = F.max_pool2d(x, kernel_size=kernel_size, stride=1, padding=padding) else: tensor = F.avg_pool2d(x, kernel_size=kernel_size, stride=1, padding=padding) pooling_layers.append(tensor) x = torch.cat(pooling_layers, dim=1) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_levels': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_max_pool2d_with_indices_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x7 = xindex x3 = xindex // 64 x4 = xindex % 64 tmp116 = tl.load(in_ptr0 + x7, xmask) tmp0 = -2 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -2 + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-10 + x7), tmp10 & xmask, other=float('-inf')) tmp12 = -1 + x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-9 + x7), tmp16 & xmask, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-8 + x7), tmp23 & xmask, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = 1 + x0 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp5 & tmp29 tmp31 = tl.load(in_ptr0 + (-7 + x7), tmp30 & xmask, other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = 2 + x0 tmp34 = tmp33 >= tmp1 tmp35 = tmp33 < tmp3 tmp36 = tmp34 & tmp35 tmp37 = tmp5 & tmp36 tmp38 = tl.load(in_ptr0 + (-6 + x7), tmp37 & xmask, other=float('-inf')) tmp39 = triton_helpers.maximum(tmp38, tmp32) tmp40 = -1 + x1 tmp41 = tmp40 >= tmp1 tmp42 = tmp40 < tmp3 tmp43 = tmp41 & tmp42 tmp44 = tmp43 & tmp9 tmp45 = tl.load(in_ptr0 + (-6 + x7), tmp44 & xmask, other=float('-inf')) tmp46 = triton_helpers.maximum(tmp45, tmp39) tmp47 = tmp43 & tmp15 tmp48 = tl.load(in_ptr0 + (-5 + x7), tmp47 & xmask, other=float('-inf')) tmp49 = triton_helpers.maximum(tmp48, tmp46) tmp50 = tmp43 & tmp22 tmp51 = tl.load(in_ptr0 + (-4 + x7), tmp50 & xmask, other=float('-inf')) tmp52 = triton_helpers.maximum(tmp51, tmp49) tmp53 = tmp43 & tmp29 tmp54 = tl.load(in_ptr0 + (-3 + x7), tmp53 & xmask, other=float('-inf')) tmp55 = triton_helpers.maximum(tmp54, tmp52) tmp56 = tmp43 & tmp36 tmp57 = tl.load(in_ptr0 + (-2 + x7), tmp56 & xmask, other=float('-inf')) tmp58 = triton_helpers.maximum(tmp57, tmp55) tmp59 = x1 tmp60 = tmp59 >= tmp1 tmp61 = tmp59 < tmp3 tmp62 = tmp60 & tmp61 tmp63 = tmp62 & tmp9 tmp64 = tl.load(in_ptr0 + (-2 + x7), tmp63 & xmask, other=float('-inf')) tmp65 = triton_helpers.maximum(tmp64, tmp58) tmp66 = tmp62 & tmp15 tmp67 = tl.load(in_ptr0 + (-1 + x7), tmp66 & xmask, other=float('-inf')) tmp68 = triton_helpers.maximum(tmp67, tmp65) tmp69 = tmp62 & tmp22 tmp70 = tl.load(in_ptr0 + x7, tmp69 & xmask, other=float('-inf')) tmp71 = triton_helpers.maximum(tmp70, tmp68) tmp72 = tmp62 & tmp29 tmp73 = tl.load(in_ptr0 + (1 + x7), tmp72 & xmask, other=float('-inf')) tmp74 = triton_helpers.maximum(tmp73, tmp71) tmp75 = tmp62 & tmp36 tmp76 = tl.load(in_ptr0 + (2 + x7), tmp75 & xmask, other=float('-inf')) tmp77 = triton_helpers.maximum(tmp76, tmp74) tmp78 = 1 + x1 tmp79 
= tmp78 >= tmp1 tmp80 = tmp78 < tmp3 tmp81 = tmp79 & tmp80 tmp82 = tmp81 & tmp9 tmp83 = tl.load(in_ptr0 + (2 + x7), tmp82 & xmask, other=float('-inf')) tmp84 = triton_helpers.maximum(tmp83, tmp77) tmp85 = tmp81 & tmp15 tmp86 = tl.load(in_ptr0 + (3 + x7), tmp85 & xmask, other=float('-inf')) tmp87 = triton_helpers.maximum(tmp86, tmp84) tmp88 = tmp81 & tmp22 tmp89 = tl.load(in_ptr0 + (4 + x7), tmp88 & xmask, other=float('-inf')) tmp90 = triton_helpers.maximum(tmp89, tmp87) tmp91 = tmp81 & tmp29 tmp92 = tl.load(in_ptr0 + (5 + x7), tmp91 & xmask, other=float('-inf')) tmp93 = triton_helpers.maximum(tmp92, tmp90) tmp94 = tmp81 & tmp36 tmp95 = tl.load(in_ptr0 + (6 + x7), tmp94 & xmask, other=float('-inf')) tmp96 = triton_helpers.maximum(tmp95, tmp93) tmp97 = 2 + x1 tmp98 = tmp97 >= tmp1 tmp99 = tmp97 < tmp3 tmp100 = tmp98 & tmp99 tmp101 = tmp100 & tmp9 tmp102 = tl.load(in_ptr0 + (6 + x7), tmp101 & xmask, other=float('-inf')) tmp103 = triton_helpers.maximum(tmp102, tmp96) tmp104 = tmp100 & tmp15 tmp105 = tl.load(in_ptr0 + (7 + x7), tmp104 & xmask, other=float('-inf')) tmp106 = triton_helpers.maximum(tmp105, tmp103) tmp107 = tmp100 & tmp22 tmp108 = tl.load(in_ptr0 + (8 + x7), tmp107 & xmask, other=float('-inf')) tmp109 = triton_helpers.maximum(tmp108, tmp106) tmp110 = tmp100 & tmp29 tmp111 = tl.load(in_ptr0 + (9 + x7), tmp110 & xmask, other=float('-inf')) tmp112 = triton_helpers.maximum(tmp111, tmp109) tmp113 = tmp100 & tmp36 tmp114 = tl.load(in_ptr0 + (10 + x7), tmp113 & xmask, other=float('-inf')) tmp115 = triton_helpers.maximum(tmp114, tmp112) tl.store(out_ptr0 + (x4 + 320 * x3), tmp115, xmask) tl.store(out_ptr1 + (x4 + 320 * x3), tmp116, xmask) @triton.jit def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 x1 = xindex // 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tl.store(out_ptr0 + (x0 + 320 * x1), tmp0, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf14 = empty_strided_cuda((4, 20, 4, 4), (320, 16, 4, 1), torch. 
float32) buf0 = reinterpret_tensor(buf14, (4, 4, 4, 4), (320, 16, 4, 1), 64) buf10 = reinterpret_tensor(buf14, (4, 4, 4, 4), (320, 16, 4, 1), 0) get_raw_stream(0) triton_poi_fused_cat_max_pool2d_with_indices_0[grid(256)](arg0_1, buf0, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1) buf1 = torch.ops.aten.max_pool2d_with_indices.default(arg0_1, [9, 9 ], [1, 1], [4, 4]) buf2 = buf1[0] del buf1 buf4 = torch.ops.aten.max_pool2d_with_indices.default(arg0_1, [13, 13], [1, 1], [6, 6]) buf5 = buf4[0] del buf4 buf7 = torch.ops.aten.max_pool2d_with_indices.default(arg0_1, [17, 17], [1, 1], [8, 8]) del arg0_1 buf8 = buf7[0] del buf7 buf11 = reinterpret_tensor(buf14, (4, 4, 4, 4), (320, 16, 4, 1), 128) triton_poi_fused_cat_1[grid(256)](buf2, buf11, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 buf12 = reinterpret_tensor(buf14, (4, 4, 4, 4), (320, 16, 4, 1), 192) triton_poi_fused_cat_1[grid(256)](buf5, buf12, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 buf13 = reinterpret_tensor(buf14, (4, 4, 4, 4), (320, 16, 4, 1), 256) triton_poi_fused_cat_1[grid(256)](buf8, buf13, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf8 return buf14, class SPPModuleNew(nn.Module): def __init__(self, num_levels, pool_type='max_pool'): super(SPPModuleNew, self).__init__() self.num_levels = num_levels self.pool_type = pool_type def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
ShuangXieIrene/ssds.pytorch
SPPModule
false
14,421
[ "MIT" ]
661
b5ec682a42c923afe964205b21448e9f141d55bc
https://github.com/ShuangXieIrene/ssds.pytorch/tree/b5ec682a42c923afe964205b21448e9f141d55bc
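Two properties worth making explicit: each level pools with kernel k = 4*(i+1)+1, stride 1 and padding (k-1)//2, so H and W are preserved, and the concatenation yields C*(num_levels+1) channels, which is why call() builds a (4, 20, 4, 4) output and why assert_size_stride pins the input to the harness shape. A hedged shape check (assumes a CUDA device):
import torch
import torch.nn.functional as F
x = torch.rand(4, 4, 4, 4, device='cuda')
out = SPPModuleNew(num_levels=4)(x)
assert out.shape == (4, 4 * (4 + 1), 4, 4)    # the input plus one pooled copy per level
for i in range(4):
    k = 4 * (i + 1) + 1                       # kernel sizes 5, 9, 13, 17
    assert F.max_pool2d(x, k, stride=1, padding=(k - 1) // 2).shape == x.shape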
GRULRCell
import torch import torch.nn as nn import torch.onnx from itertools import product as product def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm']") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self. _num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] =
utils.hardThreshold(mats[i], self._uSparsity) self.W.data.copy_(mats[0]) self.U.data.copy_(mats[1]) def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class GRULRCell(RNNCell): """ GRU LR Cell with Both Full Rank and Low Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units gate_nonlinearity = nonlinearity for the gate can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] update_nonlinearity = nonlinearity for final rnn update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates 4 matrices if not None else creates 3 matrices) uRank = rank of U matrix (creates 4 matrices if not None else creates 3 matrices) GRU architecture and compression techniques are found in GRU(LINK) paper Basic architecture is like: r_t = gate_nl(W1x_t + U1h_{t-1} + B_r) z_t = gate_nl(W2x_t + U2h_{t-1} + B_g) h_t^ = update_nl(W3x_t + r_t*U3(h_{t-1}) + B_h) h_t = z_t*h_{t-1} + (1-z_t)*h_t^ Wi and Ui can further parameterised into low rank version by Wi = matmul(W, W_i) and Ui = matmul(U, U_i) """ def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid', update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, name='GRULR'): super(GRULRCell, self).__init__(input_size, hidden_size, gate_nonlinearity, update_nonlinearity, 3, 3, 3, wRank, uRank, wSparsity, uSparsity) if wRank is not None: self._num_W_matrices += 1 self._num_weight_matrices[0] = self._num_W_matrices if uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) self.W2 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) self.W3 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) else: self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) self.W3 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) self.U3 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) else: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.U3 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_r = nn.Parameter(torch.ones([1, hidden_size])) self.bias_gate = nn.Parameter(torch.ones([1, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self._device = self.bias_update.device @property def name(self): return self._name @property def cellType(self): return 'GRULR' def forward(self, input, state): if self._wRank is None: wComp1 = torch.matmul(input, self.W1) wComp2 = torch.matmul(input, self.W2) wComp3 = torch.matmul(input, self.W3) else: wComp1 = torch.matmul(torch.matmul(input, self.W), self.W1) wComp2 = torch.matmul(torch.matmul(input, self.W), self.W2) wComp3 = torch.matmul(torch.matmul(input, self.W), self.W3) if self._uRank is None: uComp1 = torch.matmul(state, self.U1) uComp2 = torch.matmul(state, self.U2) else: uComp1 = torch.matmul(torch.matmul(state, self.U), 
self.U1) uComp2 = torch.matmul(torch.matmul(state, self.U), self.U2) pre_comp1 = wComp1 + uComp1 pre_comp2 = wComp2 + uComp2 r = gen_nonlinearity(pre_comp1 + self.bias_r, self._gate_nonlinearity) z = gen_nonlinearity(pre_comp2 + self.bias_gate, self. _gate_nonlinearity) if self._uRank is None: pre_comp3 = wComp3 + torch.matmul(r * state, self.U3) else: pre_comp3 = wComp3 + torch.matmul(torch.matmul(r * state, self. U), self.U3) c = gen_nonlinearity(pre_comp3 + self.bias_update, self. _update_nonlinearity) new_h = z * state + (1.0 - z) * c return new_h def getVars(self): Vars = [] if self._num_W_matrices == 3: Vars.extend([self.W1, self.W2, self.W3]) else: Vars.extend([self.W, self.W1, self.W2, self.W3]) if self._num_U_matrices == 3: Vars.extend([self.U1, self.U2, self.U3]) else: Vars.extend([self.U, self.U1, self.U2, self.U3]) Vars.extend([self.bias_r, self.bias_gate, self.bias_update]) return Vars def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.onnx from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp7 = tmp5 * tmp6 tmp8 = 1.0 tmp9 = tmp8 - tmp5 tmp10 = tmp5 * tmp9 tl.store(out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr1 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_out_ptr1 + x2, xmask) tmp7 = tl.load(in_ptr2 + x2, xmask) tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.sigmoid(tmp4) tmp8 = tmp6 + tmp7 tmp10 = tmp8 + tmp9 tmp11 = libdevice.tanh(tmp10) tmp13 = tmp5 * tmp12 tmp14 = 1.0 tmp15 = tmp14 - tmp5 tmp16 = tmp15 * tmp11 tmp17 = tmp13 + tmp16 tl.store(in_out_ptr0 + x2, tmp5, xmask) tl.store(in_out_ptr1 + x2, tmp11, xmask) tl.store(out_ptr0 + x2, tmp17, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (1, 4), (4, 1)) assert_size_stride(primals_9, (1, 4), (4, 1)) assert_size_stride(primals_10, (4, 4), (4, 1)) assert_size_stride(primals_11, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_3, out=buf1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_4, out=buf2) del primals_4 buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) 
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), primals_5, out=buf3) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), primals_7, out=buf4) del primals_7 buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0[grid(256)](buf0, buf3, primals_8, primals_6, buf6, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_8 buf7 = buf3 del buf3 extern_kernels.mm(reinterpret_tensor(buf6, (64, 4), (4, 1), 0), primals_10, out=buf7) buf5 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 buf8 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf9 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused_add_mul_rsub_sigmoid_tanh_1[grid(256)](buf5, buf8, buf4, primals_9, buf7, primals_11, primals_6, buf9, 256, XBLOCK =128, num_warps=4, num_stages=1) del buf4 del buf7 del primals_11 del primals_9 return buf9, primals_6, buf5, buf8, reinterpret_tensor(buf6, (4, 64), ( 1, 4), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0 ), buf10, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0) def gen_nonlinearity(A, nonlinearity): """ Returns required activation for a tensor based on the inputs nonlinearity is either a callable or a value in ['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4'] """ if nonlinearity == 'tanh': return torch.tanh(A) elif nonlinearity == 'sigmoid': return torch.sigmoid(A) elif nonlinearity == 'relu': return torch.relu(A) elif nonlinearity == 'quantTanh': return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch. ones_like(A)) elif nonlinearity == 'quantSigm': A = (A + 1.0) / 2.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) elif nonlinearity == 'quantSigm4': A = (A + 2.0) / 4.0 return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A)) else: if not callable(nonlinearity): raise ValueError( 'nonlinearity is either a callable or a value ' + "['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm']") return nonlinearity(A) class RNNCell(nn.Module): def __init__(self, input_size, hidden_size, gate_nonlinearity, update_nonlinearity, num_W_matrices, num_U_matrices, num_biases, wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0): super(RNNCell, self).__init__() self._input_size = input_size self._hidden_size = hidden_size self._gate_nonlinearity = gate_nonlinearity self._update_nonlinearity = update_nonlinearity self._num_W_matrices = num_W_matrices self._num_U_matrices = num_U_matrices self._num_biases = num_biases self._num_weight_matrices = [self._num_W_matrices, self.
_num_U_matrices, self._num_biases] self._wRank = wRank self._uRank = uRank self._wSparsity = wSparsity self._uSparsity = uSparsity self.oldmats = [] @property def state_size(self): return self._hidden_size @property def input_size(self): return self._input_size @property def output_size(self): return self._hidden_size @property def gate_nonlinearity(self): return self._gate_nonlinearity @property def update_nonlinearity(self): return self._update_nonlinearity @property def wRank(self): return self._wRank @property def uRank(self): return self._uRank @property def num_W_matrices(self): return self._num_W_matrices @property def num_U_matrices(self): return self._num_U_matrices @property def num_weight_matrices(self): return self._num_weight_matrices @property def name(self): raise NotImplementedError() def forward(self, input, state): raise NotImplementedError() def getVars(self): raise NotImplementedError() def get_model_size(self): """ Function to get aimed model size """ mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices totalnnz = 2 for i in range(0, endW): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity) mats[i] for i in range(endW, endU): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity) mats[i] for i in range(endU, len(mats)): mats[i].device totalnnz += utils.countNNZ(mats[i].cpu(), False) mats[i] return totalnnz * 4 def copy_previous_UW(self): mats = self.getVars() num_mats = self._num_W_matrices + self._num_U_matrices if len(self.oldmats) != num_mats: for i in range(num_mats): self.oldmats.append(torch.FloatTensor()) for i in range(num_mats): self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone()) def sparsify(self): mats = self.getVars() endW = self._num_W_matrices endU = endW + self._num_U_matrices for i in range(0, endW): mats[i] = utils.hardThreshold(mats[i], self._wSparsity) for i in range(endW, endU): mats[i] = utils.hardThreshold(mats[i], self._uSparsity) self.W.data.copy_(mats[0]) self.U.data.copy_(mats[1]) def sparsifyWithSupport(self): mats = self.getVars() endU = self._num_W_matrices + self._num_U_matrices for i in range(0, endU): mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i]) class GRULRCellNew(RNNCell): """ GRU LR Cell with Both Full Rank and Low Rank Formulations Has multiple activation functions for the gates hidden_size = # hidden units gate_nonlinearity = nonlinearity for the gate can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] update_nonlinearity = nonlinearity for final rnn update can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm] wRank = rank of W matrix (creates 4 matrices if not None else creates 3 matrices) uRank = rank of U matrix (creates 4 matrices if not None else creates 3 matrices) GRU architecture and compression techniques are found in GRU(LINK) paper Basic architecture is like: r_t = gate_nl(W1x_t + U1h_{t-1} + B_r) z_t = gate_nl(W2x_t + U2h_{t-1} + B_g) h_t^ = update_nl(W3x_t + r_t*U3(h_{t-1}) + B_h) h_t = z_t*h_{t-1} + (1-z_t)*h_t^ Wi and Ui can further parameterised into low rank version by Wi = matmul(W, W_i) and Ui = matmul(U, U_i) """ def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid', update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, name='GRULR'): super(GRULRCellNew, self).__init__(input_size, hidden_size, gate_nonlinearity, update_nonlinearity, 3, 3, 3, wRank, uRank, wSparsity, uSparsity) if wRank is not None: self._num_W_matrices += 1 
self._num_weight_matrices[0] = self._num_W_matrices if uRank is not None: self._num_U_matrices += 1 self._num_weight_matrices[1] = self._num_U_matrices self._name = name if wRank is None: self.W1 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) self.W2 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) self.W3 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]) ) else: self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank])) self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) self.W3 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size])) if uRank is None: self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) self.U3 = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])) else: self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank])) self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.U3 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size])) self.bias_r = nn.Parameter(torch.ones([1, hidden_size])) self.bias_gate = nn.Parameter(torch.ones([1, hidden_size])) self.bias_update = nn.Parameter(torch.ones([1, hidden_size])) self._device = self.bias_update.device @property def name(self): return self._name @property def cellType(self): return 'GRULR' def getVars(self): Vars = [] if self._num_W_matrices == 3: Vars.extend([self.W1, self.W2, self.W3]) else: Vars.extend([self.W, self.W1, self.W2, self.W3]) if self._num_U_matrices == 3: Vars.extend([self.U1, self.U2, self.U3]) else: Vars.extend([self.U, self.U1, self.U2, self.U3]) Vars.extend([self.bias_r, self.bias_gate, self.bias_update]) return Vars def forward(self, input_0, input_1): primals_1 = self.W1 primals_3 = self.W2 primals_4 = self.W3 primals_5 = self.U1 primals_7 = self.U2 primals_10 = self.U3 primals_8 = self.bias_r primals_9 = self.bias_gate primals_11 = self.bias_update primals_2 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
Shenzhen-Cloudatawalk-Technology-Co-Ltd/EdgeML
GRULRCell
false
14,422
[ "MIT" ]
719
ef9f8a77f096acbdeb941014791f8eda1c1bc35b
https://github.com/Shenzhen-Cloudatawalk-Technology-Co-Ltd/EdgeML/tree/ef9f8a77f096acbdeb941014791f8eda1c1bc35b
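Because the harness passes wRank=uRank=None, the traced kernels cover only the full-rank path; the low-rank variant (a shared input_size x r factor, so W-side parameters drop from 3*d_in*d_h to d_in*r + 3*r*d_h) never appears in the graph. Below is a hedged eager recomputation of the fused gates, assuming a CUDA device; this is a sketch, not EdgeML code:
import torch
cell = GRULRCellNew(4, 4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
h = torch.rand(4, 4, 4, 4, device='cuda')
r = torch.sigmoid(x @ cell.W1 + h @ cell.U1 + cell.bias_r)       # reset gate
z = torch.sigmoid(x @ cell.W2 + h @ cell.U2 + cell.bias_gate)    # update gate
c = torch.tanh(x @ cell.W3 + (r * h) @ cell.U3 + cell.bias_update)
torch.testing.assert_close(z * h + (1.0 - z) * c, cell(x, h))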
NTM
from _paritybench_helpers import _mock_config import logging import torch import numpy as np from torch.nn import functional as F import torch.utils.data import torch.nn as nn class NTM(nn.Module): def __init__(self, opt, hidden_dim=500, l1_strength=0.001): super(NTM, self).__init__() self.input_dim = opt.bow_vocab_size self.topic_num = opt.topic_num topic_num = opt.topic_num self.fc11 = nn.Linear(self.input_dim, hidden_dim) self.fc12 = nn.Linear(hidden_dim, hidden_dim) self.fc21 = nn.Linear(hidden_dim, topic_num) self.fc22 = nn.Linear(hidden_dim, topic_num) self.fcs = nn.Linear(self.input_dim, hidden_dim, bias=False) self.fcg1 = nn.Linear(topic_num, topic_num) self.fcg2 = nn.Linear(topic_num, topic_num) self.fcg3 = nn.Linear(topic_num, topic_num) self.fcg4 = nn.Linear(topic_num, topic_num) self.fcd1 = nn.Linear(topic_num, self.input_dim) self.l1_strength = torch.FloatTensor([l1_strength]) def encode(self, x): e1 = F.relu(self.fc11(x)) e1 = F.relu(self.fc12(e1)) e1 = e1.add(self.fcs(x)) return self.fc21(e1), self.fc22(e1) def reparameterize(self, mu, logvar): if self.training: std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return eps.mul(std).add_(mu) else: return mu def generate(self, h): g1 = torch.tanh(self.fcg1(h)) g1 = torch.tanh(self.fcg2(g1)) g1 = torch.tanh(self.fcg3(g1)) g1 = torch.tanh(self.fcg4(g1)) g1 = g1.add(h) return g1 def decode(self, z): d1 = F.softmax(self.fcd1(z), dim=1) return d1 def forward(self, x): mu, logvar = self.encode(x.view(-1, self.input_dim)) z = self.reparameterize(mu, logvar) g = self.generate(z) return z, g, self.decode(g), mu, logvar def print_topic_words(self, vocab_dic, fn, n_top_words=10): beta_exp = self.fcd1.weight.data.cpu().numpy().T logging.info('Writing to %s' % fn) fw = open(fn, 'w') for k, beta_k in enumerate(beta_exp): topic_words = [vocab_dic[w_id] for w_id in np.argsort(beta_k)[: -n_top_words - 1:-1]] None fw.write('{}\n'.format(' '.join(topic_words))) fw.close() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'opt': _mock_config(bow_vocab_size=4, topic_num=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import logging import numpy as np from torch.nn import functional as F import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 32000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 500 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32000 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 500 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) @triton.jit def triton_poi_fused_add_tanh_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp3 = tmp1 + tmp2 tl.store(out_ptr0 + x0, tmp3, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = 
xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (500, 4), (4, 1)) assert_size_stride(primals_3, (500,), (1,)) assert_size_stride(primals_4, (500, 500), (500, 1)) assert_size_stride(primals_5, (500,), (1,)) assert_size_stride(primals_6, (500, 4), (4, 1)) assert_size_stride(primals_7, (4, 500), (500, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 500), (500, 1)) assert_size_stride(primals_10, (4,), (1,)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (4, 4), (4, 1)) assert_size_stride(primals_14, (4,), (1,)) assert_size_stride(primals_15, (4, 4), (4, 1)) assert_size_stride(primals_16, (4,), (1,)) assert_size_stride(primals_17, (4, 4), (4, 1)) assert_size_stride(primals_18, (4,), (1,)) assert_size_stride(primals_19, (4, 4), (4, 1)) assert_size_stride(primals_20, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 500), (500, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 500), (1, 4), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(32000)](buf1, primals_3, 32000, XBLOCK =256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 500), (500, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (500, 500), ( 1, 500), 0), out=buf2) buf3 = buf2 del buf2 buf18 = empty_strided_cuda((64, 500), (500, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(32000)](buf3, primals_5, buf18, 32000, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 500), (500, 1), torch.float32) extern_kernels.addmm(buf3, reinterpret_tensor(primals_1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_6, (4, 500), (1, 4), 0), alpha=1, beta=1, out=buf4) del buf3 del primals_6 buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_8, buf4, reinterpret_tensor(primals_7, (500, 4), (1, 500), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_10, buf4, reinterpret_tensor(primals_9, (500, 4), (1, 500), 0), alpha=1, beta=1, out=buf6) del primals_10 buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf5, reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf7) buf8 = buf7 del buf7 triton_poi_fused_tanh_2[grid(256)](buf8, primals_12, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_12 buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf8, reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf9) buf10 = buf9 del buf9 
triton_poi_fused_tanh_2[grid(256)](buf10, primals_14, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_14 buf11 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(buf10, reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf11) buf12 = buf11 del buf11 triton_poi_fused_tanh_2[grid(256)](buf12, primals_16, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_16 buf13 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_18, buf12, reinterpret_tensor( primals_17, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_18 buf14 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused_add_tanh_3[grid(256)](buf13, buf5, buf14, 256, XBLOCK=256, num_warps=4, num_stages=1) buf15 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_20, buf14, reinterpret_tensor( primals_19, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15) del primals_20 buf16 = empty_strided_cuda((64, 4), (4, 1), torch.float32) triton_poi_fused__softmax_4[grid(256)](buf15, buf16, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf17 = buf15 del buf15 triton_poi_fused__softmax_5[grid(256)](buf16, buf17, 256, XBLOCK= 128, num_warps=4, num_stages=1) del buf16 return (buf5, buf14, buf17, buf6, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, buf4, buf5, buf8, buf10, buf12, buf13, buf14, buf17, primals_19, primals_17, primals_15, primals_13, primals_11, primals_9, primals_7, buf18, primals_4) class NTMNew(nn.Module): def __init__(self, opt, hidden_dim=500, l1_strength=0.001): super(NTMNew, self).__init__() self.input_dim = opt.bow_vocab_size self.topic_num = opt.topic_num topic_num = opt.topic_num self.fc11 = nn.Linear(self.input_dim, hidden_dim) self.fc12 = nn.Linear(hidden_dim, hidden_dim) self.fc21 = nn.Linear(hidden_dim, topic_num) self.fc22 = nn.Linear(hidden_dim, topic_num) self.fcs = nn.Linear(self.input_dim, hidden_dim, bias=False) self.fcg1 = nn.Linear(topic_num, topic_num) self.fcg2 = nn.Linear(topic_num, topic_num) self.fcg3 = nn.Linear(topic_num, topic_num) self.fcg4 = nn.Linear(topic_num, topic_num) self.fcd1 = nn.Linear(topic_num, self.input_dim) self.l1_strength = torch.FloatTensor([l1_strength]) def encode(self, x): e1 = F.relu(self.fc11(x)) e1 = F.relu(self.fc12(e1)) e1 = e1.add(self.fcs(x)) return self.fc21(e1), self.fc22(e1) def reparameterize(self, mu, logvar): if self.training: std = torch.exp(0.5 * logvar) eps = torch.randn_like(std) return eps.mul(std).add_(mu) else: return mu def generate(self, h): g1 = torch.tanh(self.fcg1(h)) g1 = torch.tanh(self.fcg2(g1)) g1 = torch.tanh(self.fcg3(g1)) g1 = torch.tanh(self.fcg4(g1)) g1 = g1.add(h) return g1 def decode(self, z): d1 = F.softmax(self.fcd1(z), dim=1) return d1 def print_topic_words(self, vocab_dic, fn, n_top_words=10): beta_exp = self.fcd1.weight.data.cpu().numpy().T logging.info('Writing to %s' % fn) fw = open(fn, 'w') for k, beta_k in enumerate(beta_exp): topic_words = [vocab_dic[w_id] for w_id in np.argsort(beta_k)[: -n_top_words - 1:-1]] None fw.write('{}\n'.format(' '.join(topic_words))) fw.close() def forward(self, input_0): primals_2 = self.fc11.weight primals_3 = self.fc11.bias primals_4 = self.fc12.weight primals_5 = self.fc12.bias primals_7 = self.fc21.weight primals_8 = self.fc21.bias primals_9 = self.fc22.weight primals_10 = self.fc22.bias primals_6 = self.fcs.weight primals_11 = self.fcg1.weight primals_12 = self.fcg1.bias primals_13 = self.fcg2.weight primals_14 = self.fcg2.bias primals_15 = self.fcg3.weight primals_16 = self.fcg3.bias 
primals_17 = self.fcg4.weight primals_18 = self.fcg4.bias primals_19 = self.fcd1.weight primals_20 = self.fcd1.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20]) return output[0], output[1], output[2], output[3], output[4]
Nullius-2020/TAKG-Paddle
NTM
false
14,423
[ "MIT" ]
130
7ebb5c4cdd1d2c68b1ca4a518b73c5e815fc5812
https://github.com/Nullius-2020/TAKG-Paddle/tree/7ebb5c4cdd1d2c68b1ca4a518b73c5e815fc5812
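One detail the trace makes explicit: none of the kernels draw random numbers, so the graph bakes in the eval-mode branch of reparameterize, i.e. z = mu. For reference, the training-mode trick the eager module applies (a minimal sketch, not TAKG-Paddle code):
import torch
mu = torch.zeros(4, 4)
logvar = torch.zeros(4, 4)
std = torch.exp(0.5 * logvar)             # the logvar parameterisation keeps std positive
z = mu + torch.randn_like(std) * std      # noise enters additively, so gradients reach mu and logvar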
BertPSIHead
from _paritybench_helpers import _mock_config import torch from torch import nn class BertPSIHead(nn.Module): def __init__(self, config): super().__init__() self.transform = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() self.decoder = nn.Linear(config.hidden_size, 2, bias=False) self.bias = nn.Parameter(torch.zeros(2)) self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = hidden_states[:, 0] hidden_states = self.transform(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (2, 4), (4, 1)) assert_size_stride(primals_5, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0) del buf1 triton_poi_fused_add_tanh_1[grid(64)](buf2, primals_3, 64, XBLOCK= 64, num_warps=1, num_stages=1) del primals_3 buf3 = empty_strided_cuda((16, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 return reinterpret_tensor(buf3, (4, 4, 2), (8, 2, 1), 0 ), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, primals_4 class BertPSIHeadNew(nn.Module): def __init__(self, config): super().__init__() self.transform = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() self.decoder = nn.Linear(config.hidden_size, 2, bias=False) self.bias = nn.Parameter(torch.zeros(2)) self.decoder.bias = self.bias def forward(self, input_0): primals_5 = self.bias primals_2 = self.transform.weight primals_3 = self.transform.bias primals_4 = self.decoder.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Sologa/awesome-align
BertPSIHead
false
14,424
[ "BSD-3-Clause" ]
173
62eaae7eac9bac06c10627fac6cc942c07a50e64
https://github.com/Sologa/awesome-align/tree/62eaae7eac9bac06c10627fac6cc942c07a50e64
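Here triton_poi_fused_clone_0 is simply the hidden_states[:, 0] slice (it copies the first 16 of every 64 contiguous elements), and the final addmm picks up self.bias even though the Linear was built with bias=False, because the module reassigns decoder.bias. A hedged end-to-end check, with types.SimpleNamespace standing in for the harness's _mock_config:
import torch
from types import SimpleNamespace
head = BertPSIHeadNew(SimpleNamespace(hidden_size=4)).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
ref = head.decoder(torch.tanh(head.transform(x[:, 0])))   # eager first-token path
torch.testing.assert_close(head(x), ref)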
Net
import torch from torch import nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(2048, 2048, kernel_size=1) def forward(self, x): x = F.relu(self.conv1(x)) return x def get_inputs(): return [torch.rand([4, 2048, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 2048 y1 = yindex // 2048 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 2048 * x2 + 8388608 * y1), tmp0, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y0 = yindex % 2048 y1 = yindex // 2048 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 2048 * x2 + 8388608 * y1), None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + (x2 + 4096 * y3), tmp4, None) tl.store(out_ptr1 + (y0 + 2048 * x2 + 8388608 * y1), tmp6, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (2048, 2048, 1, 1), (2048, 1, 1, 1)) assert_size_stride(primals_2, (2048,), (1,)) assert_size_stride(primals_3, (4, 2048, 64, 64), (8388608, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 2048, 64, 64), (8388608, 1, 131072, 2048), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(8192, 4096)](primals_3, buf0, 8192, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 2048, 64, 64), (8388608, 1, 131072, 2048)) buf2 = empty_strided_cuda((4, 2048, 64, 64), (8388608, 4096, 64, 1), torch.float32) buf3 = empty_strided_cuda((4, 2048, 64, 64), (8388608, 1, 131072, 2048), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_1[grid(8192, 4096) ](buf1, primals_2, buf2, buf3, 8192, 4096, XBLOCK=16, YBLOCK= 256, num_warps=8, num_stages=1) del buf1 del primals_2 return buf2, primals_1, buf0, buf3 class NetNew(nn.Module): def __init__(self): super(NetNew, self).__init__() self.conv1 = nn.Conv2d(2048, 2048, kernel_size=1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ReyhaneAskari/pytorch_experiments
Net
false
14,425
[ "MIT" ]
60
43d2efbc08c9dd6275530c4bf49c68772f8afb75
https://github.com/ReyhaneAskari/pytorch_experiments/tree/43d2efbc08c9dd6275530c4bf49c68772f8afb75
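Reading the compiled graph for Net above: triton_poi_fused_0 only permutes the NCHW input into a channels-last layout (note the output strides (8388608, 1, 131072, 2048)) so the 1x1 convolution sees contiguous channels, and the second kernel fuses the bias add with ReLU while also emitting the <= 0 mask reused by the backward pass. Below is a minimal smoke test, assuming a CUDA device and that Net and NetNew are importable from this module; the helper name is ours, not part of the repo.

import torch

def check_net_equivalence(atol=1e-5):
    # Build the eager and the Triton-compiled module with shared weights,
    # then compare outputs on the same input shape used by get_inputs().
    torch.manual_seed(0)
    eager = Net().cuda().eval()
    compiled = NetNew().cuda().eval()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 2048, 64, 64, device="cuda")
    with torch.no_grad():
        ref, out = eager(x), compiled(x)
    assert torch.allclose(ref, out, atol=atol), (ref - out).abs().max()

if torch.cuda.is_available():
    check_net_equivalence()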
FCDiscriminator
import torch import torch.nn as nn class FCDiscriminator(nn.Module): def __init__(self, num_classes, ndf=64): super(FCDiscriminator, self).__init__() self.conv1 = nn.Conv2d(num_classes, ndf, kernel_size=4, stride=2, padding=1) self.conv2 = nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1 ) self.conv3 = nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=1, padding=1) self.conv4 = nn.Conv2d(ndf * 4, ndf * 4, kernel_size=4, stride=1, padding=1) self.classifier = nn.Conv2d(ndf * 4, 1, kernel_size=4, stride=1, padding=1) self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, x): x = self.conv1(x) x = self.leaky_relu(x) x = self.conv2(x) x = self.leaky_relu(x) x = self.conv3(x) x = self.leaky_relu(x) x = self.conv4(x) x = self.leaky_relu(x) x = self.classifier(x) return x def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 1024 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 128 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_leaky_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 230400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 225 % 256 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_convolution_leaky_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 196 % 256 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + x3, tmp7, None) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 676 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (64, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_4, (128, 64, 4, 4), (1024, 16, 4, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (256, 128, 4, 4), (2048, 16, 4, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (256, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (1, 256, 4, 4), (4096, 16, 4, 1)) assert_size_stride(primals_11, 
(1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 32, 32), (65536, 1024, 32, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0[grid(262144)](buf1, primals_2, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 128, 16, 16), (32768, 256, 16, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_leaky_relu_1[grid(131072)](buf3, primals_5, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 256, 15, 15), (57600, 225, 15, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_leaky_relu_2[grid(230400)](buf5, primals_7, 230400, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 256, 14, 14), (50176, 196, 14, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_leaky_relu_3[grid(200704)](buf7, primals_9, 200704, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 1, 13, 13), (169, 169, 13, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_4[grid(676)](buf9, primals_11, 676, XBLOCK=256, num_warps=4, num_stages=1) del primals_11 return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7) class FCDiscriminatorNew(nn.Module): def __init__(self, num_classes, ndf=64): super(FCDiscriminatorNew, self).__init__() self.conv1 = nn.Conv2d(num_classes, ndf, kernel_size=4, stride=2, padding=1) self.conv2 = nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1 ) self.conv3 = nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=1, padding=1) self.conv4 = nn.Conv2d(ndf * 4, ndf * 4, kernel_size=4, stride=1, padding=1) self.classifier = nn.Conv2d(ndf * 4, 1, kernel_size=4, stride=1, padding=1) self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.classifier.weight primals_11 = self.classifier.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
Siddharth-Shrivastava7/DANNet
FCDiscriminator
false
14426
[ "Apache-2.0" ]
61
8db10056a4e445d24fc899505923615457cae5b7
https://github.com/Siddharth-Shrivastava7/DANNet/tree/8db10056a4e445d24fc899505923615457cae5b7
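The spatial sizes hard-coded in the assert_size_stride calls above follow directly from the Conv2d formula out = floor((in + 2*pad - kernel) / stride) + 1: the two stride-2 layers halve 64 to 32 and then 16, and each stride-1, kernel-4, padding-1 layer shaves one pixel per side. A small sketch reproducing the chain (the helper is ours):

def conv_out(n, kernel, stride, pad=1):
    # Standard Conv2d output-size formula for one spatial dimension.
    return (n + 2 * pad - kernel) // stride + 1

sizes = []
n = 64
for kernel, stride in [(4, 2), (4, 2), (4, 1), (4, 1), (4, 1)]:
    n = conv_out(n, kernel, stride)
    sizes.append(n)
assert sizes == [32, 16, 15, 14, 13]  # matches the asserted buffer shapes above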
TranspConv3DBlock
import torch import torch.nn as nn class TranspConv3DBlock(nn.Module): def __init__(self, in_planes, out_planes): super().__init__() self.block = nn.ConvTranspose3d(in_planes, out_planes, kernel_size= 2, stride=2, padding=0, output_padding=0) def forward(self, x): return self.block(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_planes': 4, 'out_planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x1 = xindex // 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 2, 2, 2), (32, 8, 4, 2, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(2, 2, 2), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=True, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 8, 8, 8), (2048, 512, 64, 8, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(2048)](buf1, primals_2, 2048, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 8, 8, 8), (512, 64, 8, 1), 0 ), primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0) class TranspConv3DBlockNew(nn.Module): def __init__(self, in_planes, out_planes): super().__init__() self.block = nn.ConvTranspose3d(in_planes, out_planes, kernel_size= 2, stride=2, padding=0, output_padding=0) def forward(self, input_0): primals_1 = self.block.weight primals_2 = self.block.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Siyuan89/self-attention-cv
TranspConv3DBlock
false
14427
[ "MIT" ]
759
b39cde2fb68e05351bf3bc8048f4af13bbab256a
https://github.com/Siyuan89/self-attention-cv/tree/b39cde2fb68e05351bf3bc8048f4af13bbab256a
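One detail worth noting in the compiled call above: the 4D (4, 4, 4, 4) test input is reinterpreted as a batch-1 5D tensor before the transposed convolution, and with kernel_size=2, stride=2, padding=0 the standard formula out = (in - 1)*stride - 2*pad + kernel + output_padding exactly doubles every spatial side. A quick check of that claim in plain PyTorch:

import torch
from torch import nn

# kernel=2, stride=2, pad=0: out = (in - 1)*2 + 2 = 2*in per spatial dim.
block = nn.ConvTranspose3d(4, 4, kernel_size=2, stride=2, padding=0)
x = torch.rand(1, 4, 4, 4, 4)              # (N, C, D, H, W)
assert block(x).shape == (1, 4, 8, 8, 8)   # matches buf0 in the call above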
Entmax15
from torch.autograd import Function import torch from torch import nn def _make_ix_like(X, dim): d = X.size(dim) rho = torch.arange(1, d + 1, device=X.device, dtype=X.dtype) view = [1] * X.dim() view[0] = -1 return rho.view(view).transpose(0, dim) def _roll_last(X, dim): if dim == -1: return X elif dim < 0: dim = X.dim() - dim perm = [i for i in range(X.dim()) if i != dim] + [dim] return X.permute(perm) def _entmax_threshold_and_support(X, dim=-1, k=None): """Core computation for 1.5-entmax: optimal threshold and support size. Parameters ---------- X : torch.Tensor The input tensor to compute thresholds over. dim : int The dimension along which to apply 1.5-entmax. k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. Returns ------- tau : torch.Tensor like `X`, with all but the `dim` dimension intact the threshold value for each vector support_size : torch LongTensor, shape like `tau` the number of nonzeros in each vector. """ if k is None or k >= X.shape[dim]: Xsrt, _ = torch.sort(X, dim=dim, descending=True) else: Xsrt, _ = torch.topk(X, k=k, dim=dim) rho = _make_ix_like(Xsrt, dim) mean = Xsrt.cumsum(dim) / rho mean_sq = (Xsrt ** 2).cumsum(dim) / rho ss = rho * (mean_sq - mean ** 2) delta = (1 - ss) / rho delta_nz = torch.clamp(delta, 0) tau = mean - torch.sqrt(delta_nz) support_size = (tau <= Xsrt).sum(dim).unsqueeze(dim) tau_star = tau.gather(dim, support_size - 1) if k is not None and k < X.shape[dim]: unsolved = (support_size == k).squeeze(dim) if torch.any(unsolved): X_ = _roll_last(X, dim)[unsolved] tau_, ss_ = _entmax_threshold_and_support(X_, dim=-1, k=2 * k) _roll_last(tau_star, dim)[unsolved] = tau_ _roll_last(support_size, dim)[unsolved] = ss_ return tau_star, support_size def entmax15(X, dim=-1, k=None): """1.5-entmax: normalizing sparse transform (a la softmax). Solves the optimization problem: max_p <x, p> - H_1.5(p) s.t. p >= 0, sum(p) == 1. where H_1.5(p) is the Tsallis alpha-entropy with alpha=1.5. Parameters ---------- X : torch.Tensor The input tensor. dim : int The dimension along which to apply 1.5-entmax. k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. Returns ------- P : torch tensor, same shape as X The projection result, such that P.sum(dim=dim) == 1 elementwise. """ return Entmax15Function.apply(X, dim, k) class Entmax15Function(Function): @classmethod def forward(cls, ctx, X, dim=0, k=None): ctx.dim = dim max_val, _ = X.max(dim=dim, keepdim=True) X = X - max_val X = X / 2 tau_star, _ = _entmax_threshold_and_support(X, dim=dim, k=k) Y = torch.clamp(X - tau_star, min=0) ** 2 ctx.save_for_backward(Y) return Y @classmethod def backward(cls, ctx, dY): Y, = ctx.saved_tensors gppr = Y.sqrt() dX = dY * gppr q = dX.sum(ctx.dim) / gppr.sum(ctx.dim) q = q.unsqueeze(ctx.dim) dX -= q * gppr return dX, None, None class Entmax15(nn.Module): def __init__(self, dim=-1, k=None): """1.5-entmax: normalizing sparse transform (a la softmax). Solves the optimization problem: max_p <x, p> - H_1.5(p) s.t. p >= 0, sum(p) == 1. 
where H_1.5(p) is the Tsallis alpha-entropy with alpha=1.5. Parameters ---------- dim : int The dimension along which to apply 1.5-entmax. k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. """ self.dim = dim self.k = k super(Entmax15, self).__init__() def forward(self, X): return entmax15(X, dim=self.dim, k=self.k) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch.autograd import Function from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton.jit def triton_per_fused_cumsum_div_max_pow_sort_sub_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = 0.5 tmp10 = tmp8 * tmp9 tmp11 = r1 tmp12 = tmp11.to(tl.int16) tmp13 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp14 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15, _tmp16 = triton_helpers.sort_with_index(tmp13, tmp14, None, 1, stable=False, descending=True) tmp17 = tmp15 * tmp15 tmp18 = tmp17.to(tl.float32) tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp20, = tl.associative_scan((tmp19,), 1, _triton_helper_fn_add0) tmp21 = tmp15.to(tl.float32) tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK]) tmp23, = tl.associative_scan((tmp22,), 1, _triton_helper_fn_add0) tl.store(out_ptr0 + (r1 + 4 * x0), tmp10, xmask) tl.store(out_ptr1 + (r1 + 4 * x0), tmp15, xmask) tl.store(out_ptr2 + (r1 + 4 * x0), tmp20, xmask) tl.store(out_ptr3 + (r1 + 4 * x0), tmp23, xmask) @triton.jit def triton_poi_fused_clamp_div_le_mul_pow_rsub_sqrt_sub_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp30 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp34 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp37 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp47 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp51 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp54 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp64 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 / tmp1 tmp4 = tmp3 / tmp1 tmp5 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp1 * tmp6 tmp8 = tmp1 - tmp7 
tmp9 = tmp8 / tmp1 tmp10 = 0.0 tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp2 - tmp12 tmp15 = tmp13 <= tmp14 tmp16 = tmp15.to(tl.int64) tmp18 = 2.0 tmp19 = tmp17 / tmp18 tmp21 = tmp20 / tmp18 tmp22 = tmp19 * tmp19 tmp23 = tmp21 - tmp22 tmp24 = tmp18 * tmp23 tmp25 = tmp1 - tmp24 tmp26 = tmp25 / tmp18 tmp27 = triton_helpers.maximum(tmp26, tmp10) tmp28 = libdevice.sqrt(tmp27) tmp29 = tmp19 - tmp28 tmp31 = tmp29 <= tmp30 tmp32 = tmp31.to(tl.int64) tmp33 = tmp16 + tmp32 tmp35 = 3.0 tmp36 = tmp34 / tmp35 tmp38 = tmp37 / tmp35 tmp39 = tmp36 * tmp36 tmp40 = tmp38 - tmp39 tmp41 = tmp35 * tmp40 tmp42 = tmp1 - tmp41 tmp43 = tmp42 / tmp35 tmp44 = triton_helpers.maximum(tmp43, tmp10) tmp45 = libdevice.sqrt(tmp44) tmp46 = tmp36 - tmp45 tmp48 = tmp46 <= tmp47 tmp49 = tmp48.to(tl.int64) tmp50 = tmp33 + tmp49 tmp52 = 4.0 tmp53 = tmp51 / tmp52 tmp55 = tmp54 / tmp52 tmp56 = tmp53 * tmp53 tmp57 = tmp55 - tmp56 tmp58 = tmp52 * tmp57 tmp59 = tmp1 - tmp58 tmp60 = tmp59 / tmp52 tmp61 = triton_helpers.maximum(tmp60, tmp10) tmp62 = libdevice.sqrt(tmp61) tmp63 = tmp53 - tmp62 tmp65 = tmp63 <= tmp64 tmp66 = tmp65.to(tl.int64) tmp67 = tmp50 + tmp66 tl.store(out_ptr0 + x0, tmp67, xmask) @triton.jit def triton_poi_fused_clamp_div_gather_mul_pow_rsub_sqrt_sub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.full([1], 1, tl.int64) tmp3 = tmp1 - tmp2 tmp4 = tl.full([XBLOCK], 4, tl.int32) tmp5 = tmp3 + tmp4 tmp6 = tmp3 < 0 tmp7 = tl.where(tmp6, tmp5, tmp3) tl.device_assert((0 <= tmp7) & (tmp7 < 4) | ~xmask, 'index out of bounds: 0 <= tmp7 < 4') tmp9 = tl.load(in_ptr2 + (tmp7 + 4 * x1), xmask, eviction_policy= 'evict_last') tmp10 = 1 + tmp7 tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tl.load(in_ptr3 + (tmp7 + 4 * x1), xmask, eviction_policy= 'evict_last') tmp14 = tmp13 / tmp11 tmp15 = tmp12 * tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp11 * tmp16 tmp18 = 1.0 tmp19 = tmp18 - tmp17 tmp20 = tmp19 / tmp11 tmp21 = 0.0 tmp22 = triton_helpers.maximum(tmp20, tmp21) tmp23 = libdevice.sqrt(tmp22) tmp24 = tmp12 - tmp23 tmp25 = tmp0 - tmp24 tmp26 = triton_helpers.maximum(tmp25, tmp21) tmp27 = tmp26 * tmp26 tl.store(out_ptr0 + x2, tmp27, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_cumsum_div_max_pow_sort_sub_0[grid(64)](arg0_1, buf0, buf1, buf3, buf4, 64, 4, XBLOCK=8, num_warps=2, num_stages=1) del arg0_1 buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) triton_poi_fused_clamp_div_le_mul_pow_rsub_sqrt_sub_sum_1[grid(64)]( buf4, buf3, buf1, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = buf1 del buf1 triton_poi_fused_clamp_div_gather_mul_pow_rsub_sqrt_sub_2[grid(256)]( buf0, buf5, buf4, buf3, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del buf3 del buf4 del buf5 return buf6, def _make_ix_like(X, dim): d = X.size(dim) rho = torch.arange(1, d + 
1, device=X.device, dtype=X.dtype) view = [1] * X.dim() view[0] = -1 return rho.view(view).transpose(0, dim) def _roll_last(X, dim): if dim == -1: return X elif dim < 0: dim = X.dim() - dim perm = [i for i in range(X.dim()) if i != dim] + [dim] return X.permute(perm) def _entmax_threshold_and_support(X, dim=-1, k=None): """Core computation for 1.5-entmax: optimal threshold and support size. Parameters ---------- X : torch.Tensor The input tensor to compute thresholds over. dim : int The dimension along which to apply 1.5-entmax. k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. Returns ------- tau : torch.Tensor like `X`, with all but the `dim` dimension intact the threshold value for each vector support_size : torch LongTensor, shape like `tau` the number of nonzeros in each vector. """ if k is None or k >= X.shape[dim]: Xsrt, _ = torch.sort(X, dim=dim, descending=True) else: Xsrt, _ = torch.topk(X, k=k, dim=dim) rho = _make_ix_like(Xsrt, dim) mean = Xsrt.cumsum(dim) / rho mean_sq = (Xsrt ** 2).cumsum(dim) / rho ss = rho * (mean_sq - mean ** 2) delta = (1 - ss) / rho delta_nz = torch.clamp(delta, 0) tau = mean - torch.sqrt(delta_nz) support_size = (tau <= Xsrt).sum(dim).unsqueeze(dim) tau_star = tau.gather(dim, support_size - 1) if k is not None and k < X.shape[dim]: unsolved = (support_size == k).squeeze(dim) if torch.any(unsolved): X_ = _roll_last(X, dim)[unsolved] tau_, ss_ = _entmax_threshold_and_support(X_, dim=-1, k=2 * k) _roll_last(tau_star, dim)[unsolved] = tau_ _roll_last(support_size, dim)[unsolved] = ss_ return tau_star, support_size def entmax15(X, dim=-1, k=None): """1.5-entmax: normalizing sparse transform (a la softmax). Solves the optimization problem: max_p <x, p> - H_1.5(p) s.t. p >= 0, sum(p) == 1. where H_1.5(p) is the Tsallis alpha-entropy with alpha=1.5. Parameters ---------- X : torch.Tensor The input tensor. dim : int The dimension along which to apply 1.5-entmax. k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. Returns ------- P : torch tensor, same shape as X The projection result, such that P.sum(dim=dim) == 1 elementwise. """ return Entmax15Function.apply(X, dim, k) class Entmax15Function(Function): @classmethod def forward(cls, ctx, X, dim=0, k=None): ctx.dim = dim max_val, _ = X.max(dim=dim, keepdim=True) X = X - max_val X = X / 2 tau_star, _ = _entmax_threshold_and_support(X, dim=dim, k=k) Y = torch.clamp(X - tau_star, min=0) ** 2 ctx.save_for_backward(Y) return Y @classmethod def backward(cls, ctx, dY): Y, = ctx.saved_tensors gppr = Y.sqrt() dX = dY * gppr q = dX.sum(ctx.dim) / gppr.sum(ctx.dim) q = q.unsqueeze(ctx.dim) dX -= q * gppr return dX, None, None class Entmax15New(nn.Module): def __init__(self, dim=-1, k=None): """1.5-entmax: normalizing sparse transform (a la softmax). Solves the optimization problem: max_p <x, p> - H_1.5(p) s.t. p >= 0, sum(p) == 1. where H_1.5(p) is the Tsallis alpha-entropy with alpha=1.5. Parameters ---------- dim : int The dimension along which to apply 1.5-entmax. 
k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. """ self.dim = dim self.k = k super(Entmax15New, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Sologa/awesome-align
Entmax15
false
14428
[ "BSD-3-Clause" ]
173
62eaae7eac9bac06c10627fac6cc942c07a50e64
https://github.com/Sologa/awesome-align/tree/62eaae7eac9bac06c10627fac6cc942c07a50e64
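The docstring above states the defining properties of 1.5-entmax: a probability distribution that, unlike softmax, can assign exact zeros. They are cheap to verify with the pure-PyTorch entmax15 defined above (CPU suffices; only the Entmax15New wrapper needs the compiled CUDA path). One caveat noticed while reading: _roll_last normalizes negative dims with X.dim() - dim, which for dim=-2 would yield an out-of-range axis; the default dim=-1, and the sketch below, never take that branch.

import torch

torch.manual_seed(0)
X = 3 * torch.randn(8, 16)                 # spread logits to encourage sparsity
P = entmax15(X, dim=-1)
assert torch.all(P >= 0)                   # clamp(...)**2 is nonnegative
assert torch.allclose(P.sum(dim=-1), torch.ones(8), atol=1e-5)
print("zeros per row:", (P == 0).sum(dim=-1).tolist())  # typically > 0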
CustomSoftplus
import torch import torch.nn as nn import torch.utils.data class Softplus(torch.autograd.Function): @staticmethod def forward(ctx, i): result = torch.log(1 + torch.exp(i)) ctx.save_for_backward(i) return result @staticmethod def backward(ctx, grad_output): return grad_output * torch.sigmoid(ctx.saved_variables[0]) class CustomSoftplus(nn.Module): def forward(self, input_tensor): return Softplus.apply(input_tensor) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_exp_log_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl_math.exp(tmp0) tmp2 = 1.0 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_exp_log_0[grid(256)](arg0_1, buf0, 256, XBLOCK =256, num_warps=4, num_stages=1) del arg0_1 return buf0, class Softplus(torch.autograd.Function): @staticmethod def forward(ctx, i): result = torch.log(1 + torch.exp(i)) ctx.save_for_backward(i) return result @staticmethod def backward(ctx, grad_output): return grad_output * torch.sigmoid(ctx.saved_variables[0]) class CustomSoftplusNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
SortAnon/BVAE-TTS
CustomSoftplus
false
14429
[ "MIT" ]
138
69c2ee0c8bf30fe6133cfa8be68a36916f15bcff
https://github.com/SortAnon/BVAE-TTS/tree/69c2ee0c8bf30fe6133cfa8be68a36916f15bcff
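The hand-written backward above is the textbook derivative, since d/dx log(1 + e^x) = sigmoid(x). Below is a float64 sanity check (a sketch, not repo code), with two caveats: the naive log(1 + exp(x)) overflows for large x, which F.softplus avoids with a linear branch, and depending on the PyTorch version the deprecated ctx.saved_variables alias in backward may need to become ctx.saved_tensors before gradcheck will run.

import torch
import torch.nn.functional as F

x = torch.randn(16, dtype=torch.double, requires_grad=True)
# Forward agrees with the built-in softplus away from the overflow regime.
assert torch.allclose(Softplus.apply(x), F.softplus(x), atol=1e-8)
# Numerical-vs-analytic gradient comparison of the manual backward.
assert torch.autograd.gradcheck(Softplus.apply, (x,))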
Sparsemax
from torch.autograd import Function import torch from torch import nn def _make_ix_like(X, dim): d = X.size(dim) rho = torch.arange(1, d + 1, device=X.device, dtype=X.dtype) view = [1] * X.dim() view[0] = -1 return rho.view(view).transpose(0, dim) def _roll_last(X, dim): if dim == -1: return X elif dim < 0: dim = X.dim() - dim perm = [i for i in range(X.dim()) if i != dim] + [dim] return X.permute(perm) def _sparsemax_threshold_and_support(X, dim=-1, k=None): """Core computation for sparsemax: optimal threshold and support size. Parameters ---------- X : torch.Tensor The input tensor to compute thresholds over. dim : int The dimension along which to apply sparsemax. k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. Returns ------- tau : torch.Tensor like `X`, with all but the `dim` dimension intact the threshold value for each vector support_size : torch LongTensor, shape like `tau` the number of nonzeros in each vector. """ if k is None or k >= X.shape[dim]: topk, _ = torch.sort(X, dim=dim, descending=True) else: topk, _ = torch.topk(X, k=k, dim=dim) topk_cumsum = topk.cumsum(dim) - 1 rhos = _make_ix_like(topk, dim) support = rhos * topk > topk_cumsum support_size = support.sum(dim=dim).unsqueeze(dim) tau = topk_cumsum.gather(dim, support_size - 1) tau /= support_size if k is not None and k < X.shape[dim]: unsolved = (support_size == k).squeeze(dim) if torch.any(unsolved): in_ = _roll_last(X, dim)[unsolved] tau_, ss_ = _sparsemax_threshold_and_support(in_, dim=-1, k=2 * k) _roll_last(tau, dim)[unsolved] = tau_ _roll_last(support_size, dim)[unsolved] = ss_ return tau, support_size def sparsemax(X, dim=-1, k=None): """sparsemax: normalizing sparse transform (a la softmax). Solves the projection: min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1. Parameters ---------- X : torch.Tensor The input tensor. dim : int The dimension along which to apply sparsemax. k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. Returns ------- P : torch tensor, same shape as X The projection result, such that P.sum(dim=dim) == 1 elementwise. """ return SparsemaxFunction.apply(X, dim, k) class SparsemaxFunction(Function): @classmethod def forward(cls, ctx, X, dim=-1, k=None): ctx.dim = dim max_val, _ = X.max(dim=dim, keepdim=True) X = X - max_val tau, supp_size = _sparsemax_threshold_and_support(X, dim=dim, k=k) output = torch.clamp(X - tau, min=0) ctx.save_for_backward(supp_size, output) return output @classmethod def backward(cls, ctx, grad_output): supp_size, output = ctx.saved_tensors dim = ctx.dim grad_input = grad_output.clone() grad_input[output == 0] = 0 v_hat = grad_input.sum(dim=dim) / supp_size.squeeze() v_hat = v_hat.unsqueeze(dim) grad_input = torch.where(output != 0, grad_input - v_hat, grad_input) return grad_input, None, None class Sparsemax(nn.Module): def __init__(self, dim=-1, k=None): """sparsemax: normalizing sparse transform (a la softmax). Solves the projection: min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1. 
Parameters ---------- dim : int The dimension along which to apply sparsemax. k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. """ self.dim = dim self.k = k super(Sparsemax, self).__init__() def forward(self, X): return sparsemax(X, dim=self.dim, k=self.k) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch.autograd import Function from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def _triton_helper_fn_add0(arg0_0, arg1_0): tmp0 = arg0_0 + arg1_0 return tmp0 @triton.jit def triton_per_fused_cumsum_max_sort_sub_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 64 RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = r1 tmp10 = tmp9.to(tl.int16) tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp12 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13, _tmp14 = triton_helpers.sort_with_index(tmp11, tmp12, None, 1, stable=False, descending=True) tmp15 = tmp13.to(tl.float32) tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp17, = tl.associative_scan((tmp16,), 1, _triton_helper_fn_add0) tl.store(out_ptr0 + (r1 + 4 * x0), tmp8, xmask) tl.store(out_ptr1 + (r1 + 4 * x0), tmp13, xmask) tl.store(out_ptr2 + (r1 + 4 * x0), tmp17, xmask) @triton.jit def triton_poi_fused_gt_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp26 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp1 * tmp0 tmp4 = tmp3 - tmp1 tmp5 = tmp2 > tmp4 tmp6 = tmp5.to(tl.int64) tmp8 = 2.0 tmp9 = tmp8 * tmp7 tmp11 = tmp10 - tmp1 tmp12 = tmp9 > tmp11 tmp13 = tmp12.to(tl.int64) tmp14 = tmp6 + tmp13 tmp16 = 3.0 tmp17 = tmp16 * tmp15 tmp19 = tmp18 - tmp1 tmp20 = tmp17 > tmp19 tmp21 = tmp20.to(tl.int64) tmp22 = tmp14 + tmp21 tmp24 = 4.0 tmp25 = tmp24 * tmp23 tmp27 = tmp26 - tmp1 tmp28 = tmp25 > tmp27 tmp29 = tmp28.to(tl.int64) tmp30 = tmp22 + tmp29 tl.store(out_ptr0 + x0, tmp30, xmask) @triton.jit def triton_poi_fused_clamp_div_gather_sub_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + 
x2, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tl.full([1], 1, tl.int64) tmp3 = tmp1 - tmp2 tmp4 = tl.full([XBLOCK], 4, tl.int32) tmp5 = tmp3 + tmp4 tmp6 = tmp3 < 0 tmp7 = tl.where(tmp6, tmp5, tmp3) tl.device_assert((0 <= tmp7) & (tmp7 < 4) | ~xmask, 'index out of bounds: 0 <= tmp7 < 4') tmp9 = tl.load(in_ptr2 + (tmp7 + 4 * x1), xmask, eviction_policy= 'evict_last') tmp10 = 1.0 tmp11 = tmp9 - tmp10 tmp12 = tmp1.to(tl.float32) tmp13 = tmp11 / tmp12 tmp14 = tmp0 - tmp13 tmp15 = 0.0 tmp16 = triton_helpers.maximum(tmp14, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_cumsum_max_sort_sub_0[grid(64)](arg0_1, buf0, buf1, buf3, 64, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) triton_poi_fused_gt_mul_sub_sum_1[grid(64)](buf1, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = buf1 del buf1 triton_poi_fused_clamp_div_gather_sub_2[grid(256)](buf0, buf4, buf3, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del buf3 del buf4 return buf5, def _make_ix_like(X, dim): d = X.size(dim) rho = torch.arange(1, d + 1, device=X.device, dtype=X.dtype) view = [1] * X.dim() view[0] = -1 return rho.view(view).transpose(0, dim) def _roll_last(X, dim): if dim == -1: return X elif dim < 0: dim = X.dim() - dim perm = [i for i in range(X.dim()) if i != dim] + [dim] return X.permute(perm) def _sparsemax_threshold_and_support(X, dim=-1, k=None): """Core computation for sparsemax: optimal threshold and support size. Parameters ---------- X : torch.Tensor The input tensor to compute thresholds over. dim : int The dimension along which to apply sparsemax. k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. Returns ------- tau : torch.Tensor like `X`, with all but the `dim` dimension intact the threshold value for each vector support_size : torch LongTensor, shape like `tau` the number of nonzeros in each vector. """ if k is None or k >= X.shape[dim]: topk, _ = torch.sort(X, dim=dim, descending=True) else: topk, _ = torch.topk(X, k=k, dim=dim) topk_cumsum = topk.cumsum(dim) - 1 rhos = _make_ix_like(topk, dim) support = rhos * topk > topk_cumsum support_size = support.sum(dim=dim).unsqueeze(dim) tau = topk_cumsum.gather(dim, support_size - 1) tau /= support_size if k is not None and k < X.shape[dim]: unsolved = (support_size == k).squeeze(dim) if torch.any(unsolved): in_ = _roll_last(X, dim)[unsolved] tau_, ss_ = _sparsemax_threshold_and_support(in_, dim=-1, k=2 * k) _roll_last(tau, dim)[unsolved] = tau_ _roll_last(support_size, dim)[unsolved] = ss_ return tau, support_size def sparsemax(X, dim=-1, k=None): """sparsemax: normalizing sparse transform (a la softmax). Solves the projection: min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1. Parameters ---------- X : torch.Tensor The input tensor. dim : int The dimension along which to apply sparsemax. 
k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. Returns ------- P : torch tensor, same shape as X The projection result, such that P.sum(dim=dim) == 1 elementwise. """ return SparsemaxFunction.apply(X, dim, k) class SparsemaxFunction(Function): @classmethod def forward(cls, ctx, X, dim=-1, k=None): ctx.dim = dim max_val, _ = X.max(dim=dim, keepdim=True) X = X - max_val tau, supp_size = _sparsemax_threshold_and_support(X, dim=dim, k=k) output = torch.clamp(X - tau, min=0) ctx.save_for_backward(supp_size, output) return output @classmethod def backward(cls, ctx, grad_output): supp_size, output = ctx.saved_tensors dim = ctx.dim grad_input = grad_output.clone() grad_input[output == 0] = 0 v_hat = grad_input.sum(dim=dim) / supp_size.squeeze() v_hat = v_hat.unsqueeze(dim) grad_input = torch.where(output != 0, grad_input - v_hat, grad_input) return grad_input, None, None class SparsemaxNew(nn.Module): def __init__(self, dim=-1, k=None): """sparsemax: normalizing sparse transform (a la softmax). Solves the projection: min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1. Parameters ---------- dim : int The dimension along which to apply sparsemax. k : int or None number of largest elements to partial-sort over. For optimal performance, should be slightly bigger than the expected number of nonzeros in the solution. If the solution is more than k-sparse, this function is recursively called with a 2*k schedule. If `None`, full sorting is performed from the beginning. """ self.dim = dim self.k = k super(SparsemaxNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Sologa/awesome-align
Sparsemax
false
14430
[ "BSD-3-Clause" ]
173
62eaae7eac9bac06c10627fac6cc942c07a50e64
https://github.com/Sologa/awesome-align/tree/62eaae7eac9bac06c10627fac6cc942c07a50e64
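The sparsemax docstring above makes three checkable promises: the output is the Euclidean projection onto the simplex (nonnegative rows summing to 1), it is exactly sparse for well-separated logits, and the k-truncated path recursively falls back to 2*k until it agrees with the full sort. A short property check with the pure-PyTorch sparsemax defined above:

import torch

torch.manual_seed(0)
X = 3 * torch.randn(8, 16)
P_full = sparsemax(X, dim=-1)              # full-sort path (k=None)
P_topk = sparsemax(X, dim=-1, k=2)         # exercises the recursive 2*k fallback
assert torch.all(P_full >= 0)
assert torch.allclose(P_full.sum(dim=-1), torch.ones(8), atol=1e-5)
assert torch.allclose(P_full, P_topk, atol=1e-6)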
ResNetClassifier
import torch from torch import nn class ResNetClassifier(nn.Module): def __init__(self, n_class, len_feature): super().__init__() self.len_feature = len_feature self.classifier = nn.Linear(self.len_feature, n_class) def forward(self, x): x = x.view(x.size(0), x.size(1), -1) x = x.mean(dim=-1) x = self.classifier(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_class': 4, 'len_feature': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 16.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, buf1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_2 del primals_3 return buf2, buf1 class ResNetClassifierNew(nn.Module): def __init__(self, n_class, len_feature): super().__init__() self.len_feature = len_feature self.classifier = nn.Linear(self.len_feature, n_class) def forward(self, input_0): primals_2 = self.classifier.weight primals_3 = self.classifier.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Starrah/THU-SuperMoon
ResNetClassifier
false
14431
[ "MIT" ]
64
1e6b8ccc207f789fb8426806251cc3d4e1cca35a
https://github.com/Starrah/THU-SuperMoon/tree/1e6b8ccc207f789fb8426806251cc3d4e1cca35a
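The forward above is just global average pooling followed by a linear head, which is also what the fused Triton kernel computes: a mean over the 16 spatial positions, then an addmm. A shape walk-through in plain PyTorch:

import torch

x = torch.rand(4, 4, 4, 4)                         # (B, C, H, W)
pooled = x.view(x.size(0), x.size(1), -1).mean(dim=-1)
assert pooled.shape == (4, 4)                      # (B, len_feature)
assert torch.allclose(pooled, x.mean(dim=(2, 3)))  # same as 2D global avg pool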
CoreNetwork
import torch import torch.nn as nn import torch.nn.functional as F class CoreNetwork(nn.Module): """The core network. An RNN that maintains an internal state by integrating information extracted from the history of past observations. It encodes the agent's knowledge of the environment through a state vector `h_t` that gets updated at every time step `t`. Concretely, it takes the glimpse representation `g_t` as input, and combines it with its internal state `h_t_prev` at the previous time step, to produce the new internal state `h_t` at the current time step. In other words: `h_t = relu( fc(h_t_prev) + fc(g_t) )` Args: input_size: input size of the rnn. hidden_size: hidden size of the rnn. g_t: a 2D tensor of shape (B, hidden_size). The glimpse representation returned by the glimpse network for the current timestep `t`. h_t_prev: a 2D tensor of shape (B, hidden_size). The hidden state vector for the previous timestep `t-1`. Returns: h_t: a 2D tensor of shape (B, hidden_size). The hidden state vector for the current timestep `t`. """ def __init__(self, input_size, hidden_size): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.i2h = nn.Linear(input_size, hidden_size) self.h2h = nn.Linear(hidden_size, hidden_size) def forward(self, g_t, h_t_prev): h1 = self.i2h(g_t) h2 = self.h2h(h_t_prev) h_t = F.relu(h1 + h2) return h_t def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_relu_threshold_backward_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.full([1], 0, tl.int32) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp9 = 0.0 tmp10 = tmp8 <= tmp9 tl.store(in_out_ptr0 + x2, tmp8, xmask) tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_add_relu_threshold_backward_0[grid(256)](buf2, primals_2, buf1, primals_5, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf1 del primals_2 del primals_5 return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf3 class CoreNetworkNew(nn.Module): """The core network. An RNN that maintains an internal state by integrating information extracted from the history of past observations. It encodes the agent's knowledge of the environment through a state vector `h_t` that gets updated at every time step `t`. Concretely, it takes the glimpse representation `g_t` as input, and combines it with its internal state `h_t_prev` at the previous time step, to produce the new internal state `h_t` at the current time step. In other words: `h_t = relu( fc(h_t_prev) + fc(g_t) )` Args: input_size: input size of the rnn. hidden_size: hidden size of the rnn. g_t: a 2D tensor of shape (B, hidden_size). The glimpse representation returned by the glimpse network for the current timestep `t`. h_t_prev: a 2D tensor of shape (B, hidden_size). The hidden state vector for the previous timestep `t-1`. Returns: h_t: a 2D tensor of shape (B, hidden_size). The hidden state vector for the current timestep `t`. 
""" def __init__(self, input_size, hidden_size): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.i2h = nn.Linear(input_size, hidden_size) self.h2h = nn.Linear(hidden_size, hidden_size) def forward(self, input_0, input_1): primals_1 = self.i2h.weight primals_2 = self.i2h.bias primals_4 = self.h2h.weight primals_5 = self.h2h.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
SmirnovKol/recurrent-visual-attention
CoreNetwork
false
14432
[ "MIT" ]
463
4cb8d9e768ae35f38439278bb8a7b4d6b253a537
https://github.com/SmirnovKol/recurrent-visual-attention/tree/4cb8d9e768ae35f38439278bb8a7b4d6b253a537
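The docstring above gives the recurrence h_t = relu(fc(h_t_prev) + fc(g_t)); a minimal unroll makes the intended usage concrete. This is a sketch with made-up sizes: the 2D (B, hidden_size) shapes follow the docstring even though get_inputs() feeds 4D tensors (nn.Linear broadcasts over leading dimensions either way).

import torch

core = CoreNetwork(input_size=4, hidden_size=4)
B, T = 2, 3
h = torch.zeros(B, 4)                  # h_0
for t in range(T):
    g_t = torch.rand(B, 4)             # glimpse representation at step t
    h = core(g_t, h)                   # h_t = relu(i2h(g_t) + h2h(h_prev))
assert h.shape == (B, 4) and bool(torch.all(h >= 0))  # ReLU output is nonnegative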
BertSelfAttention
from _paritybench_helpers import _mock_config import math import torch from torch import nn class BertSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask=None): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self. attention_head_size) if attention_mask is not None: attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = context_layer return outputs def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr1 + x2, xmask) tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = float('-inf') tmp2 = tmp0 == tmp1 tmp3 = tmp2 == 0 tmp4 = tmp3.to(tl.int64) tmp5 = tmp4 != 0 tmp7 = tmp6 == tmp1 tmp8 = tmp7 == 0 tmp9 = tmp8.to(tl.int64) tmp10 = tmp9 != 0 tmp11 = tmp5 | tmp10 tmp13 = tmp12 == tmp1 tmp14 = tmp13 == 0 tmp15 = tmp14.to(tl.int64) tmp16 = tmp15 != 0 tmp17 = tmp11 | tmp16 tmp19 = tmp18 == tmp1 tmp20 = tmp19 == 0 tmp21 = tmp20.to(tl.int64) tmp22 = tmp21 != 0 tmp23 = tmp17 | tmp22 tmp24 = tmp23 == 0 tmp28 = tmp26 + tmp27 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp33 = tmp25 / tmp32 tmp34 = 0.0 tmp35 = tl.where(tmp24, tmp34, tmp33) tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def 
triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_1[grid(256)](buf5, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1) buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_2[grid(256)](buf5, buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf5 del buf6 buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf1 triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf8, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf9 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 
extern_kernels.bmm(reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf9, buf10, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf9 return reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf7, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class BertSelfAttentionNew(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward(self, input_0): primals_1 = self.query.weight primals_2 = self.query.bias primals_4 = self.key.weight primals_5 = self.key.bias primals_6 = self.value.weight primals_7 = self.value.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Sologa/awesome-align
BertSelfAttention
false
14,433
[ "BSD-3-Clause" ]
173
62eaae7eac9bac06c10627fac6cc942c07a50e64
https://github.com/Sologa/awesome-align/tree/62eaae7eac9bac06c10627fac6cc942c07a50e64
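The two softmax kernels in this record split the computation into separate passes: triton_poi_fused_1 exponentiates after subtracting the per-row maximum (the standard overflow guard), and triton_poi_fused_2 normalizes, writing 0.0 for rows whose inputs were all -inf rather than dividing 0 by 0. A minimal eager-PyTorch sketch of the same decomposition; the function name is illustrative, not part of the record:

import torch

def masked_two_pass_softmax(scores: torch.Tensor) -> torch.Tensor:
    # Pass 1 (mirrors triton_poi_fused_1): shift by the row max, then exp.
    numer = torch.exp(scores - scores.amax(dim=-1, keepdim=True))
    # Pass 2 (mirrors triton_poi_fused_2): normalize; fully masked rows
    # (every entry -inf) are forced to zero instead of producing NaNs.
    all_masked = (scores == float('-inf')).all(dim=-1, keepdim=True)
    probs = numer / numer.sum(dim=-1, keepdim=True)
    return torch.where(all_masked, torch.zeros_like(probs), probs)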
UpsampleNet
import torch import torch.nn as nn from torch.nn.utils import weight_norm class UpsampleNet(nn.Module): def __init__(self, input_size, output_size, upsample_factor): super(UpsampleNet, self).__init__() self.input_size = input_size self.output_size = output_size self.upsample_factor = upsample_factor layer = nn.ConvTranspose1d(input_size, output_size, upsample_factor * 2, upsample_factor, padding=upsample_factor // 2) self.layer = weight_norm(layer) def forward(self, inputs): outputs = self.layer(inputs) outputs = outputs[:, :, :inputs.size(-1) * self.upsample_factor] return outputs def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4, 'upsample_factor': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn.utils import weight_norm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused__weight_norm_interface_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 32 * x0), xmask, other=0.0) tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.where(xmask, tmp2, 0) tmp5 = tl.sum(tmp4, 1)[:, None] tmp6 = libdevice.sqrt(tmp5) tmp8 = tmp7 / tmp6 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr0 + (r1 + 32 * x0), tmp9, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_2, (4, 4, 8), (32, 8, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) get_raw_stream(0) triton_per_fused__weight_norm_interface_0[grid(4)](buf1, primals_2, primals_1, buf2, 4, 32, XBLOCK=1, num_warps=2, num_stages=1) buf3 = extern_kernels.convolution(primals_4, buf2, stride=(4,), padding=(2,), dilation=(1,), transposed=True, output_padding=(0 ,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 16), (64, 16, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_1[grid(256)](buf4, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 return buf4, buf2, primals_1, primals_2, primals_4, buf1, buf2 class UpsampleNetNew(nn.Module): def __init__(self, input_size, output_size, upsample_factor): super(UpsampleNetNew, self).__init__() self.input_size = input_size self.output_size = output_size self.upsample_factor = upsample_factor layer = nn.ConvTranspose1d(input_size, output_size, upsample_factor * 2, upsample_factor, padding=upsample_factor // 2) self.layer = weight_norm(layer) def forward(self, input_0): primals_3 = self.layer.bias primals_1 = self.layer.weight_g primals_2 = self.layer.weight_v primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
SolomidHero/EA-SVC
UpsampleNet
false
14,434
[ "MIT" ]
88
23a0a9d9c0e9670dd7c777d56b00883d84c23237
https://github.com/SolomidHero/EA-SVC/tree/23a0a9d9c0e9670dd7c777d56b00883d84c23237
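triton_per_fused__weight_norm_interface_0 above materializes the weight_norm reparametrization w = g * v / ||v||, reducing over 32 elements per output filter (4 channels x kernel size 8) to get the per-filter L2 norm before the transposed convolution runs. An equivalent eager sketch, with an assumed helper name:

import torch

def reconstruct_weight(weight_g: torch.Tensor, weight_v: torch.Tensor) -> torch.Tensor:
    # Per-filter L2 norm over everything but dim 0, matching the kernel's
    # sqrt(sum(v*v)) reduction, then scale v by g / norm.
    norm = weight_v.flatten(1).norm(dim=1).view(-1, 1, 1)
    return weight_v * (weight_g / norm)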
BasicModulationBlock
import torch class BaseModule(torch.nn.Module): def __init__(self): super(BaseModule, self).__init__() @property def nparams(self): return sum(p.numel() for p in self.parameters() if p.requires_grad) class Conv1dWithInitialization(BaseModule): def __init__(self, **kwargs): super(Conv1dWithInitialization, self).__init__() self.conv1d = torch.nn.Conv1d(**kwargs) torch.nn.init.orthogonal_(self.conv1d.weight.data, gain=1) def forward(self, x): return self.conv1d(x) class FeatureWiseAffine(BaseModule): def __init__(self): super(FeatureWiseAffine, self).__init__() def forward(self, x, scale, shift): outputs = scale * x + shift return outputs class BasicModulationBlock(BaseModule): """ Linear modulation part of UBlock, represented by sequence of the following layers: - Feature-wise Affine - LReLU - 3x1 Conv """ def __init__(self, n_channels, dilation): super(BasicModulationBlock, self).__init__() self.featurewise_affine = FeatureWiseAffine() self.leaky_relu = torch.nn.LeakyReLU(0.2) self.convolution = Conv1dWithInitialization(in_channels=n_channels, out_channels=n_channels, kernel_size=3, stride=1, padding= dilation, dilation=dilation) def forward(self, x, scale, shift): outputs = self.featurewise_affine(x, scale, shift) outputs = self.leaky_relu(outputs) outputs = self.convolution(outputs) return outputs def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'n_channels': 4, 'dilation': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_leaky_relu_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask) tmp3 = tl.load(in_ptr2 + x0, xmask) tmp2 = tmp0 * tmp1 tmp4 = tmp2 + tmp3 tmp5 = 0.0 tmp6 = tmp4 > tmp5 tmp7 = 0.2 tmp8 = tmp4 * tmp7 tmp9 = tl.where(tmp6, tmp4, tmp8) tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_leaky_relu_mul_0[grid(16)](primals_1, primals_2, primals_3, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 del primals_2 del primals_3 buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 4 ), (0, 4, 1), 0), primals_4, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 4), (16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(16)](buf2, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 return reinterpret_tensor(buf2, (4, 4), (4, 1), 0 ), primals_4, reinterpret_tensor(buf0, (1, 4, 4), (16, 4, 1), 0) class BaseModule(torch.nn.Module): def __init__(self): super(BaseModule, self).__init__() @property def nparams(self): return sum(p.numel() for p in self.parameters() if p.requires_grad) class Conv1dWithInitialization(BaseModule): def __init__(self, **kwargs): super(Conv1dWithInitialization, self).__init__() self.conv1d = torch.nn.Conv1d(**kwargs) torch.nn.init.orthogonal_(self.conv1d.weight.data, gain=1) def forward(self, x): return self.conv1d(x) class FeatureWiseAffine(BaseModule): def __init__(self): super(FeatureWiseAffine, self).__init__() def forward(self, x, scale, shift): outputs = scale * x + shift return outputs class BasicModulationBlockNew(BaseModule): """ Linear modulation part of UBlock, represented by sequence of the following layers: - Feature-wise Affine - LReLU - 3x1 Conv """ def __init__(self, n_channels, dilation): super(BasicModulationBlockNew, self).__init__() self.featurewise_affine = FeatureWiseAffine() self.leaky_relu = torch.nn.LeakyReLU(0.2) self.convolution = Conv1dWithInitialization(in_channels=n_channels, 
out_channels=n_channels, kernel_size=3, stride=1, padding= dilation, dilation=dilation) def forward(self, input_0, input_1, input_2): primals_4 = self.convolution.conv1d.weight primals_5 = self.convolution.conv1d.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Seungwoo0326/WaveGrad2-1
BasicModulationBlock
false
14,435
[ "MIT" ]
45
3b202201348449b89353f28bce1596ca7939a810
https://github.com/Seungwoo0326/WaveGrad2-1/tree/3b202201348449b89353f28bce1596ca7939a810
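triton_poi_fused_add_leaky_relu_mul_0 in this record fuses the feature-wise affine modulation (scale * x + shift) with LeakyReLU(0.2) into a single elementwise pass. A plain PyTorch rendering of that fusion; the function name is illustrative:

import torch

def film_leaky_relu(x: torch.Tensor, scale: torch.Tensor, shift: torch.Tensor,
                    negative_slope: float = 0.2) -> torch.Tensor:
    # One traversal of the data: affine modulation, then the leaky-ReLU select.
    y = scale * x + shift
    return torch.where(y > 0, y, y * negative_slope)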
LocationNetwork
import torch import torch.nn as nn import torch.nn.functional as F from torch.distributions import Normal class LocationNetwork(nn.Module): """The location network. Uses the internal state `h_t` of the core network to produce the location coordinates `l_t` for the next time step. Concretely, feeds the hidden state `h_t` through a fc layer followed by a tanh to clamp the output between [-1, 1]. This produces a 2D vector of means used to parametrize a two-component Gaussian with a fixed variance from which the location coordinates `l_t` for the next time step are sampled. Hence, the location `l_t` is chosen stochastically from a distribution conditioned on an affine transformation of the hidden state vector `h_t`. Args: input_size: input size of the fc layer. output_size: output size of the fc layer. std: standard deviation of the normal distribution. h_t: the hidden state vector of the core network for the current time step `t`. Returns: mu: a 2D vector of shape (B, 2). l_t: a 2D vector of shape (B, 2). """ def __init__(self, input_size, output_size, std): super().__init__() self.std = std hid_size = input_size // 2 self.fc = nn.Linear(input_size, hid_size) self.fc_lt = nn.Linear(hid_size, output_size) def forward(self, h_t): feat = F.relu(self.fc(h_t.detach())) mu = torch.tanh(self.fc_lt(feat)) l_t = torch.distributions.Normal(mu, self.std).rsample() l_t = l_t.detach() log_pi = Normal(mu, self.std).log_prob(l_t) log_pi = torch.sum(log_pi, dim=1) l_t = torch.clamp(l_t, -1, 1) return log_pi, l_t def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'output_size': 4, 'std': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_div_log_mul_neg_pow_sub_sum_tanh_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp2 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp15 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp17 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp27 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp29 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp39 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp41 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp1 = libdevice.tanh(tmp0) tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tmp1 + tmp4 tmp6 = tmp5 - tmp1 tmp7 = tmp6 * tmp6 tmp8 = -tmp7 tmp9 = 0.03125 tmp10 = tmp8 * tmp9 tmp11 = 1.3862943649291992 tmp12 = tmp10 - tmp11 tmp13 = 0.9189385332046727 tmp14 = tmp12 - tmp13 tmp16 = libdevice.tanh(tmp15) tmp18 = tmp17 * tmp3 tmp19 = tmp16 + tmp18 tmp20 = tmp19 - tmp16 tmp21 = tmp20 * tmp20 tmp22 = -tmp21 tmp23 = tmp22 * tmp9 tmp24 = tmp23 - tmp11 tmp25 = tmp24 - tmp13 tmp26 = tmp14 + tmp25 tmp28 = libdevice.tanh(tmp27) tmp30 = tmp29 * tmp3 tmp31 = tmp28 + tmp30 tmp32 = tmp31 - tmp28 tmp33 = tmp32 * tmp32 tmp34 = -tmp33 tmp35 = tmp34 * tmp9 tmp36 = tmp35 - tmp11 tmp37 = tmp36 - tmp13 tmp38 = tmp26 + tmp37 tmp40 = libdevice.tanh(tmp39) tmp42 = tmp41 * tmp3 tmp43 = tmp40 + tmp42 tmp44 = tmp43 - tmp40 tmp45 = tmp44 * tmp44 tmp46 = -tmp45 tmp47 = tmp46 * tmp9 tmp48 = tmp47 - tmp11 tmp49 = tmp48 - tmp13 tmp50 = tmp38 + tmp49 tl.store(out_ptr0 + x2, tmp50, xmask) @triton.jit def triton_poi_fused_add_clamp_mul_tanh_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tmp1 + tmp4 tmp6 = -1.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = 1.0 tmp9 = triton_helpers.minimum(tmp7, tmp8) tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4), (4, 1)) 
assert_size_stride(primals_3, (2,), (1,)) assert_size_stride(primals_4, (4, 2), (2, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0) del buf0 buf8 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(128)](buf1, primals_3, buf8, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 2), ( 2, 1), 0), reinterpret_tensor(primals_4, (2, 4), (1, 2), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf4 = torch.ops.aten.normal_functional.default(buf3) buf5 = buf4 del buf4 buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_div_log_mul_neg_pow_sub_sum_tanh_1[grid(64)](buf2, buf5, buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf7 = buf3 del buf3 triton_poi_fused_add_clamp_mul_tanh_2[grid(256)](buf2, buf5, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf6, buf7, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 2), (2, 1), 0 ), buf2, buf5, primals_4, buf8 class LocationNetworkNew(nn.Module): """The location network. Uses the internal state `h_t` of the core network to produce the location coordinates `l_t` for the next time step. Concretely, feeds the hidden state `h_t` through a fc layer followed by a tanh to clamp the output between [-1, 1]. This produces a 2D vector of means used to parametrize a two-component Gaussian with a fixed variance from which the location coordinates `l_t` for the next time step are sampled. Hence, the location `l_t` is chosen stochastically from a distribution conditioned on an affine transformation of the hidden state vector `h_t`. Args: input_size: input size of the fc layer. output_size: output size of the fc layer. std: standard deviation of the normal distribution. h_t: the hidden state vector of the core network for the current time step `t`. Returns: mu: a 2D vector of shape (B, 2). l_t: a 2D vector of shape (B, 2). """ def __init__(self, input_size, output_size, std): super().__init__() self.std = std hid_size = input_size // 2 self.fc = nn.Linear(input_size, hid_size) self.fc_lt = nn.Linear(hid_size, output_size) def forward(self, input_0): primals_2 = self.fc.weight primals_3 = self.fc.bias primals_4 = self.fc_lt.weight primals_5 = self.fc_lt.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
SmirnovKol/recurrent-visual-attention
LocationNetwork
false
14,436
[ "MIT" ]
463
4cb8d9e768ae35f38439278bb8a7b4d6b253a537
https://github.com/SmirnovKol/recurrent-visual-attention/tree/4cb8d9e768ae35f38439278bb8a7b4d6b253a537
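The constants in triton_poi_fused_add_div_log_mul_neg_pow_sub_sum_tanh_1 are the Normal log-density terms for std = 4: 0.03125 = 1/(2*std^2), 1.3862943... = log(std), and 0.9189385... = 0.5*log(2*pi); the kernel also draws l_t as mu + std*eps (reparametrized sampling) before scoring it. A sketch of the per-element log-prob being assembled, with an assumed function name:

import math
import torch

def normal_log_prob(mu: torch.Tensor, value: torch.Tensor, std: float = 4.0) -> torch.Tensor:
    # log N(value; mu, std) elementwise; the kernel then sums over dim 1.
    return (-((value - mu) ** 2) / (2.0 * std ** 2)
            - math.log(std) - 0.5 * math.log(2.0 * math.pi))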
BertSelfAttention
from _paritybench_helpers import _mock_config import math import torch from torch import nn import torch.utils.data class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): sz = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*sz) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask, history_states=None): if history_states is None: mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) else: x_states = torch.cat((history_states, hidden_states), dim=1) mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(x_states) mixed_value_layer = self.value(x_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) attention_scores = torch.matmul(query_layer / math.sqrt(self. attention_head_size), key_layer.transpose(-1, -2)) attention_scores = attention_scores + attention_mask attention_probs = nn.Softmax(dim=-1)(attention_scores) attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self. all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, num_attention_heads= 4, attention_probs_dropout_prob=0.5)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_div_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_add_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) @triton.jit def triton_poi_fused__softmax_add_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + 
tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x4 = xindex % 64 x5 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp5 = tl_math.exp(tmp4) tmp7 = tmp5 / tmp6 tl.store(in_out_ptr0 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2) del primals_6 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_div_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0) del buf0 triton_poi_fused_clone_1[grid(16, 4)](buf1, primals_5, buf4, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0) del buf1 buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_2[grid(64)](buf5, primals_8, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf5 triton_poi_fused__softmax_add_3[grid(256)](buf8, primals_8, buf6, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_8 buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf7 triton_poi_fused_clone_1[grid(16, 4)](buf2, primals_7, buf9, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) del primals_7 buf10 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0) del buf2 
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10) buf11 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_4[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf10 return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0 ), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0 ), buf8, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0) class BertSelfAttentionNew(nn.Module): def __init__(self, config): super(BertSelfAttentionNew, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( 'The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config. num_attention_heads) self.all_head_size = (self.num_attention_heads * self. attention_head_size) self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): sz = x.size()[:-1] + (self.num_attention_heads, self. attention_head_size) x = x.view(*sz) return x.permute(0, 2, 1, 3) def forward(self, input_0, input_1): primals_1 = self.query.weight primals_2 = self.query.bias primals_4 = self.key.weight primals_5 = self.key.bias primals_6 = self.value.weight primals_7 = self.value.bias primals_3 = input_0 primals_8 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
SofanHe/UnilmChatchitRobot
BertSelfAttention
false
14,437
[ "Apache-2.0" ]
115
7232d01326ed04ae17cbeb73ce681f30b4391933
https://github.com/SofanHe/UnilmChatchitRobot/tree/7232d01326ed04ae17cbeb73ce681f30b4391933
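In this record the additive attention mask is folded into the softmax: triton_poi_fused__softmax_add_2 computes per-row statistics (the max of scores + mask, then the sum of shifted exponentials) in one pass, and triton_poi_fused__softmax_add_3 normalizes in place using those statistics. The same two-kernel split in eager PyTorch, with illustrative names:

import torch

def masked_softmax_split(scores: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    x = scores + mask
    # Kernel 2: row max and exp-sum.
    row_max = x.amax(dim=-1, keepdim=True)
    row_sum = torch.exp(x - row_max).sum(dim=-1, keepdim=True)
    # Kernel 3: normalize with the precomputed statistics.
    return torch.exp(x - row_max) / row_sum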
SeparableConvBlock
import math import torch import torch.utils.data import torch.nn.functional as F from itertools import product as product from math import sqrt as sqrt class Conv2dSamePadding(torch.nn.Conv2d): """ A wrapper around :class:`torch.nn.Conv2d` to support "SAME" padding mode and more features. """ def __init__(self, *args, **kwargs): """ Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`: Args: norm (nn.Module, optional): a normalization layer activation (callable(Tensor) -> Tensor): a callable activation function It assumes that norm layer is used before activation. """ norm = kwargs.pop('norm', None) activation = kwargs.pop('activation', None) self.padding_method = kwargs.pop('padding', None) if self.padding_method is None: if len(args) >= 5: self.padding_method = args[4] else: self.padding_method = 0 if isinstance(self.padding_method, str): if self.padding_method.upper() == 'SAME': super().__init__(*args, **kwargs, padding=0) if isinstance(self.stride, int): self.stride = [self.stride] * 2 elif len(self.stride) == 1: self.stride = [self.stride[0]] * 2 if isinstance(self.kernel_size, int): self.kernel_size = [self.kernel_size] * 2 elif len(self.kernel_size) == 1: self.kernel_size = [self.kernel_size[0]] * 2 if isinstance(self.dilation, int): self.dilation = [self.dilation] * 2 elif len(self.dilation) == 1: self.dilation = [self.dilation[0]] * 2 else: raise ValueError('Unknown padding method: {}'.format(self. padding_method)) else: super().__init__(*args, **kwargs, padding=self.padding_method) self.norm = norm self.activation = activation def forward(self, x): if isinstance(self.padding_method, str): if self.padding_method.upper() == 'SAME': input_h, input_w = x.shape[-2:] stride_h, stride_w = self.stride kernel_size_h, kernel_size_w = self.kernel_size dilation_h, dilation_w = self.dilation output_h = math.ceil(input_h / stride_h) output_w = math.ceil(input_w / stride_w) padding_needed_h = max(0, (output_h - 1) * stride_h + ( kernel_size_h - 1) * dilation_h + 1 - input_h) padding_needed_w = max(0, (output_w - 1) * stride_w + ( kernel_size_w - 1) * dilation_w + 1 - input_w) left = padding_needed_w // 2 right = padding_needed_w - left top = padding_needed_h // 2 bottom = padding_needed_h - top x = F.pad(x, [left, right, top, bottom]) else: raise ValueError('Unknown padding method: {}'.format(self. padding_method)) x = super().forward(x) if self.norm is not None: x = self.norm(x) if self.activation is not None: x = self.activation(x) return x class SeparableConvBlock(torch.nn.Module): """ Depthwise separable convolution block. """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, bias=True, norm=None, activation=None): """ Args: in_channels (int): the number of input tensor channels. out_channels (int): the number of output tensor channels. kernel_size (int): the kernel size. stride (int or tuple or list): the stride. bias (bool): if `True`, the pointwise conv applies bias. apply_bn (bool): if `True`, apply BN layer after conv layer. norm (nn.Module, optional): a normalization layer activation (callable(Tensor) -> Tensor): a callable activation function It assumes that norm layer is used before activation.
""" super(SeparableConvBlock, self).__init__() self.norm = norm self.activation = activation self.depthwise = Conv2dSamePadding(in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, stride= stride, padding=padding, dilation=dilation, groups=in_channels, bias=False) self.pointwise = Conv2dSamePadding(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0, dilation=1, groups=1, bias=bias) if bias: self.bias = self.pointwise.bias def forward(self, inputs): x = self.depthwise(inputs) x = self.pointwise(x) if self.norm is not None: x = self.norm(x) if self.activation is not None: x = self.activation(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.utils.data import torch.nn.functional as F from itertools import product as product from math import sqrt as sqrt assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 1, 4, 4), (16, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1)) buf2 = buf1 del buf1 get_raw_stream(0) triton_poi_fused_convolution_0[grid(16)](buf2, primals_4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_4 return buf2, primals_1, primals_2, primals_3, buf0 class Conv2dSamePadding(torch.nn.Conv2d): """ A wrapper around :class:`torch.nn.Conv2d` to support "SAME" padding mode and more features. """ def __init__(self, *args, **kwargs): """ Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`: Args: norm (nn.Module, optional): a normalization layer activation (callable(Tensor) -> Tensor): a callable activation function It assumes that norm layer is used before activation. """ norm = kwargs.pop('norm', None) activation = kwargs.pop('activation', None) self.padding_method = kwargs.pop('padding', None) if self.padding_method is None: if len(args) >= 5: self.padding_method = args[4] else: self.padding_method = 0 if isinstance(self.padding_method, str): if self.padding_method.upper() == 'SAME': super().__init__(*args, **kwargs, padding=0) if isinstance(self.stride, int): self.stride = [self.stride] * 2 elif len(self.stride) == 1: self.stride = [self.stride[0]] * 2 if isinstance(self.kernel_size, int): self.kernel_size = [self.kernel_size] * 2 elif len(self.kernel_size) == 1: self.kernel_size = [self.kernel_size[0]] * 2 if isinstance(self.dilation, int): self.dilation = [self.dilation] * 2 elif len(self.dilation) == 1: self.dilation = [self.dilation[0]] * 2 else: raise ValueError('Unknown padding method: {}'.format(self. 
padding_method)) else: super().__init__(*args, **kwargs, padding=self.padding_method) self.norm = norm self.activation = activation def forward(self, x): if isinstance(self.padding_method, str): if self.padding_method.upper() == 'SAME': input_h, input_w = x.shape[-2:] stride_h, stride_w = self.stride kernel_size_h, kernel_size_w = self.kernel_size dilation_h, dilation_w = self.dilation output_h = math.ceil(input_h / stride_h) output_w = math.ceil(input_w / stride_w) padding_needed_h = max(0, (output_h - 1) * stride_h + ( kernel_size_h - 1) * dilation_h + 1 - input_h) padding_needed_w = max(0, (output_w - 1) * stride_w + ( kernel_size_w - 1) * dilation_w + 1 - input_w) left = padding_needed_w // 2 right = padding_needed_w - left top = padding_needed_h // 2 bottom = padding_needed_h - top x = F.pad(x, [left, right, top, bottom]) else: raise ValueError('Unknown padding method: {}'.format(self. padding_method)) x = super().forward(x) if self.norm is not None: x = self.norm(x) if self.activation is not None: x = self.activation(x) return x class SeparableConvBlockNew(torch.nn.Module): """ Depthwise separable convolution block. """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, bias=True, norm=None, activation=None): """ Args: in_channels (int): the number of input tensor channels. out_channels (int): the number of output tensor channels. kernel_size (int): the kernel size. stride (int or tuple or list): the stride. bias (bool): if `True`, the pointwise conv applies bias. apply_bn (bool): if `True`, apply BN layer after conv layer. norm (nn.Module, optional): a normalization layer activation (callable(Tensor) -> Tensor): a callable activation function It assumes that norm layer is used before activation. """ super(SeparableConvBlockNew, self).__init__() self.norm = norm self.activation = activation self.depthwise = Conv2dSamePadding(in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size, stride= stride, padding=padding, dilation=dilation, groups=in_channels, bias=False) self.pointwise = Conv2dSamePadding(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0, dilation=1, groups=1, bias=bias) if bias: self.bias = self.pointwise.bias def forward(self, input_0): primals_4 = self.bias primals_1 = self.depthwise.weight primals_3 = self.pointwise.weight primals_2 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
StevenGrove/DynamicHead
SeparableConvBlock
false
14,438
[ "Apache-2.0" ]
69
d62aa84e1d1c6a0c74d46258ad77b11413c10bef
https://github.com/StevenGrove/DynamicHead/tree/d62aa84e1d1c6a0c74d46258ad77b11413c10bef
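The 'SAME' branch of Conv2dSamePadding pads so that each spatial output dimension equals ceil(input / stride); this particular record runs with padding=0, so the branch stays dormant, but the arithmetic factors cleanly into a one-axis helper (hypothetical name):

import math

def same_pad_1d(size: int, kernel: int, stride: int, dilation: int) -> tuple:
    # Total padding needed so that output == ceil(size / stride), split with
    # the extra pixel on the trailing side, as in the forward pass above.
    out = math.ceil(size / stride)
    needed = max(0, (out - 1) * stride + (kernel - 1) * dilation + 1 - size)
    return needed // 2, needed - needed // 2  # (before, after)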
CategoricalAccuracy
import torch class _Metric(torch.nn.Module): def __init__(self): super().__init__() def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'): raise NotImplementedError() class Accuracy(_Metric): def __init__(self): super().__init__() def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'): """ :param input: [B, L] :param target: [B, L] :return: """ bool_acc = input.long() == target.long() return bool_acc.sum() / bool_acc.numel() class CategoricalAccuracy(Accuracy): def __init__(self): super().__init__() def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'): """ :param input: [B, T, V] :param target: [B, T] :return: """ input = input.softmax(-1) categorical_input = input.argmax(-1) return super().forward(categorical_input, target) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_argmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = tmp0 / tmp6 tmp8 = tmp1 / tmp6 tmp9 = tmp7 > tmp8 tmp10 = tmp7 == tmp8 tmp11 = tmp7 != tmp7 tmp12 = tmp8 != tmp8 tmp13 = tmp11 > tmp12 tmp14 = tmp9 | tmp13 tmp15 = tmp11 & tmp12 tmp16 = tmp10 | tmp15 tmp17 = tl.full([1], 0, tl.int64) tmp18 = tl.full([1], 1, tl.int64) tmp19 = tmp17 < tmp18 tmp20 = tmp16 & tmp19 tmp21 = tmp14 | tmp20 tmp22 = tl.where(tmp21, tmp7, tmp8) tmp23 = tl.where(tmp21, tmp17, tmp18) tmp24 = tmp3 / tmp6 tmp25 = tmp22 > tmp24 tmp26 = tmp22 == tmp24 tmp27 = tmp22 != tmp22 tmp28 = tmp24 != tmp24 tmp29 = tmp27 > tmp28 tmp30 = tmp25 | tmp29 tmp31 = tmp27 & tmp28 tmp32 = tmp26 | tmp31 tmp33 = tl.full([1], 2, tl.int64) tmp34 = tmp23 < tmp33 tmp35 = tmp32 & tmp34 tmp36 = tmp30 | tmp35 tmp37 = tl.where(tmp36, tmp22, tmp24) tmp38 = tl.where(tmp36, tmp23, tmp33) tmp39 = tmp5 / tmp6 tmp40 = tmp37 > tmp39 tmp41 = tmp37 == tmp39 tmp42 = tmp37 != tmp37 tmp43 = tmp39 != tmp39 tmp44 = tmp42 > tmp43 tmp45 = tmp40 | tmp44 tmp46 = tmp42 & tmp43 tmp47 = tmp41 | tmp46 tmp48 = tl.full([1], 3, tl.int64) tmp49 = tmp38 < tmp48 tmp50 = tmp47 & tmp49 tmp51 = tmp45 | tmp50 tl.where(tmp51, tmp37, tmp39) tmp53 = tl.where(tmp51, tmp38, tmp48) tl.store(out_ptr0 + x0, tmp53, xmask) @triton.jit def triton_per_fused__to_copy_div_eq_sum_2(in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 64 r2 = rindex tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + r2, None) tmp2 = tmp1.to(tl.int64) tmp3 = tmp0 == tmp2 tmp4 = tmp3.to(tl.int64) tmp5 = tl.broadcast_to(tmp4, [RBLOCK]) tmp7 = 
triton_helpers.promote_to_tensor(tl.sum(tmp5, 0)) tmp8 = tmp7.to(tl.float32) tmp9 = 0.00390625 tmp10 = tmp8 * tmp9 tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64) triton_poi_fused__softmax_argmax_1[grid(64)](buf0, buf1, 64, XBLOCK =64, num_warps=1, num_stages=1) del buf0 buf3 = empty_strided_cuda((), (), torch.float32) triton_per_fused__to_copy_div_eq_sum_2[grid(1)](buf1, arg1_1, buf3, 1, 256, num_warps=2, num_stages=1) del arg1_1 del buf1 return buf3, class _Metric(torch.nn.Module): def __init__(self): super().__init__() def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'): raise NotImplementedError() class Accuracy(_Metric): def __init__(self): super().__init__() def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'): """ :param input: [B, L] :param target: [B, L] :return: """ bool_acc = input.long() == target.long() return bool_acc.sum() / bool_acc.numel() class CategoricalAccuracyNew(Accuracy): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Stillerman/MusicTransformer-pytorch
CategoricalAccuracy
false
14,439
[ "MIT" ]
170
73abb7cab271beba042b7b6fc06a6a9aaee82e8c
https://github.com/Stillerman/MusicTransformer-pytorch/tree/73abb7cab271beba042b7b6fc06a6a9aaee82e8c
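Because softmax is strictly monotonic, the argmax in this record is unchanged by the preceding softmax; the generated kernels keep both steps, but an eager equivalent can take argmax over the raw logits directly. The 0.00390625 in the reduction kernel is exactly 1/256, i.e. 1/numel for these inputs. A sketch with illustrative names:

import torch

def categorical_accuracy(logits: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    # argmax over logits == argmax over softmax(logits).
    pred = logits.argmax(dim=-1)
    correct = pred == target.long()  # broadcasts as in the original module
    return correct.sum() / correct.numel()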
BCEFocalLoss
import torch from torch import nn import torch.nn.functional as F class BCEFocalLoss(nn.Module): def __init__(self, alpha=-1, gamma=2.0, reduction='mean'): super(BCEFocalLoss, self).__init__() self.alpha = alpha self.gamma = gamma self.reduction = reduction def forward(self, inputs, targets): p = torch.sigmoid(inputs) ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none') p_t = p * targets + (1 - p) * (1 - targets) loss = ce_loss * (1 - p_t) ** self.gamma if self.alpha >= 0: alpha_t = self.alpha * targets + (1 - self.alpha) * (1 - targets) loss = alpha_t * loss if self.reduction == 'mean': loss = loss.mean() elif self.reduction == 'sum': loss = loss.sum() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.sigmoid(tmp3) tmp14 = tmp13 * tmp0 tmp15 = tmp1 - tmp13 tmp16 = tmp15 * tmp2 tmp17 = tmp14 + tmp16 tmp18 = tmp1 - tmp17 tmp19 = tmp18 * tmp18 tmp20 = tmp12 * tmp19 tmp21 = tl.broadcast_to(tmp20, [RBLOCK]) tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0)) tmp24 = 256.0 tmp25 = tmp23 / tmp24 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp25, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0[ grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class BCEFocalLossNew(nn.Module): def __init__(self, alpha=-1, gamma=2.0, reduction='mean'): super(BCEFocalLossNew, self).__init__() self.alpha = alpha self.gamma = gamma self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Stochastic-Adventure/ClinicalTransformerRelationExtraction
BCEFocalLoss
false
14,440
[ "MIT" ]
78
eef956bbfbd64b008014ef7cac5f818087816725
https://github.com/Stochastic-Adventure/ClinicalTransformerRelationExtraction/tree/eef956bbfbd64b008014ef7cac5f818087816725
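The fused kernel evaluates binary_cross_entropy_with_logits through the numerically stable identity max(x, 0) - x*t + log1p(exp(-|x|)), never materializing log(sigmoid(x)). As a standalone sketch (assumed name):

import torch

def stable_bce_with_logits(x: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
    # Equals -(t*log(sigmoid(x)) + (1-t)*log(1-sigmoid(x))) elementwise,
    # but remains finite for large |x|.
    return torch.clamp(x, min=0) - x * t + torch.log1p(torch.exp(-torch.abs(x)))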
SingleHead
import torch import torch.nn as nn import torch.utils.data from itertools import product as product from math import sqrt as sqrt class SingleHead(nn.Module): """ Single head used in CenterNet Head. """ def __init__(self, in_channel, out_channel, bias_fill=False, bias_value=0): super(SingleHead, self).__init__() self.feat_conv = nn.Conv2d(in_channel, in_channel, kernel_size=3, padding=1) self.relu = nn.ReLU() self.out_conv = nn.Conv2d(in_channel, out_channel, kernel_size=1) if bias_fill: self.out_conv.bias.data.fill_(bias_value) def forward(self, x): x = self.feat_conv(x) x = self.relu(x) x = self.out_conv(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.utils.data from itertools import product as product from math import sqrt as sqrt assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(256)](buf3, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1 class SingleHeadNew(nn.Module): """ Single head used in CenterNet Head. """ def __init__(self, in_channel, out_channel, bias_fill=False, bias_value=0): super(SingleHeadNew, self).__init__() self.feat_conv = nn.Conv2d(in_channel, in_channel, kernel_size=3, padding=1) self.relu = nn.ReLU() self.out_conv = nn.Conv2d(in_channel, out_channel, kernel_size=1) if bias_fill: self.out_conv.bias.data.fill_(bias_value) def forward(self, input_0): primals_1 = self.feat_conv.weight primals_2 = self.feat_conv.bias primals_4 = self.out_conv.weight primals_5 = self.out_conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
StevenGrove/DynamicHead
SingleHead
false
14,441
[ "Apache-2.0" ]
69
d62aa84e1d1c6a0c74d46258ad77b11413c10bef
https://github.com/StevenGrove/DynamicHead/tree/d62aa84e1d1c6a0c74d46258ad77b11413c10bef
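A recurring pattern across these records: the convolution itself runs through extern_kernels.convolution with bias=None, and the bias add (plus any activation) is fused into a small elementwise Triton epilogue. The eager equivalent of the first conv in this head, with an illustrative name:

import torch
import torch.nn.functional as F

def conv_bias_relu(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor) -> torch.Tensor:
    # Bias-less conv, then bias-add + ReLU as one separate elementwise pass.
    y = F.conv2d(x, weight, bias=None, padding=1)
    return F.relu(y + bias.view(1, -1, 1, 1))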
ConvLSTMCell
import torch from torch.autograd import Variable import torch.nn as nn class ConvLSTMCell(nn.Module): def __init__(self, input_channels, hidden_channels, kernel_size, bias=True ): super(ConvLSTMCell, self).__init__() assert hidden_channels % 2 == 0 self.input_channels = input_channels self.hidden_channels = hidden_channels self.bias = bias self.kernel_size = kernel_size self.num_features = 4 self.padding = (kernel_size - 1) // 2 self.conv = nn.Conv2d(self.input_channels + self.hidden_channels, 4 * self.hidden_channels, self.kernel_size, 1, self.padding) self._initialize_weights() def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight.data, std=0.01) if m.bias is not None: m.bias.data.zero_() def forward(self, input, h, c): combined = torch.cat((input, h), dim=1) A = self.conv(combined) ai, af, ao, ag = torch.split(A, A.size()[1] // self.num_features, dim=1 ) i = torch.sigmoid(ai) f = torch.sigmoid(af) o = torch.sigmoid(ao) g = torch.tanh(ag) new_c = f * c + i * g new_h = o * torch.tanh(new_c) return new_h, new_c, o @staticmethod def init_hidden(batch_size, hidden_c, shape): return Variable(torch.zeros(batch_size, hidden_c, shape[0], shape[1]) ), Variable(torch.zeros(batch_size, hidden_c, shape[0], shape[1])) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 3, 3])] def get_init_inputs(): return [[], {'input_channels': 4, 'hidden_channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch.autograd import Variable import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 8 x0 = xindex % 16 x2 = xindex // 128 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask, other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, XBLOCK: tl.constexpr): xnumel = 144 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 36 x4 = xindex % 36 x1 = xindex // 9 % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (72 + x4 + 144 * x2), xmask) tmp1 = tl.load(in_ptr1 + (8 + x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (x4 + 144 * x2), xmask) tmp5 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (108 + x4 + 144 * x2), xmask) tmp9 = tl.load(in_ptr1 + (12 + x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (36 + x4 + 144 * x2), xmask) tmp13 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr2 + x3, xmask) tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tmp6 = tmp4 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp10 = tmp8 + tmp9 tmp11 = libdevice.tanh(tmp10) tmp14 = tmp12 + tmp13 tmp15 = tl.sigmoid(tmp14) tmp17 = tmp15 * tmp16 tmp18 = tmp7 * tmp11 tmp19 = tmp17 + tmp18 tmp20 = 1.0 tmp21 = tmp20 - tmp15 tmp22 = tmp15 * tmp21 tmp23 = libdevice.tanh(tmp19) tmp24 = tmp3 * tmp23 tl.store(out_ptr0 + x3, tmp3, xmask) tl.store(out_ptr1 + x3, tmp7, xmask) tl.store(out_ptr2 + x3, tmp11, xmask) tl.store(out_ptr3 + x3, tmp19, xmask) tl.store(out_ptr4 + x3, tmp22, xmask) tl.store(out_ptr5 + x3, tmp24, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (16, 8, 4, 4), (128, 16, 4, 1)) assert_size_stride(primals_4, (16,), (1,)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 16, 3, 3), (144, 9, 3, 1)) buf3 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 
1), torch.float32) buf2 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) buf4 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) buf5 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) buf7 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) buf6 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32) triton_poi_fused_add_mul_sigmoid_sigmoid_backward_tanh_1[grid(144)]( buf1, primals_4, primals_5, buf3, buf2, buf4, buf5, buf7, buf6, 144, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del primals_4 return (buf6, buf5, buf3, primals_3, primals_5, buf0, buf2, buf3, buf4, buf5, buf7) class ConvLSTMCellNew(nn.Module): def __init__(self, input_channels, hidden_channels, kernel_size, bias=True ): super(ConvLSTMCellNew, self).__init__() assert hidden_channels % 2 == 0 self.input_channels = input_channels self.hidden_channels = hidden_channels self.bias = bias self.kernel_size = kernel_size self.num_features = 4 self.padding = (kernel_size - 1) // 2 self.conv = nn.Conv2d(self.input_channels + self.hidden_channels, 4 * self.hidden_channels, self.kernel_size, 1, self.padding) self._initialize_weights() def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight.data, std=0.01) if m.bias is not None: m.bias.data.zero_() @staticmethod def init_hidden(batch_size, hidden_c, shape): return Variable(torch.zeros(batch_size, hidden_c, shape[0], shape[1]) ), Variable(torch.zeros(batch_size, hidden_c, shape[0], shape[1])) def forward(self, input_0, input_1, input_2): primals_3 = self.conv.weight primals_4 = self.conv.bias primals_1 = input_0 primals_2 = input_1 primals_5 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1], output[2]
Starboy-at-earth/DMRA
ConvLSTMCell
false
14,442
[ "MIT" ]
84
596cc6106ab5f1f03deb60a7f4bb0c2ad1029a83
https://github.com/Starboy-at-earth/DMRA/tree/596cc6106ab5f1f03deb60a7f4bb0c2ad1029a83
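Note that with `kernel_size=4` and `padding=(4 - 1) // 2 = 1`, the cell shrinks a 4x4 input to 3x3 states, which is why `get_inputs` pairs 4x4 input/hidden tensors with a (4, 4, 3, 3) cell state. A weight-tied cross-check of the two classes in this record, assuming a CUDA device, might look like:

import torch

# Sketch: share one set of conv weights, then compare eager vs compiled outputs.
eager = ConvLSTMCell(input_channels=4, hidden_channels=4, kernel_size=4).cuda()
fused = ConvLSTMCellNew(input_channels=4, hidden_channels=4, kernel_size=4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
h = torch.rand(4, 4, 4, 4, device='cuda')
c = torch.rand(4, 4, 3, 3, device='cuda')  # 3x3: see the shape note above
h1, c1, o1 = eager(x, h, c)
h2, c2, o2 = fused(x, h, c)
print(torch.allclose(h1, h2, atol=1e-5), torch.allclose(c1, c2, atol=1e-5))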
ILN
import torch import torch.nn as nn from torch.nn.parameter import Parameter class ILN(nn.Module): def __init__(self, num_features, eps=1e-05): super(ILN, self).__init__() self.eps = eps self.rho = Parameter(torch.Tensor(1, num_features, 1, 1)) self.gamma = Parameter(torch.Tensor(1, num_features, 1, 1)) self.beta = Parameter(torch.Tensor(1, num_features, 1, 1)) self.rho.data.fill_(0.0) self.gamma.data.fill_(1.0) self.beta.data.fill_(0.0) def forward(self, input): in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True ), torch.var(input, dim=[2, 3], keepdim=True) out_in = (input - in_mean) / torch.sqrt(in_var + self.eps) ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True ), torch.var(input, dim=[1, 2, 3], keepdim=True) out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps) out = self.rho.expand(input.shape[0], -1, -1, -1) * out_in + (1 - self.rho.expand(input.shape[0], -1, -1, -1)) * out_ln out = out * self.gamma.expand(input.shape[0], -1, -1, -1 ) + self.beta.expand(input.shape[0], -1, -1, -1) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_features': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_add_mean_sqrt_var_0(in_out_ptr0, in_out_ptr1, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 64, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 64.0 tmp20 = tmp4 / tmp19 tmp21 = 63.0 tmp22 = tmp18 / tmp21 tmp23 = 1e-05 tmp24 = tmp22 + tmp23 tmp25 = libdevice.sqrt(tmp24) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp20, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp25, xmask) @triton.jit def triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex x2 = xindex % 4 x3 = xindex // 4 tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp26 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp32 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last') tmp38 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp40 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = tl.full([XBLOCK, 1], 16, tl.int32) tmp11 = tmp10.to(tl.float32) tmp12 = tmp9 / tmp11 tmp13 = tmp1 - tmp12 tmp14 = tmp13 * tmp13 tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK]) tmp17 = tl.where(xmask, tmp15, 0) tmp18 = tl.sum(tmp17, 1)[:, None] tmp19 = 16.0 tmp20 = tmp4 / tmp19 tmp21 = 15.0 tmp22 = tmp18 / tmp21 tmp23 = 1e-05 tmp24 = tmp22 + tmp23 tmp25 = libdevice.sqrt(tmp24) tmp27 = tmp0 - tmp20 tmp28 = tmp27 / tmp25 tmp29 = tmp26 * tmp28 tmp30 = 1.0 tmp31 = tmp30 - tmp26 tmp33 = tmp0 - tmp32 tmp35 = tmp33 / tmp34 tmp36 = tmp31 * tmp35 tmp37 = tmp29 + tmp36 tmp39 = tmp37 * tmp38 tmp41 = tmp39 + tmp40 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp20, xmask) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp25, xmask) tl.store(out_ptr0 + (r1 + 16 * x0), tmp41, xmask) def 
call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf6 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32) buf7 = reinterpret_tensor(buf6, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf6 buf11 = reinterpret_tensor(buf9, (4, 1, 1, 1), (1, 1, 1, 1), 0) del buf9 get_raw_stream(0) triton_per_fused_add_mean_sqrt_var_0[grid(4)](buf7, buf11, primals_1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 buf5 = reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf3 buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1[grid(16)](buf1, buf5, primals_1, primals_2, buf7, buf11, primals_3, primals_4, buf12, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del primals_4 return buf12, primals_1, primals_2, primals_3, buf1, buf5, buf7, buf11 class ILNNew(nn.Module): def __init__(self, num_features, eps=1e-05): super(ILNNew, self).__init__() self.eps = eps self.rho = Parameter(torch.Tensor(1, num_features, 1, 1)) self.gamma = Parameter(torch.Tensor(1, num_features, 1, 1)) self.beta = Parameter(torch.Tensor(1, num_features, 1, 1)) self.rho.data.fill_(0.0) self.gamma.data.fill_(1.0) self.beta.data.fill_(0.0) def forward(self, input_0): primals_2 = self.rho primals_3 = self.gamma primals_4 = self.beta primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
SubZero12556/Cats2dogs_ONNX
ILN
false
14,443
[ "MIT" ]
2,519
52a6a60d519e23b02f0847f0fa9f9ead89ca5f4e
https://github.com/SubZero12556/Cats2dogs_ONNX/tree/52a6a60d519e23b02f0847f0fa9f9ead89ca5f4e
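The divisors in this record's kernels (63.0 and 15.0) show both reductions use the N-1 denominator, matching `torch.var`'s default unbiased estimator in the eager module. Since `rho`, `gamma`, and `beta` are filled deterministically in `__init__`, the two classes can be compared directly without copying state; a sketch assuming a CUDA device:

import torch

# Sketch: eager and fused ILN should agree elementwise on the same input.
eager = ILN(num_features=4).cuda()
fused = ILNNew(num_features=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
print(torch.allclose(eager(x), fused(x), atol=1e-5))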
ResidualBlock
import torch import torch.utils.data import torch.nn as nn class ResidualBlock(nn.Module): def __init__(self, in_channels, out_channels, hidden_dim): super().__init__() self.fc_0 = nn.Conv1d(in_channels, hidden_dim, 1) self.fc_1 = nn.Conv1d(hidden_dim, out_channels, 1) self.activation = nn.ReLU() if in_channels != out_channels: self.shortcut = nn.Conv1d(in_channels, out_channels, 1) else: self.shortcut = nn.Identity() nn.init.zeros_(self.fc_1.weight) def forward(self, x): x_short = self.shortcut(x) x = self.fc_0(x) x = self.fc_1(self.activation(x)) x = self.activation(x + x_short) return x def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'hidden_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x2, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = 0.0 tmp8 = tmp6 <= tmp7 tl.store(in_out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 4, 4), (16, 4, 1), 0), primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4), (16, 4, 1)) buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0) del buf0 buf5 = empty_strided_cuda((4, 4), (4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16)](buf1, primals_3, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 4 ), (0, 4, 1), 0), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (1, 4, 4), (16, 4, 1)) buf3 = reinterpret_tensor(buf2, (4, 4), (4, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_1[grid(16)](buf3, primals_5, primals_1, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 return buf3, primals_2, primals_4, reinterpret_tensor(primals_1, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (1, 4, 4), (16, 4, 1), 0 ), buf4, buf5 class ResidualBlockNew(nn.Module): def __init__(self, in_channels, out_channels, hidden_dim): super().__init__() self.fc_0 = nn.Conv1d(in_channels, hidden_dim, 1) self.fc_1 = nn.Conv1d(hidden_dim, out_channels, 1) self.activation = nn.ReLU() if 
in_channels != out_channels: self.shortcut = nn.Conv1d(in_channels, out_channels, 1) else: self.shortcut = nn.Identity() nn.init.zeros_(self.fc_1.weight) def forward(self, input_0): primals_2 = self.fc_0.weight primals_3 = self.fc_0.bias primals_4 = self.fc_1.weight primals_5 = self.fc_1.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
StructuralNeurobiologyLab/LightConvPoint
ResidualBlock
false
14,444
[ "Apache-2.0" ]
58
3f353f45e9e910fa390a74520dfd478e3e88f104
https://github.com/StructuralNeurobiologyLab/LightConvPoint/tree/3f353f45e9e910fa390a74520dfd478e3e88f104
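This record feeds a 2-D (4, 4) tensor to `Conv1d` layers; recent PyTorch accepts that as an unbatched (C, L) input, and the compiled `call` makes the same choice explicit by reinterpreting it as (1, 4, 4). A weight-tied comparison, assuming CUDA and a PyTorch version with unbatched conv support:

import torch

# Sketch: zero-initialized fc_1 plus the Identity shortcut means the block
# reduces to relu(x + fc_1.bias broadcast per channel) right after init;
# both paths should still match each other exactly.
eager = ResidualBlock(in_channels=4, out_channels=4, hidden_dim=4).cuda()
fused = ResidualBlockNew(in_channels=4, out_channels=4, hidden_dim=4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, device='cuda')
print(torch.allclose(eager(x), fused(x), atol=1e-5))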
SplAtConv2d
from torch.autograd import Function from torch.nn import Module import logging import torch import torch.utils.data import torch.distributed as dist from torch import nn import torch.nn.functional as F from torch.autograd.function import Function from torch.autograd import Function from torch.nn.modules.utils import _pair from torch.nn import BatchNorm2d from torch.nn import ReLU def get_norm(norm, out_channels): """ Args: norm (str or callable): either one of BN, SyncBN, FrozenBN, GN; or a callable that takes a channel number and returns the normalization layer as a nn.Module. Returns: nn.Module or None: the normalization layer """ if isinstance(norm, str): if len(norm) == 0: return None norm = {'BN': BatchNorm2d, 'SyncBN': NaiveSyncBatchNorm, 'FrozenBN': FrozenBatchNorm2d, 'GN': lambda channels: nn.GroupNorm(32, channels), 'nnSyncBN': nn.SyncBatchNorm}[norm] return norm(out_channels) class FrozenBatchNorm2d(nn.Module): """ BatchNorm2d where the batch statistics and the affine parameters are fixed. It contains non-trainable buffers called "weight" and "bias", "running_mean", "running_var", initialized to perform identity transformation. The pre-trained backbone models from Caffe2 only contain "weight" and "bias", which are computed from the original four parameters of BN. The affine transform `x * weight + bias` will perform the equivalent computation of `(x - running_mean) / sqrt(running_var) * weight + bias`. When loading a backbone model from Caffe2, "running_mean" and "running_var" will be left unchanged as identity transformation. Other pre-trained backbone models may contain all 4 parameters. The forward is implemented by `F.batch_norm(..., training=False)`. """ _version = 3 def __init__(self, num_features, eps=1e-05): super().__init__() self.num_features = num_features self.eps = eps self.register_buffer('weight', torch.ones(num_features)) self.register_buffer('bias', torch.zeros(num_features)) self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features) - eps) def forward(self, x): if x.requires_grad: scale = self.weight * (self.running_var + self.eps).rsqrt() bias = self.bias - self.running_mean * scale scale = scale.reshape(1, -1, 1, 1) bias = bias.reshape(1, -1, 1, 1) return x * scale + bias else: return F.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias, training=False, eps=self.eps) def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): version = local_metadata.get('version', None) if version is None or version < 2: if prefix + 'running_mean' not in state_dict: state_dict[prefix + 'running_mean'] = torch.zeros_like(self .running_mean) if prefix + 'running_var' not in state_dict: state_dict[prefix + 'running_var'] = torch.ones_like(self. running_var) if version is not None and version < 3: logger = logging.getLogger(__name__) logger.info('FrozenBatchNorm {} is upgraded to version 3.'. format(prefix.rstrip('.'))) state_dict[prefix + 'running_var'] -= self.eps super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) def __repr__(self): return 'FrozenBatchNorm2d(num_features={}, eps={})'.format(self. num_features, self.eps) @classmethod def convert_frozen_batchnorm(cls, module): """ Convert BatchNorm/SyncBatchNorm in module into FrozenBatchNorm. Args: module (torch.nn.Module): Returns: If module is BatchNorm/SyncBatchNorm, returns a new module. 
Otherwise, in-place convert module and return it. Similar to convert_sync_batchnorm in https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py """ bn_module = nn.modules.batchnorm bn_module = bn_module.BatchNorm2d, bn_module.SyncBatchNorm res = module if isinstance(module, bn_module): res = cls(module.num_features) if module.affine: res.weight.data = module.weight.data.clone().detach() res.bias.data = module.bias.data.clone().detach() res.running_mean.data = module.running_mean.data res.running_var.data = module.running_var.data res.eps = module.eps else: for name, child in module.named_children(): new_child = cls.convert_frozen_batchnorm(child) if new_child is not child: res.add_module(name, new_child) return res class _NewEmptyTensorOp(torch.autograd.Function): @staticmethod def forward(ctx, x, new_shape): ctx.shape = x.shape return x.new_empty(new_shape) @staticmethod def backward(ctx, grad): shape = ctx.shape return _NewEmptyTensorOp.apply(grad, shape), None class Conv2d(torch.nn.Conv2d): """ A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features. """ def __init__(self, *args, **kwargs): """ Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`: Args: norm (nn.Module, optional): a normalization layer activation (callable(Tensor) -> Tensor): a callable activation function It assumes that norm layer is used before activation. """ norm = kwargs.pop('norm', None) activation = kwargs.pop('activation', None) super().__init__(*args, **kwargs) self.norm = norm self.activation = activation def forward(self, x): if x.numel() == 0 and self.training: assert not isinstance(self.norm, torch.nn.SyncBatchNorm ), 'SyncBatchNorm does not support empty inputs!' if x.numel() == 0 and TORCH_VERSION <= (1, 4): assert not isinstance(self.norm, torch.nn.GroupNorm ), 'GroupNorm does not support empty inputs in PyTorch <=1.4!' output_shape = [((i + 2 * p - (di * (k - 1) + 1)) // s + 1) for i, p, di, k, s in zip(x.shape[-2:], self.padding, self. dilation, self.kernel_size, self.stride)] output_shape = [x.shape[0], self.weight.shape[0]] + output_shape empty = _NewEmptyTensorOp.apply(x, output_shape) if self.training: _dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 return empty + _dummy else: return empty x = super().forward(x) if self.norm is not None: x = self.norm(x) if self.activation is not None: x = self.activation(x) return x class AllReduce(Function): @staticmethod def forward(ctx, input): input_list = [torch.zeros_like(input) for k in range(dist. get_world_size())] dist.all_gather(input_list, input, async_op=False) inputs = torch.stack(input_list, dim=0) return torch.sum(inputs, dim=0) @staticmethod def backward(ctx, grad_output): dist.all_reduce(grad_output, async_op=False) return grad_output class NaiveSyncBatchNorm(BatchNorm2d): """ `torch.nn.SyncBatchNorm` has known unknown bugs. It produces significantly worse AP (and sometimes goes NaN) when the batch size on each worker is quite different (e.g., when scale augmentation is used, or when it is applied to mask head). Use this implementation before `nn.SyncBatchNorm` is fixed. It is slower than `nn.SyncBatchNorm`. Note: There isn't a single definition of Sync BatchNorm. When ``stats_mode==""``, this module computes overall statistics by using statistics of each worker with equal weight. The result is true statistics of all samples (as if they are all on one worker) only when all workers have the same (N, H, W). This mode does not support inputs with zero batch size. 
When ``stats_mode=="N"``, this module computes overall statistics by weighting the statistics of each worker by their ``N``. The result is true statistics of all samples (as if they are all on one worker) only when all workers have the same (H, W). It is slower than ``stats_mode==""``. Even though the result of this module may not be the true statistics of all samples, it may still be reasonable because it might be preferable to assign equal weights to all workers, regardless of their (H, W) dimension, instead of putting larger weight on larger images. From preliminary experiments, little difference is found between such a simplified implementation and an accurate computation of overall mean & variance. """ def __init__(self, *args, stats_mode='', **kwargs): super().__init__(*args, **kwargs) assert stats_mode in ['', 'N'] self._stats_mode = stats_mode def forward(self, input): if comm.get_world_size() == 1 or not self.training: return super().forward(input) B, C = input.shape[0], input.shape[1] mean = torch.mean(input, dim=[0, 2, 3]) meansqr = torch.mean(input * input, dim=[0, 2, 3]) if self._stats_mode == '': assert B > 0, 'SyncBatchNorm(stats_mode="") does not support zero batch size.' vec = torch.cat([mean, meansqr], dim=0) vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size()) mean, meansqr = torch.split(vec, C) momentum = self.momentum else: if B == 0: vec = torch.zeros([2 * C + 1], device=mean.device, dtype= mean.dtype) vec = vec + input.sum() else: vec = torch.cat([mean, meansqr, torch.ones([1], device=mean .device, dtype=mean.dtype)], dim=0) vec = AllReduce.apply(vec * B) total_batch = vec[-1].detach() momentum = total_batch.clamp(max=1) * self.momentum total_batch = torch.max(total_batch, torch.ones_like(total_batch)) mean, meansqr, _ = torch.split(vec / total_batch, C) var = meansqr - mean * mean invstd = torch.rsqrt(var + self.eps) scale = self.weight * invstd bias = self.bias - mean * scale scale = scale.reshape(1, -1, 1, 1) bias = bias.reshape(1, -1, 1, 1) self.running_mean += momentum * (mean.detach() - self.running_mean) self.running_var += momentum * (var.detach() - self.running_var) return input * scale + bias class rSoftMax(nn.Module): def __init__(self, radix, cardinality): super().__init__() self.radix = radix self.cardinality = cardinality def forward(self, x): batch = x.size(0) if self.radix > 1: x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) x = F.softmax(x, dim=1) x = x.reshape(batch, -1) else: x = torch.sigmoid(x) return x class SplAtConv2d(Module): """Split-Attention Conv2d """ def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), padding=(0, 0), dilation=(1, 1), groups=1, bias=True, radix=2, reduction_factor=4, rectify=False, rectify_avg=False, norm=None, dropblock_prob=0.0, **kwargs): super(SplAtConv2d, self).__init__() padding = _pair(padding) self.rectify = rectify and (padding[0] > 0 or padding[1] > 0) self.rectify_avg = rectify_avg inter_channels = max(in_channels * radix // reduction_factor, 32) self.radix = radix self.cardinality = groups self.channels = channels self.dropblock_prob = dropblock_prob if self.rectify: self.conv = RFConv2d(in_channels, channels * radix, kernel_size, stride, padding, dilation, groups=groups * radix, bias=bias, average_mode=rectify_avg, **kwargs) else: self.conv = Conv2d(in_channels, channels * radix, kernel_size, stride, padding, dilation, groups=groups * radix, bias=bias, **kwargs) self.use_bn = norm is not None if self.use_bn: self.bn0 = get_norm(norm, channels * radix) self.relu =
ReLU(inplace=True) self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality) if self.use_bn: self.bn1 = get_norm(norm, inter_channels) self.fc2 = Conv2d(inter_channels, channels * radix, 1, groups=self. cardinality) if dropblock_prob > 0.0: self.dropblock = DropBlock2D(dropblock_prob, 3) self.rsoftmax = rSoftMax(radix, groups) def forward(self, x): x = self.conv(x) if self.use_bn: x = self.bn0(x) if self.dropblock_prob > 0.0: x = self.dropblock(x) x = self.relu(x) batch, rchannel = x.shape[:2] if self.radix > 1: splited = torch.split(x, rchannel // self.radix, dim=1) gap = sum(splited) else: gap = x gap = F.adaptive_avg_pool2d(gap, 1) gap = self.fc1(gap) if self.use_bn: gap = self.bn1(gap) gap = self.relu(gap) atten = self.fc2(gap) atten = self.rsoftmax(atten).view(batch, -1, 1, 1) if self.radix > 1: attens = torch.split(atten, rchannel // self.radix, dim=1) out = sum([(att * split) for att, split in zip(attens, splited)]) else: out = atten * x return out.contiguous() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'channels': 4, 'kernel_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.autograd import Function from torch.nn import Module import logging import torch.utils.data import torch.distributed as dist from torch import nn import torch.nn.functional as F from torch.autograd.function import Function from torch.autograd import Function from torch.nn.modules.utils import _pair from torch.nn import BatchNorm2d from torch.nn import ReLU assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_add_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp3 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp1 = 0.0 tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp5 = 1.0 tmp6 = tmp4 / tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 32 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 4 x2 = xindex // 8 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 8 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (4 + x0 + 8 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 - tmp3 tmp5 = tl_math.exp(tmp4) tmp6 = tmp1 - tmp3 tmp7 = tl_math.exp(tmp6) tmp8 = tmp2 - tmp3 tmp9 = tl_math.exp(tmp8) tmp10 = tmp7 + tmp9 tmp11 = tmp5 / tmp10 
tl.store(out_ptr0 + x3, tmp11, xmask) @triton.jit def triton_poi_fused_add_mul_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask) tmp1 = tl.load(in_ptr1 + (x0 + 8 * x1), xmask) tmp5 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask) tmp6 = tl.load(in_ptr1 + (4 + x0 + 8 * x1), xmask) tmp2 = tmp0 * tmp1 tmp3 = 0.0 tmp4 = tmp2 + tmp3 tmp7 = tmp5 * tmp6 tmp8 = tmp4 + tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8, 2, 4, 4), (32, 16, 4, 1)) assert_size_stride(primals_3, (8,), (1,)) assert_size_stride(primals_4, (32, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (8, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_7, (8,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=2, bias=None) assert_size_stride(buf0, (4, 8, 1, 1), (8, 1, 1, 1)) buf1 = reinterpret_tensor(buf0, (4, 8, 1, 1), (8, 1, 32, 32), 0) del buf0 buf9 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.bool) get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0[grid(32)](buf1, primals_3, buf9, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_add_mean_1[grid(16)](buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 32, 1, 1), (32, 1, 1, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_2[grid(128)](buf4, primals_5, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 8, 1, 1), (8, 1, 1, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_3[grid(32)](buf6, primals_7, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_7 buf7 = empty_strided_cuda((4, 2, 1, 4), (8, 4, 4, 1), torch.float32) triton_poi_fused__softmax_4[grid(32)](buf6, buf7, 32, XBLOCK=32, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_add_mul_5[grid(16)](buf7, buf1, buf8, 16, XBLOCK= 16, num_warps=1, num_stages=1) return (buf8, primals_1, primals_2, primals_4, primals_6, reinterpret_tensor(buf1, (4, 4, 1, 1), (8, 1, 1, 1), 0), reinterpret_tensor(buf1, (4, 4, 1, 1), (8, 1, 1, 1), 4), buf2, buf4, buf6, reinterpret_tensor(buf7, (4, 4, 1, 1), (8, 1, 1, 1), 0), reinterpret_tensor(buf7, (4, 4, 1, 1), (8, 1, 1, 1), 4), buf9) def get_norm(norm, out_channels): """ Args: norm (str or callable): either one of BN, SyncBN, FrozenBN, GN; or a callable that takes a channel number and returns the normalization layer as a nn.Module. 
Returns: nn.Module or None: the normalization layer """ if isinstance(norm, str): if len(norm) == 0: return None norm = {'BN': BatchNorm2d, 'SyncBN': NaiveSyncBatchNorm, 'FrozenBN': FrozenBatchNorm2d, 'GN': lambda channels: nn.GroupNorm(32, channels), 'nnSyncBN': nn.SyncBatchNorm}[norm] return norm(out_channels) class FrozenBatchNorm2d(nn.Module): """ BatchNorm2d where the batch statistics and the affine parameters are fixed. It contains non-trainable buffers called "weight" and "bias", "running_mean", "running_var", initialized to perform identity transformation. The pre-trained backbone models from Caffe2 only contain "weight" and "bias", which are computed from the original four parameters of BN. The affine transform `x * weight + bias` will perform the equivalent computation of `(x - running_mean) / sqrt(running_var) * weight + bias`. When loading a backbone model from Caffe2, "running_mean" and "running_var" will be left unchanged as identity transformation. Other pre-trained backbone models may contain all 4 parameters. The forward is implemented by `F.batch_norm(..., training=False)`. """ _version = 3 def __init__(self, num_features, eps=1e-05): super().__init__() self.num_features = num_features self.eps = eps self.register_buffer('weight', torch.ones(num_features)) self.register_buffer('bias', torch.zeros(num_features)) self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features) - eps) def forward(self, x): if x.requires_grad: scale = self.weight * (self.running_var + self.eps).rsqrt() bias = self.bias - self.running_mean * scale scale = scale.reshape(1, -1, 1, 1) bias = bias.reshape(1, -1, 1, 1) return x * scale + bias else: return F.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias, training=False, eps=self.eps) def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): version = local_metadata.get('version', None) if version is None or version < 2: if prefix + 'running_mean' not in state_dict: state_dict[prefix + 'running_mean'] = torch.zeros_like(self .running_mean) if prefix + 'running_var' not in state_dict: state_dict[prefix + 'running_var'] = torch.ones_like(self. running_var) if version is not None and version < 3: logger = logging.getLogger(__name__) logger.info('FrozenBatchNorm {} is upgraded to version 3.'. format(prefix.rstrip('.'))) state_dict[prefix + 'running_var'] -= self.eps super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) def __repr__(self): return 'FrozenBatchNorm2d(num_features={}, eps={})'.format(self. num_features, self.eps) @classmethod def convert_frozen_batchnorm(cls, module): """ Convert BatchNorm/SyncBatchNorm in module into FrozenBatchNorm. Args: module (torch.nn.Module): Returns: If module is BatchNorm/SyncBatchNorm, returns a new module. Otherwise, in-place convert module and return it. 
Similar to convert_sync_batchnorm in https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py """ bn_module = nn.modules.batchnorm bn_module = bn_module.BatchNorm2d, bn_module.SyncBatchNorm res = module if isinstance(module, bn_module): res = cls(module.num_features) if module.affine: res.weight.data = module.weight.data.clone().detach() res.bias.data = module.bias.data.clone().detach() res.running_mean.data = module.running_mean.data res.running_var.data = module.running_var.data res.eps = module.eps else: for name, child in module.named_children(): new_child = cls.convert_frozen_batchnorm(child) if new_child is not child: res.add_module(name, new_child) return res class _NewEmptyTensorOp(torch.autograd.Function): @staticmethod def forward(ctx, x, new_shape): ctx.shape = x.shape return x.new_empty(new_shape) @staticmethod def backward(ctx, grad): shape = ctx.shape return _NewEmptyTensorOp.apply(grad, shape), None class Conv2d(torch.nn.Conv2d): """ A wrapper around :class:`torch.nn.Conv2d` to support empty inputs and more features. """ def __init__(self, *args, **kwargs): """ Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`: Args: norm (nn.Module, optional): a normalization layer activation (callable(Tensor) -> Tensor): a callable activation function It assumes that norm layer is used before activation. """ norm = kwargs.pop('norm', None) activation = kwargs.pop('activation', None) super().__init__(*args, **kwargs) self.norm = norm self.activation = activation def forward(self, x): if x.numel() == 0 and self.training: assert not isinstance(self.norm, torch.nn.SyncBatchNorm ), 'SyncBatchNorm does not support empty inputs!' if x.numel() == 0 and TORCH_VERSION <= (1, 4): assert not isinstance(self.norm, torch.nn.GroupNorm ), 'GroupNorm does not support empty inputs in PyTorch <=1.4!' output_shape = [((i + 2 * p - (di * (k - 1) + 1)) // s + 1) for i, p, di, k, s in zip(x.shape[-2:], self.padding, self. dilation, self.kernel_size, self.stride)] output_shape = [x.shape[0], self.weight.shape[0]] + output_shape empty = _NewEmptyTensorOp.apply(x, output_shape) if self.training: _dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 return empty + _dummy else: return empty x = super().forward(x) if self.norm is not None: x = self.norm(x) if self.activation is not None: x = self.activation(x) return x class AllReduce(Function): @staticmethod def forward(ctx, input): input_list = [torch.zeros_like(input) for k in range(dist. get_world_size())] dist.all_gather(input_list, input, async_op=False) inputs = torch.stack(input_list, dim=0) return torch.sum(inputs, dim=0) @staticmethod def backward(ctx, grad_output): dist.all_reduce(grad_output, async_op=False) return grad_output class NaiveSyncBatchNorm(BatchNorm2d): """ `torch.nn.SyncBatchNorm` has known unknown bugs. It produces significantly worse AP (and sometimes goes NaN) when the batch size on each worker is quite different (e.g., when scale augmentation is used, or when it is applied to mask head). Use this implementation before `nn.SyncBatchNorm` is fixed. It is slower than `nn.SyncBatchNorm`. Note: There isn't a single definition of Sync BatchNorm. When ``stats_mode==""``, this module computes overall statistics by using statistics of each worker with equal weight. The result is true statistics of all samples (as if they are all on one worker) only when all workers have the same (N, H, W). This mode does not support inputs with zero batch size. 
When ``stats_mode=="N"``, this module computes overall statistics by weighting the statistics of each worker by their ``N``. The result is true statistics of all samples (as if they are all on one worker) only when all workers have the same (H, W). It is slower than ``stats_mode==""``. Even though the result of this module may not be the true statistics of all samples, it may still be reasonable because it might be preferable to assign equal weights to all workers, regardless of their (H, W) dimension, instead of putting larger weight on larger images. From preliminary experiments, little difference is found between such a simplified implementation and an accurate computation of overall mean & variance. """ def __init__(self, *args, stats_mode='', **kwargs): super().__init__(*args, **kwargs) assert stats_mode in ['', 'N'] self._stats_mode = stats_mode def forward(self, input): if comm.get_world_size() == 1 or not self.training: return super().forward(input) B, C = input.shape[0], input.shape[1] mean = torch.mean(input, dim=[0, 2, 3]) meansqr = torch.mean(input * input, dim=[0, 2, 3]) if self._stats_mode == '': assert B > 0, 'SyncBatchNorm(stats_mode="") does not support zero batch size.' vec = torch.cat([mean, meansqr], dim=0) vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size()) mean, meansqr = torch.split(vec, C) momentum = self.momentum else: if B == 0: vec = torch.zeros([2 * C + 1], device=mean.device, dtype= mean.dtype) vec = vec + input.sum() else: vec = torch.cat([mean, meansqr, torch.ones([1], device=mean .device, dtype=mean.dtype)], dim=0) vec = AllReduce.apply(vec * B) total_batch = vec[-1].detach() momentum = total_batch.clamp(max=1) * self.momentum total_batch = torch.max(total_batch, torch.ones_like(total_batch)) mean, meansqr, _ = torch.split(vec / total_batch, C) var = meansqr - mean * mean invstd = torch.rsqrt(var + self.eps) scale = self.weight * invstd bias = self.bias - mean * scale scale = scale.reshape(1, -1, 1, 1) bias = bias.reshape(1, -1, 1, 1) self.running_mean += momentum * (mean.detach() - self.running_mean) self.running_var += momentum * (var.detach() - self.running_var) return input * scale + bias class rSoftMax(nn.Module): def __init__(self, radix, cardinality): super().__init__() self.radix = radix self.cardinality = cardinality def forward(self, x): batch = x.size(0) if self.radix > 1: x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) x = F.softmax(x, dim=1) x = x.reshape(batch, -1) else: x = torch.sigmoid(x) return x class SplAtConv2dNew(Module): """Split-Attention Conv2d """ def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), padding=(0, 0), dilation=(1, 1), groups=1, bias=True, radix=2, reduction_factor=4, rectify=False, rectify_avg=False, norm=None, dropblock_prob=0.0, **kwargs): super(SplAtConv2dNew, self).__init__() padding = _pair(padding) self.rectify = rectify and (padding[0] > 0 or padding[1] > 0) self.rectify_avg = rectify_avg inter_channels = max(in_channels * radix // reduction_factor, 32) self.radix = radix self.cardinality = groups self.channels = channels self.dropblock_prob = dropblock_prob if self.rectify: self.conv = RFConv2d(in_channels, channels * radix, kernel_size, stride, padding, dilation, groups=groups * radix, bias=bias, average_mode=rectify_avg, **kwargs) else: self.conv = Conv2d(in_channels, channels * radix, kernel_size, stride, padding, dilation, groups=groups * radix, bias=bias, **kwargs) self.use_bn = norm is not None if self.use_bn: self.bn0 = get_norm(norm, channels * radix)
self.relu = ReLU(inplace=True) self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality) if self.use_bn: self.bn1 = get_norm(norm, inter_channels) self.fc2 = Conv2d(inter_channels, channels * radix, 1, groups=self. cardinality) if dropblock_prob > 0.0: self.dropblock = DropBlock2D(dropblock_prob, 3) self.rsoftmax = rSoftMax(radix, groups) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_4 = self.fc1.weight primals_5 = self.fc1.bias primals_6 = self.fc2.weight primals_7 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Shun14/detectron2-ResNeSt
SplAtConv2d
false
14,445
[ "Apache-2.0" ]
344
cda53a237199da3bbe7526d41c41b9d8df4c4814
https://github.com/Shun14/detectron2-ResNeSt/tree/cda53a237199da3bbe7526d41c41b9d8df4c4814
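With this record's defaults (radix=2, groups=1, reduction_factor=4), `inter_channels = max(4 * 2 // 4, 32) = 32`, which is exactly the (32, 4, 1, 1) `fc1` weight that `call` asserts; the 4x4 kernel with no padding also collapses the 4x4 input to 1x1 spatially. A weight-tied smoke test, assuming CUDA and the default no-norm configuration:

import torch

# Sketch: both paths should produce the same (4, 4, 1, 1) attention-weighted output.
eager = SplAtConv2d(in_channels=4, channels=4, kernel_size=4).cuda()
fused = SplAtConv2dNew(in_channels=4, channels=4, kernel_size=4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
print(torch.allclose(eager(x), fused(x), atol=1e-5))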
Net
import torch from torch.nn import functional as F class Net(torch.nn.Module): def __init__(self, n_feature, n_hidden, n_output): super(Net, self).__init__() self.hidden = torch.nn.Linear(n_feature, n_hidden) self.hidden_two = torch.nn.Linear(n_hidden, n_hidden) self.hidden_3 = torch.nn.Linear(n_hidden, n_hidden) self.predict = torch.nn.Linear(n_hidden, n_output) def forward(self, x): x = F.relu(self.hidden(x)) x = F.relu(self.hidden_two(x)) x = F.relu(self.hidden_3(x)) x = self.predict(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'n_feature': 4, 'n_hidden': 4, 'n_output': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, primals_2, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3, primals_5, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(256)](buf5, primals_7, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6) del primals_9 return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor( buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (64, 4), 
(4, 1), 0 ), primals_8, buf7, primals_6, buf8, primals_4, buf9 class NetNew(torch.nn.Module): def __init__(self, n_feature, n_hidden, n_output): super(NetNew, self).__init__() self.hidden = torch.nn.Linear(n_feature, n_hidden) self.hidden_two = torch.nn.Linear(n_hidden, n_hidden) self.hidden_3 = torch.nn.Linear(n_hidden, n_hidden) self.predict = torch.nn.Linear(n_hidden, n_output) def forward(self, input_0): primals_1 = self.hidden.weight primals_2 = self.hidden.bias primals_4 = self.hidden_two.weight primals_5 = self.hidden_two.bias primals_6 = self.hidden_3.weight primals_7 = self.hidden_3.bias primals_8 = self.predict.weight primals_9 = self.predict.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
SunHaozhe/modular-metalearning
Net
false
14,446
[ "MIT" ]
70
c94dd18c6d105f18667d4de7bb4c81fa538a541c
https://github.com/SunHaozhe/modular-metalearning/tree/c94dd18c6d105f18667d4de7bb4c81fa538a541c
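The eager `Net` applies its linear layers to the last dimension of a 4-D input; the compiled `call` does the same by flattening (4, 4, 4, 4) to (64, 4) for the matmuls and reshaping back. A weight-tied check, assuming CUDA:

import torch

# Sketch: the compiled three-hidden-layer MLP should match the eager module.
eager = Net(n_feature=4, n_hidden=4, n_output=4).cuda()
fused = NetNew(n_feature=4, n_hidden=4, n_output=4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
print(torch.allclose(eager(x), fused(x), atol=1e-5))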
VarifocalLoss
import torch import torch.nn.functional as F import torch.nn as nn def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Return: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values. """ if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def varifocal_loss(pred, target, weight=None, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', avg_factor=None): """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_ Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes target (torch.Tensor): The learning target of the iou-aware classification score with shape (N, C), C is the number of classes. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. alpha (float, optional): A balance factor for the negative part of Varifocal Loss, which is different from the alpha of Focal Loss. Defaults to 0.75. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. iou_weighted (bool, optional): Whether to weight the loss of the positive example with the iou target. Defaults to True. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. """ assert pred.size() == target.size() pred_sigmoid = pred.sigmoid() target = target.type_as(pred) if iou_weighted: focal_weight = target * (target > 0.0).float() + alpha * (pred_sigmoid - target).abs().pow(gamma) * (target <= 0.0).float() else: focal_weight = (target > 0.0).float() + alpha * (pred_sigmoid - target ).abs().pow(gamma) * (target <= 0.0).float() loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none' ) * focal_weight loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss class VarifocalLoss(nn.Module): def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', loss_weight=1.0): """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_ Args: use_sigmoid (bool, optional): Whether the prediction is used for sigmoid or softmax. Defaults to True. alpha (float, optional): A balance factor for the negative part of Varifocal Loss, which is different from the alpha of Focal Loss. Defaults to 0.75. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. iou_weighted (bool, optional): Whether to weight the loss of the positive examples with the iou target. Defaults to True. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". loss_weight (float, optional): Weight of loss.
Defaults to 1.0. """ super(VarifocalLoss, self).__init__() assert use_sigmoid is True, 'Only sigmoid varifocal loss supported now.' assert alpha >= 0.0 self.use_sigmoid = use_sigmoid self.alpha = alpha self.gamma = gamma self.iou_weighted = iou_weighted self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning target of the prediction. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Options are "none", "mean" and "sum". Returns: torch.Tensor: The calculated loss """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = (reduction_override if reduction_override else self. reduction) if self.use_sigmoid: loss_cls = self.loss_weight * varifocal_loss(pred, target, weight, alpha=self.alpha, gamma=self.gamma, iou_weighted= self.iou_weighted, reduction=reduction, avg_factor=avg_factor) else: raise NotImplementedError return loss_cls def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_gt_le_mean_mul_pow_sigmoid_sub_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tmp0 > tmp5 tmp14 = tmp13.to(tl.float32) tmp15 = tmp0 * tmp14 tmp16 = tl.sigmoid(tmp3) tmp17 = tmp16 - tmp0 tmp18 = tl_math.abs(tmp17) tmp19 = tmp18 * tmp18 tmp20 = 0.75 tmp21 = tmp19 * tmp20 tmp22 = tmp0 <= tmp5 tmp23 = tmp22.to(tl.float32) tmp24 = tmp21 * tmp23 tmp25 = tmp15 + tmp24 tmp26 = tmp12 * tmp25 tmp27 = tl.broadcast_to(tmp26, [RBLOCK]) tmp29 = triton_helpers.promote_to_tensor(tl.sum(tmp27, 0)) tmp30 = 256.0 tmp31 = tmp29 / tmp30 tmp32 = tmp31 * tmp1 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp32, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused__to_copy_abs_add_binary_cross_entropy_with_logits_gt_le_mean_mul_pow_sigmoid_sub_0[ grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def reduce_loss(loss, reduction): """Reduce loss as specified. Args: loss (Tensor): Elementwise loss tensor. reduction (str): Options are "none", "mean" and "sum". Returns: Tensor: Reduced loss tensor. """ reduction_enum = F._Reduction.get_enum(reduction) if reduction_enum == 0: return loss elif reduction_enum == 1: return loss.mean() elif reduction_enum == 2: return loss.sum() def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): """Apply element-wise weight and reduce loss. Args: loss (Tensor): Element-wise loss. weight (Tensor): Element-wise weights. reduction (str): Same as built-in losses of PyTorch. avg_factor (float): Average factor when computing the mean of losses. Returns: Tensor: Processed loss values.
""" if weight is not None: loss = loss * weight if avg_factor is None: loss = reduce_loss(loss, reduction) elif reduction == 'mean': loss = loss.sum() / avg_factor elif reduction != 'none': raise ValueError('avg_factor can not be used with reduction="sum"') return loss def varifocal_loss(pred, target, weight=None, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', avg_factor=None): """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_ Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes target (torch.Tensor): The learning target of the iou-aware classification score with shape (N, C), C is the number of classes. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. alpha (float, optional): A balance factor for the negative part of Varifocal Loss, which is different from the alpha of Focal Loss. Defaults to 0.75. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. iou_weighted (bool, optional): Whether to weight the loss of the positive example with the iou target. Defaults to True. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. """ assert pred.size() == target.size() pred_sigmoid = pred.sigmoid() target = target.type_as(pred) if iou_weighted: focal_weight = target * (target > 0.0).float() + alpha * (pred_sigmoid - target).abs().pow(gamma) * (target <= 0.0).float() else: focal_weight = (target > 0.0).float() + alpha * (pred_sigmoid - target ).abs().pow(gamma) * (target <= 0.0).float() loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none' ) * focal_weight loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss class VarifocalLossNew(nn.Module): def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', loss_weight=1.0): """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_ Args: use_sigmoid (bool, optional): Whether the prediction is used for sigmoid or softmax. Defaults to True. alpha (float, optional): A balance factor for the negative part of Varifocal Loss, which is different from the alpha of Focal Loss. Defaults to 0.75. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. iou_weighted (bool, optional): Whether to weight the loss of the positive examples with the iou target. Defaults to True. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". loss_weight (float, optional): Weight of loss. Defaults to 1.0. """ super(VarifocalLossNew, self).__init__() assert use_sigmoid is True, 'Only sigmoid varifocal loss supported now.' assert alpha >= 0.0 self.use_sigmoid = use_sigmoid self.alpha = alpha self.gamma = gamma self.iou_weighted = iou_weighted self.reduction = reduction self.loss_weight = loss_weight def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Sundrops/mmdetection
VarifocalLoss
false
14,447
[ "Apache-2.0" ]
549
d3cf38d91c454b1a6881e8c36c1e4a66dc5521b8
https://github.com/Sundrops/mmdetection/tree/d3cf38d91c454b1a6881e8c36c1e4a66dc5521b8
ContextPooler
from _paritybench_helpers import _mock_config import math import torch from torch import nn def get_mask(input, local_context): if not isinstance(local_context, DropoutContext): dropout = local_context mask = None else: dropout = local_context.dropout dropout *= local_context.scale mask = local_context.mask if local_context.reuse_mask else None if dropout > 0 and mask is None: mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).bool() if isinstance(local_context, DropoutContext): if local_context.mask is None: local_context.mask = mask return mask, dropout def gelu(x): """ Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415 """ return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0)))) class DropoutContext(object): def __init__(self): self.dropout = 0 self.mask = None self.scale = 1 self.reuse_mask = True class XDropout(torch.autograd.Function): """Optimized dropout function to save computation and memory by using mask operation instead of multiplication.""" @staticmethod def forward(ctx, input, local_ctx): mask, dropout = get_mask(input, local_ctx) ctx.scale = 1.0 / (1 - dropout) if dropout > 0: ctx.save_for_backward(mask) return input.masked_fill(mask, 0) * ctx.scale else: return input @staticmethod def backward(ctx, grad_output): if ctx.scale > 1: mask, = ctx.saved_tensors return grad_output.masked_fill(mask, 0) * ctx.scale, None else: return grad_output, None class StableDropout(torch.nn.Module): """ Optimized dropout module for stabilizing the training Args: drop_prob (float): the dropout probabilities """ def __init__(self, drop_prob): super().__init__() self.drop_prob = drop_prob self.count = 0 self.context_stack = None def forward(self, x): """ Call the module Args: x (:obj:`torch.tensor`): The input tensor to apply dropout """ if self.training and self.drop_prob > 0: return XDropout.apply(x, self.get_context()) return x def clear_context(self): self.count = 0 self.context_stack = None def init_context(self, reuse_mask=True, scale=1): if self.context_stack is None: self.context_stack = [] self.count = 0 for c in self.context_stack: c.reuse_mask = reuse_mask c.scale = scale def get_context(self): if self.context_stack is not None: if self.count >= len(self.context_stack): self.context_stack.append(DropoutContext()) ctx = self.context_stack[self.count] ctx.dropout = self.drop_prob self.count += 1 return ctx else: return self.drop_prob class ContextPooler(nn.Module): def __init__(self, config): super().__init__() try: pooler_hidden_size = config.pooler_hidden_size pooler_dropout = config.pooler_dropout except AttributeError: pooler_hidden_size = config.hidden_size pooler_dropout = 0.1 self.dense = nn.Linear(pooler_hidden_size, pooler_hidden_size) self.dropout = StableDropout(pooler_dropout) self.config = config def forward(self, hidden_states): context_token = hidden_states[:, 0] context_token = self.dropout(context_token) pooled_output = self.dense(context_token) pooled_output = gelu(pooled_output) return pooled_output @property def output_dim(self): return self.config.hidden_size def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(pooler_hidden_size=4, pooler_dropout=0.5, hidden_size=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_mul_pow_tanh_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = tmp2 * tmp2 tmp6 = tmp5 * tmp2 tmp7 = 0.044715 tmp8 = tmp6 * tmp7 tmp9 = tmp2 + tmp8 tmp10 = 0.7978845608028654 tmp11 = tmp9 * tmp10 tmp12 = libdevice.tanh(tmp11) tmp13 = 1.0 tmp14 = tmp12 + tmp13 tmp15 = tmp4 * tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_mul_pow_tanh_1[grid(64)](buf1, primals_3, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf2, primals_3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf1 def get_mask(input, local_context): if not isinstance(local_context, DropoutContext): dropout = local_context mask = None else: dropout = local_context.dropout dropout *= local_context.scale mask = local_context.mask if local_context.reuse_mask else None if dropout > 0 and mask is None: mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).bool() if isinstance(local_context, DropoutContext): if local_context.mask is None: local_context.mask = mask return mask, dropout def gelu(x): """ Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). 
Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415 """ return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0)))) class DropoutContext(object): def __init__(self): self.dropout = 0 self.mask = None self.scale = 1 self.reuse_mask = True class XDropout(torch.autograd.Function): """Optimized dropout function to save computation and memory by using mask operation instead of multiplication.""" @staticmethod def forward(ctx, input, local_ctx): mask, dropout = get_mask(input, local_ctx) ctx.scale = 1.0 / (1 - dropout) if dropout > 0: ctx.save_for_backward(mask) return input.masked_fill(mask, 0) * ctx.scale else: return input @staticmethod def backward(ctx, grad_output): if ctx.scale > 1: mask, = ctx.saved_tensors return grad_output.masked_fill(mask, 0) * ctx.scale, None else: return grad_output, None class StableDropout(torch.nn.Module): """ Optimized dropout module for stabilizing the training Args: drop_prob (float): the dropout probabilities """ def __init__(self, drop_prob): super().__init__() self.drop_prob = drop_prob self.count = 0 self.context_stack = None def forward(self, x): """ Call the module Args: x (:obj:`torch.tensor`): The input tensor to apply dropout """ if self.training and self.drop_prob > 0: return XDropout.apply(x, self.get_context()) return x def clear_context(self): self.count = 0 self.context_stack = None def init_context(self, reuse_mask=True, scale=1): if self.context_stack is None: self.context_stack = [] self.count = 0 for c in self.context_stack: c.reuse_mask = reuse_mask c.scale = scale def get_context(self): if self.context_stack is not None: if self.count >= len(self.context_stack): self.context_stack.append(DropoutContext()) ctx = self.context_stack[self.count] ctx.dropout = self.drop_prob self.count += 1 return ctx else: return self.drop_prob class ContextPoolerNew(nn.Module): def __init__(self, config): super().__init__() try: pooler_hidden_size = config.pooler_hidden_size pooler_dropout = config.pooler_dropout except AttributeError: pooler_hidden_size = config.hidden_size pooler_dropout = 0.1 self.dense = nn.Linear(pooler_hidden_size, pooler_hidden_size) self.dropout = StableDropout(pooler_dropout) self.config = config @property def output_dim(self): return self.config.hidden_size def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Stochastic-Adventure/ClinicalTransformerRelationExtraction
ContextPooler
false
14,448
[ "MIT" ]
78
eef956bbfbd64b008014ef7cac5f818087816725
https://github.com/Stochastic-Adventure/ClinicalTransformerRelationExtraction/tree/eef956bbfbd64b008014ef7cac5f818087816725
MockAccuracy
import torch class _Metric(torch.nn.Module): def __init__(self): super().__init__() def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'): raise NotImplementedError() class Accuracy(_Metric): def __init__(self): super().__init__() def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'): """ :param input: [B, L] :param target: [B, L] :return: """ bool_acc = input.long() == target.long() return bool_acc.sum() / bool_acc.numel() class MockAccuracy(Accuracy): def __init__(self): super().__init__() def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'): return super().forward(input, target) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused__to_copy_div_eq_sum_0(in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp2 = tl.load(in_ptr1 + r0, None) tmp1 = tmp0.to(tl.int64) tmp3 = tmp2.to(tl.int64) tmp4 = tmp1 == tmp3 tmp5 = tmp4.to(tl.int64) tmp6 = tl.broadcast_to(tmp5, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tmp8.to(tl.float32) tmp10 = 0.00390625 tmp11 = tmp9 * tmp10 tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp11, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((), (), torch.float32) get_raw_stream(0) triton_per_fused__to_copy_div_eq_sum_0[grid(1)](arg0_1, arg1_1, buf1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class _Metric(torch.nn.Module): def __init__(self): super().__init__() def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'): raise NotImplementedError() class Accuracy(_Metric): def __init__(self): super().__init__() def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'): """ :param input: [B, L] :param target: [B, L] :return: """ bool_acc = input.long() == target.long() return bool_acc.sum() / bool_acc.numel() class MockAccuracyNew(Accuracy): def __init__(self): super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Stillerman/MusicTransformer-pytorch
MockAccuracy
false
14,449
[ "MIT" ]
170
73abb7cab271beba042b7b6fc06a6a9aaee82e8c
https://github.com/Stillerman/MusicTransformer-pytorch/tree/73abb7cab271beba042b7b6fc06a6a9aaee82e8c
ConvolutionBlock
import torch class BaseModule(torch.nn.Module): def __init__(self): super(BaseModule, self).__init__() @property def nparams(self): return sum(p.numel() for p in self.parameters() if p.requires_grad) class Conv1dWithInitialization(BaseModule): def __init__(self, **kwargs): super(Conv1dWithInitialization, self).__init__() self.conv1d = torch.nn.Conv1d(**kwargs) torch.nn.init.orthogonal_(self.conv1d.weight.data, gain=1) def forward(self, x): return self.conv1d(x) class ConvolutionBlock(BaseModule): def __init__(self, in_channels, out_channels, dilation): super(ConvolutionBlock, self).__init__() self.leaky_relu = torch.nn.LeakyReLU(0.2) self.convolution = Conv1dWithInitialization(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding= dilation, dilation=dilation) def forward(self, x): outputs = self.leaky_relu(x) outputs = self.convolution(outputs) return outputs def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'dilation': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 0.2 tmp4 = tmp0 * tmp3 tmp5 = tl.where(tmp2, tmp0, tmp4) tl.store(out_ptr0 + x0, tmp5, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_leaky_relu_0[grid(16)](primals_1, buf0, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 4 ), (0, 4, 1), 0), primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 4), (16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(16)](buf2, primals_3, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 return reinterpret_tensor(buf2, (4, 4), (4, 1), 0 ), primals_2, reinterpret_tensor(buf0, (1, 4, 4), (16, 4, 1), 0) class BaseModule(torch.nn.Module): def __init__(self): super(BaseModule, self).__init__() @property def nparams(self): return sum(p.numel() for p in self.parameters() if p.requires_grad) class Conv1dWithInitialization(BaseModule): def __init__(self, **kwargs): super(Conv1dWithInitialization, self).__init__() self.conv1d = torch.nn.Conv1d(**kwargs) torch.nn.init.orthogonal_(self.conv1d.weight.data, gain=1) def forward(self, x): return self.conv1d(x) class ConvolutionBlockNew(BaseModule): def __init__(self, in_channels, out_channels, dilation): super(ConvolutionBlockNew, self).__init__() self.leaky_relu = torch.nn.LeakyReLU(0.2) self.convolution = Conv1dWithInitialization(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding= dilation, dilation=dilation) def forward(self, input_0): primals_2 = self.convolution.conv1d.weight primals_3 = self.convolution.conv1d.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Seungwoo0326/WaveGrad2-1
ConvolutionBlock
false
14,450
[ "MIT" ]
45
3b202201348449b89353f28bce1596ca7939a810
https://github.com/Seungwoo0326/WaveGrad2-1/tree/3b202201348449b89353f28bce1596ca7939a810
ClassificationCircleLoss
import torch import torch.nn as nn import torch.utils.data from typing import Tuple from torch.nn.functional import cross_entropy from itertools import product as product from math import sqrt as sqrt class ClassificationCircleLoss(nn.Module): """Circle loss for class-level labels as described in the paper `"Circle Loss: A Unified Perspective of Pair Similarity Optimization" <#>`_ Args: scale (float): the scale factor. Default: 256.0 margin (float): the relax margin value. Default: 0.25 circle_center (tuple[float]): the center of the circle (logit_ap, logit_an). Default: (1, 0) reduction (string, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'`` """ def __init__(self, scale: 'float'=256.0, margin: 'float'=0.25, circle_center: 'Tuple[float, float]'=(1, 0), reduction: 'str'='mean' ) ->None: super(ClassificationCircleLoss, self).__init__() self.scale = scale self.margin = margin self.circle_center = circle_center self.reduction = reduction def forward(self, logits: 'torch.Tensor', targets: 'torch.LongTensor' ) ->torch.Tensor: """ Args: logits (torch.Tensor): The predicted logits before softmax, namely :math:`\\cos \\theta` in the above equation, with shape of :math:`(N, C)` targets (torch.LongTensor): The ground-truth label long vector, namely :math:`y` in the above equation, with shape of :math:`(N,)` Returns: torch.Tensor: loss the computed loss """ mask = torch.zeros(logits.shape, dtype=torch.bool, device=logits.device ).scatter_(dim=1, index=targets.unsqueeze(1), value=1) positive_weighting = torch.clamp(self.circle_center[0] + self. margin - logits.detach(), min=0) negative_weighting = torch.clamp(logits.detach() - self. circle_center[1] + self.margin, min=0) logits = torch.where(mask, self.scale * positive_weighting * ( logits - (self.circle_center[0] - self.margin)), self.scale * negative_weighting * (logits - self.circle_center[1] - self.margin) ) loss = cross_entropy(input=logits, target=targets, reduction=self. reduction) return loss def get_inputs(): return [torch.ones([4, 4], dtype=torch.int64), torch.ones([4], dtype= torch.int64)] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.utils.data from typing import Tuple from itertools import product as product from math import sqrt as sqrt assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__log_softmax_add_clamp_mul_rsub_scatter_sub_where_0( in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp6 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp48 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp67 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 == tmp1 tmp3 = tl.full([1], True, tl.int1) tmp4 = tl.full([1], False, tl.int1) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp7 = tmp6.to(tl.float32) tmp8 = 1.25 tmp9 = tmp8 - tmp7 tmp10 = 0.0 tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp12 = 256.0 tmp13 = tmp11 * tmp12 tmp14 = 0.75 tmp15 = tmp7 - tmp14 tmp16 = tmp13 * tmp15 tmp17 = tmp6 - tmp1 tmp18 = tmp17.to(tl.float32) tmp19 = 0.25 tmp20 = tmp18 + tmp19 tmp21 = triton_helpers.maximum(tmp20, tmp10) tmp22 = tmp21 * tmp12 tmp23 = tmp18 - tmp19 tmp24 = tmp22 * tmp23 tmp25 = tl.where(tmp5, tmp16, tmp24) tmp26 = tl.full([1], 1, tl.int64) tmp27 = tmp0 == tmp26 tmp28 = tl.where(tmp27, tmp3, tmp4) tmp30 = tmp29.to(tl.float32) tmp31 = tmp8 - tmp30 tmp32 = triton_helpers.maximum(tmp31, tmp10) tmp33 = tmp32 * tmp12 tmp34 = tmp30 - tmp14 tmp35 = tmp33 * tmp34 tmp36 = tmp29 - tmp1 tmp37 = tmp36.to(tl.float32) tmp38 = tmp37 + tmp19 tmp39 = triton_helpers.maximum(tmp38, tmp10) tmp40 = tmp39 * tmp12 tmp41 = tmp37 - tmp19 tmp42 = tmp40 * tmp41 tmp43 = tl.where(tmp28, tmp35, tmp42) tmp44 = triton_helpers.maximum(tmp25, tmp43) tmp45 = tl.full([1], 2, tl.int64) tmp46 = tmp0 == tmp45 tmp47 = tl.where(tmp46, tmp3, tmp4) tmp49 = tmp48.to(tl.float32) tmp50 = tmp8 - tmp49 tmp51 = triton_helpers.maximum(tmp50, tmp10) tmp52 = tmp51 * tmp12 tmp53 = tmp49 - tmp14 tmp54 = tmp52 * tmp53 tmp55 = tmp48 - tmp1 tmp56 = tmp55.to(tl.float32) tmp57 = tmp56 + tmp19 tmp58 = triton_helpers.maximum(tmp57, tmp10) tmp59 = tmp58 * tmp12 tmp60 = tmp56 - tmp19 tmp61 = tmp59 * tmp60 tmp62 = tl.where(tmp47, tmp54, tmp61) tmp63 = triton_helpers.maximum(tmp44, tmp62) tmp64 = tl.full([1], 3, tl.int64) tmp65 = tmp0 == tmp64 tmp66 = tl.where(tmp65, tmp3, tmp4) tmp68 = tmp67.to(tl.float32) tmp69 = tmp8 - tmp68 tmp70 = triton_helpers.maximum(tmp69, tmp10) tmp71 = tmp70 * tmp12 tmp72 = tmp68 - tmp14 tmp73 = tmp71 * tmp72 tmp74 = tmp67 - tmp1 tmp75 = tmp74.to(tl.float32) tmp76 = tmp75 + tmp19 tmp77 = triton_helpers.maximum(tmp76, tmp10) tmp78 = tmp77 * tmp12 tmp79 = tmp75 - tmp19 tmp80 = tmp78 * tmp79 tmp81 = tl.where(tmp66, tmp73, tmp80) tmp82 = triton_helpers.maximum(tmp63, tmp81) tmp83 = tmp25 - tmp82 tmp84 = tl_math.exp(tmp83) tmp85 = tmp43 - tmp82 tmp86 = tl_math.exp(tmp85) tmp87 = tmp84 + tmp86 tmp88 = tmp62 - tmp82 tmp89 = tl_math.exp(tmp88) tmp90 = tmp87 + tmp89 tmp91 = tmp81 - 
tmp82 tmp92 = tl_math.exp(tmp91) tmp93 = tmp90 + tmp92 tl.store(out_ptr0 + x0, tmp82, xmask) tl.store(out_ptr1 + x0, tmp93, xmask) @triton.jit def triton_poi_fused__log_softmax_add_clamp_mul_rsub_scatter_sub_where_1( in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + x2, xmask) tmp27 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp1 = x0 tmp2 = tmp0 == tmp1 tmp3 = tl.full([1], True, tl.int1) tmp4 = tl.full([1], False, tl.int1) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp7 = tmp6.to(tl.float32) tmp8 = 1.25 tmp9 = tmp8 - tmp7 tmp10 = 0.0 tmp11 = triton_helpers.maximum(tmp9, tmp10) tmp12 = 256.0 tmp13 = tmp11 * tmp12 tmp14 = 0.75 tmp15 = tmp7 - tmp14 tmp16 = tmp13 * tmp15 tmp17 = tl.full([1], 0, tl.int64) tmp18 = tmp6 - tmp17 tmp19 = tmp18.to(tl.float32) tmp20 = 0.25 tmp21 = tmp19 + tmp20 tmp22 = triton_helpers.maximum(tmp21, tmp10) tmp23 = tmp22 * tmp12 tmp24 = tmp19 - tmp20 tmp25 = tmp23 * tmp24 tmp26 = tl.where(tmp5, tmp16, tmp25) tmp28 = tmp26 - tmp27 tmp30 = tl_math.log(tmp29) tmp31 = tmp28 - tmp30 tl.store(out_ptr0 + x2, tmp31, xmask) @triton.jit def triton_per_fused_nll_loss_forward_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.full([1, 1], -100, tl.int64) tmp2 = tmp0 != tmp1 tmp3 = tl.full([1, 1], 0, tl.int64) tmp4 = tl.where(tmp2, tmp0, tmp3) tmp5 = tl.full([XBLOCK, RBLOCK], 4, tl.int32) tmp6 = tmp4 + tmp5 tmp7 = tmp4 < 0 tmp8 = tl.where(tmp7, tmp6, tmp4) tl.device_assert((0 <= tmp8) & (tmp8 < 4), 'index out of bounds: 0 <= tmp8 < 4') tmp10 = tl.load(in_ptr1 + (tmp8 + 4 * r0), None, eviction_policy= 'evict_last') tmp11 = -tmp10 tmp12 = 0.0 tmp13 = tl.where(tmp2, tmp11, tmp12) tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.sum(tmp14, 1)[:, None] tmp17 = tmp2.to(tl.int64) tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp21 = tmp20.to(tl.float32) tmp22 = tmp16 / tmp21 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_add_clamp_mul_rsub_scatter_sub_where_0[ grid(4)](arg1_1, arg0_1, buf0, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__log_softmax_add_clamp_mul_rsub_scatter_sub_where_1[ grid(16)](arg1_1, arg0_1, buf0, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 del buf0 del buf1 buf3 = empty_strided_cuda((), (), torch.float32) buf5 = buf3 del buf3 triton_per_fused_nll_loss_forward_2[grid(1)](buf5, arg1_1, buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg1_1 del buf2 return buf5, class 
ClassificationCircleLossNew(nn.Module): """Circle loss for class-level labels as described in the paper `"Circle Loss: A Unified Perspective of Pair Similarity Optimization" <#>`_ Args: scale (float): the scale factor. Default: 256.0 margin (float): the relax margin value. Default: 0.25 circle_center (tuple[float]): the center of the circle (logit_ap, logit_an). Default: (1, 0) reduction (string, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'`` """ def __init__(self, scale: 'float'=256.0, margin: 'float'=0.25, circle_center: 'Tuple[float, float]'=(1, 0), reduction: 'str'='mean' ) ->None: super(ClassificationCircleLossNew, self).__init__() self.scale = scale self.margin = margin self.circle_center = circle_center self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
StevenGrove/DynamicHead
ClassificationCircleLoss
false
14,451
[ "Apache-2.0" ]
69
d62aa84e1d1c6a0c74d46258ad77b11413c10bef
https://github.com/StevenGrove/DynamicHead/tree/d62aa84e1d1c6a0c74d46258ad77b11413c10bef
SoftmaxLayer
import torch import torch.nn as nn class SoftmaxLayer(nn.Module): """ Naive softmax-layer """ def __init__(self, output_dim, n_class): """ :param output_dim: int :param n_class: int """ super(SoftmaxLayer, self).__init__() self.hidden2tag = nn.Linear(output_dim, n_class) self.criterion = nn.CrossEntropyLoss(size_average=False) def forward(self, x, y): """ :param x: torch.Tensor :param y: torch.Tensor :return: """ tag_scores = self.hidden2tag(x) return self.criterion(tag_scores, y) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'output_dim': 4, 'n_class': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_per_fused__log_softmax_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r3 = rindex r0 = rindex % 16 r2 = rindex // 64 tmp0 = tl.load(in_ptr0 + r3, None) tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy= 'evict_last') tmp14 = tl.load(in_ptr1 + r3, None) tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tmp15 = tmp13 * tmp14 tmp16 = tl.broadcast_to(tmp15, [RBLOCK]) tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0)) tmp19 = -tmp18 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK= 128, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused__log_softmax_mul_neg_sum_1[grid(1)](buf3, buf1, primals_4, 1, 256, num_warps=2, num_stages=1) del buf1 
return buf3, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0 class SoftmaxLayerNew(nn.Module): """ Naive softmax-layer """ def __init__(self, output_dim, n_class): """ :param output_dim: int :param n_class: int """ super(SoftmaxLayerNew, self).__init__() self.hidden2tag = nn.Linear(output_dim, n_class) self.criterion = nn.CrossEntropyLoss(size_average=False) def forward(self, input_0, input_1): primals_1 = self.hidden2tag.weight primals_2 = self.hidden2tag.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
Sy-Zhang/ELMoForManyLangs
SoftmaxLayer
false
14,452
[ "MIT" ]
1,414
f82bf0fef80df617e39d34baa3e46d9857e94e65
https://github.com/Sy-Zhang/ELMoForManyLangs/tree/f82bf0fef80df617e39d34baa3e46d9857e94e65
disparityregression
from _paritybench_helpers import _mock_config import torch import numpy as np from torch import nn import torch.utils.data from torch.autograd import Variable import torch.nn.parallel import torch.utils.data.distributed class disparityregression(nn.Module): def __init__(self, maxdisp, cfg): super(disparityregression, self).__init__() self.disp = Variable(torch.Tensor(np.array(range(maxdisp))), requires_grad=False) def forward(self, x, depth): out = torch.sum(x * depth[None, :, None, None], 1) return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'maxdisp': 4, 'cfg': _mock_config()}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import numpy as np from torch import nn import torch.utils.data from torch.autograd import Variable import torch.nn.parallel import torch.utils.data.distributed assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (64 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr1 + (128 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr1 + (192 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp4 = tmp0 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp0 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp0 * tmp9 tmp11 = tmp8 + tmp10 tl.store(out_ptr0 + x2, tmp11, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 1, 4, 4, 4, 4), (256, 256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sum_0[grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, class disparityregressionNew(nn.Module): def __init__(self, maxdisp, cfg): super(disparityregressionNew, self).__init__() self.disp = Variable(torch.Tensor(np.array(range(maxdisp))), requires_grad=False) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
Sarah20187/X-StereoLab
disparityregression
false
14,453
[ "MIT" ]
192
9ae8c1413307e7df91b14a7f31e8a95f9e5754f9
https://github.com/Sarah20187/X-StereoLab/tree/9ae8c1413307e7df91b14a7f31e8a95f9e5754f9
RGBBlock
import torch import torch.nn.functional as F import torch.nn as nn EPS = 1e-08 class Conv2DMod(nn.Module): def __init__(self, in_chan, out_chan, kernel, demod=True, stride=1, dilation=1, **kwargs): super().__init__() self.filters = out_chan self.demod = demod self.kernel = kernel self.stride = stride self.dilation = dilation self.weight = nn.Parameter(torch.randn((out_chan, in_chan, kernel, kernel))) nn.init.kaiming_normal_(self.weight, a=0, mode='fan_in', nonlinearity='leaky_relu') def _get_same_padding(self, size, kernel, dilation, stride): return ((size - 1) * (stride - 1) + dilation * (kernel - 1)) // 2 def forward(self, x, y): b, _c, h, w = x.shape w1 = y[:, None, :, None, None] w2 = self.weight[None, :, :, :, :] weights = w2 * (w1 + 1) if self.demod: d = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + EPS) weights = weights * d x = x.reshape(1, -1, h, w) _, _, *ws = weights.shape weights = weights.reshape(b * self.filters, *ws) padding = self._get_same_padding(h, self.kernel, self.dilation, self.stride) x = F.conv2d(x, weights, padding=padding, groups=b) x = x.reshape(-1, self.filters, h, w) return x class RGBBlock(nn.Module): def __init__(self, latent_dim, input_channel, upsample, rgba=False): super().__init__() self.input_channel = input_channel self.to_style = nn.Linear(latent_dim, input_channel) out_filters = 1 self.conv = Conv2DMod(input_channel, out_filters, 1, demod=False) self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) if upsample else None def forward(self, x, prev_rgb, istyle): _b, _c, _h, _w = x.shape style = self.to_style(istyle) x = self.conv(x, style) if prev_rgb is not None: x = x + prev_rgb if self.upsample is not None: x = self.upsample(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'latent_dim': 4, 'input_channel': 4, 'upsample': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn.functional as F import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = 1.0 tmp3 = tmp1 + tmp2 tmp4 = tmp0 * tmp3 tl.store(out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__to_copy_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_2(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 3, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_mul_sub_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 8 % 8 x0 = xindex % 8 x2 = xindex // 64 x4 = xindex tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last') tmp37 = tl.load(in_ptr7 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 4, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 4 * tmp4 + 
16 * x2), xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + (tmp8 + 4 * tmp4), xmask, eviction_policy= 'evict_last') tmp11 = tmp9 + tmp10 tmp13 = tmp12 + tmp1 tmp14 = tmp12 < 0 tmp15 = tl.where(tmp14, tmp13, tmp12) tmp16 = tl.load(in_ptr2 + (tmp15 + 4 * tmp4 + 16 * x2), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr3 + (tmp15 + 4 * tmp4), xmask, eviction_policy= 'evict_last') tmp18 = tmp16 + tmp17 tmp19 = tmp18 - tmp11 tmp21 = tmp19 * tmp20 tmp22 = tmp11 + tmp21 tmp24 = tmp23 + tmp1 tmp25 = tmp23 < 0 tmp26 = tl.where(tmp25, tmp24, tmp23) tmp27 = tl.load(in_ptr2 + (tmp8 + 4 * tmp26 + 16 * x2), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr3 + (tmp8 + 4 * tmp26), xmask, eviction_policy= 'evict_last') tmp29 = tmp27 + tmp28 tmp30 = tl.load(in_ptr2 + (tmp15 + 4 * tmp26 + 16 * x2), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr3 + (tmp15 + 4 * tmp26), xmask, eviction_policy= 'evict_last') tmp32 = tmp30 + tmp31 tmp33 = tmp32 - tmp29 tmp34 = tmp33 * tmp20 tmp35 = tmp29 + tmp34 tmp36 = tmp35 - tmp22 tmp38 = tmp36 * tmp37 tmp39 = tmp22 + tmp38 tl.store(in_out_ptr0 + x4, tmp39, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_6, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, primals_4, reinterpret_tensor( primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 1, 4, 1, 1), (4, 1, 1, 4, 4), torch. 
float32) get_raw_stream(0) triton_poi_fused_add_mul_0[grid(16)](primals_5, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf2, (1, 4, 4, 4), (64, 16, 4, 1)) buf3 = empty_strided_cuda((8, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_1[grid(8)](buf3, 8, XBLOCK=8, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((8, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_2[grid(8)](buf4, 8, XBLOCK=8, num_warps= 1, num_stages=1) buf5 = empty_strided_cuda((8,), (1,), torch.int64) triton_poi_fused__to_copy_1[grid(8)](buf5, 8, XBLOCK=8, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((8,), (1,), torch.int64) triton_poi_fused_add_clamp_2[grid(8)](buf6, 8, XBLOCK=8, num_warps= 1, num_stages=1) buf7 = empty_strided_cuda((8,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3[grid(8)](buf7, 8, XBLOCK=8, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((8, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3[grid(8)](buf9, 8, XBLOCK=8, num_warps=1, num_stages=1) buf10 = empty_strided_cuda((4, 1, 8, 8), (64, 256, 8, 1), torch.float32 ) buf11 = reinterpret_tensor(buf10, (4, 1, 8, 8), (64, 64, 8, 1), 0) del buf10 triton_poi_fused__unsafe_index_add_mul_sub_4[grid(256)](buf11, buf3, buf5, buf2, primals_6, buf6, buf7, buf4, buf9, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf2 del primals_6 return buf11, primals_4, primals_5, buf0, reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0), buf3, buf4, buf5, buf6, buf7, buf9 EPS = 1e-08 class Conv2DMod(nn.Module): def __init__(self, in_chan, out_chan, kernel, demod=True, stride=1, dilation=1, **kwargs): super().__init__() self.filters = out_chan self.demod = demod self.kernel = kernel self.stride = stride self.dilation = dilation self.weight = nn.Parameter(torch.randn((out_chan, in_chan, kernel, kernel))) nn.init.kaiming_normal_(self.weight, a=0, mode='fan_in', nonlinearity='leaky_relu') def _get_same_padding(self, size, kernel, dilation, stride): return ((size - 1) * (stride - 1) + dilation * (kernel - 1)) // 2 def forward(self, x, y): b, _c, h, w = x.shape w1 = y[:, None, :, None, None] w2 = self.weight[None, :, :, :, :] weights = w2 * (w1 + 1) if self.demod: d = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + EPS) weights = weights * d x = x.reshape(1, -1, h, w) _, _, *ws = weights.shape weights = weights.reshape(b * self.filters, *ws) padding = self._get_same_padding(h, self.kernel, self.dilation, self.stride) x = F.conv2d(x, weights, padding=padding, groups=b) x = x.reshape(-1, self.filters, h, w) return x class RGBBlockNew(nn.Module): def __init__(self, latent_dim, input_channel, upsample, rgba=False): super().__init__() self.input_channel = input_channel self.to_style = nn.Linear(latent_dim, input_channel) out_filters = 1 self.conv = Conv2DMod(input_channel, out_filters, 1, demod=False) self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) if upsample else None def forward(self, input_0, input_1, input_2): primals_2 = self.to_style.weight primals_3 = self.to_style.bias primals_5 = self.conv.weight primals_1 = input_0 primals_4 = input_1 primals_6 = input_2 output = call([primals_1, primals_2,
primals_3, primals_4, primals_5, primals_6]) return output[0]
SongweiGe/DoodlerGAN
RGBBlock
false
14,454
[ "MIT" ]
92
d435d9b3c0579937cd3c22aa2051960ceb921785
https://github.com/SongweiGe/DoodlerGAN/tree/d435d9b3c0579937cd3c22aa2051960ceb921785
SpatialGate
import math import torch import torch.nn as nn import torch.utils.data from itertools import product as product from math import sqrt as sqrt class SpatialGate(nn.Module): def __init__(self, in_channels: 'int', num_groups: 'int'=1, kernel_size: 'int'=1, padding: 'int'=0, stride: 'int'=1, gate_activation: 'str'= 'ReTanH', gate_activation_kargs: 'dict'=None, get_running_cost: 'callable'=None): super(SpatialGate, self).__init__() self.num_groups = num_groups self.gate_conv = nn.Conv2d(in_channels, num_groups, kernel_size, padding=padding, stride=stride) self.gate_activation = gate_activation self.gate_activation_kargs = gate_activation_kargs if gate_activation == 'ReTanH': self.gate_activate = lambda x: torch.tanh(x).clamp(min=0) elif gate_activation == 'Sigmoid': self.gate_activate = lambda x: torch.sigmoid(x) elif gate_activation == 'GeReTanH': assert 'tau' in gate_activation_kargs tau = gate_activation_kargs['tau'] ttau = math.tanh(tau) self.gate_activate = lambda x: ((torch.tanh(x - tau) + ttau) / (1 + ttau)).clamp(min=0) else: raise NotImplementedError() self.get_running_cost = get_running_cost self.running_cost = None self.init_parameters() def init_parameters(self, init_gate=0.99): if self.gate_activation == 'ReTanH': bias_value = 0.5 * math.log((1 + init_gate) / (1 - init_gate)) elif self.gate_activation == 'Sigmoid': bias_value = 0.5 * math.log(init_gate / (1 - init_gate)) elif self.gate_activation == 'GeReTanH': tau = self.gate_activation_kargs['tau'] bias_value = 0.5 * math.log((1 + init_gate * math.exp(2 * tau)) / (1 - init_gate)) nn.init.normal_(self.gate_conv.weight, std=0.01) nn.init.constant_(self.gate_conv.bias, bias_value) def encode(self, *inputs): outputs = [x.view(x.shape[0] * self.num_groups, -1, *x.shape[2:]) for x in inputs] return outputs def decode(self, *inputs): outputs = [x.view(x.shape[0] // self.num_groups, -1, *x.shape[2:]) for x in inputs] return outputs def update_running_cost(self, gate): if self.get_running_cost is not None: cost = self.get_running_cost(gate) if self.running_cost is not None: self.running_cost = [(x + y) for x, y in zip(self. running_cost, cost)] else: self.running_cost = cost def clear_running_cost(self): self.running_cost = None def forward(self, data_input, gate_input, masked_func=None): gate = self.gate_activate(self.gate_conv(gate_input)) self.update_running_cost(gate) if masked_func is not None: data_input = masked_func(data_input, gate) data, gate = self.encode(data_input, gate) output, = self.decode(data * gate) return output def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
import torch.utils.data
from itertools import product as product
from math import sqrt as sqrt

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tl.store(in_out_ptr0 + x0, tmp3, xmask)


@triton.jit
def triton_poi_fused_clamp_mul_tanh_view_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp2 = libdevice.tanh(tmp1)
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = tmp0 * tmp4
    tl.store(out_ptr0 + x3, tmp5, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_2, (1,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 1, 4, 4), (16, 16, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_clamp_mul_tanh_view_1[grid(256)](primals_4, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
    return buf2, primals_1, primals_3, primals_4, buf1


class SpatialGateNew(nn.Module):

    def __init__(self, in_channels: 'int', num_groups: 'int'=1, kernel_size: 'int'=1, padding: 'int'=0, stride: 'int'=1, gate_activation: 'str'='ReTanH', gate_activation_kargs: 'dict'=None, get_running_cost: 'callable'=None):
        super(SpatialGateNew, self).__init__()
        self.num_groups = num_groups
        self.gate_conv = nn.Conv2d(in_channels, num_groups, kernel_size, padding=padding, stride=stride)
        self.gate_activation = gate_activation
        self.gate_activation_kargs = gate_activation_kargs
        if gate_activation == 'ReTanH':
            self.gate_activate = lambda x: torch.tanh(x).clamp(min=0)
        elif gate_activation == 'Sigmoid':
            self.gate_activate = lambda x: torch.sigmoid(x)
        elif gate_activation == 'GeReTanH':
            assert 'tau' in gate_activation_kargs
            tau = gate_activation_kargs['tau']
            ttau = math.tanh(tau)
            self.gate_activate = lambda x: ((torch.tanh(x - tau) + ttau) / (1 + ttau)).clamp(min=0)
        else:
            raise NotImplementedError()
        self.get_running_cost = get_running_cost
        self.running_cost = None
        self.init_parameters()

    def init_parameters(self, init_gate=0.99):
        if self.gate_activation == 'ReTanH':
            bias_value = 0.5 * math.log((1 + init_gate) / (1 - init_gate))
        elif self.gate_activation == 'Sigmoid':
            bias_value = 0.5 * math.log(init_gate / (1 - init_gate))
        elif self.gate_activation == 'GeReTanH':
            tau = self.gate_activation_kargs['tau']
            bias_value = 0.5 * math.log((1 + init_gate * math.exp(2 * tau)) / (1 - init_gate))
        nn.init.normal_(self.gate_conv.weight, std=0.01)
        nn.init.constant_(self.gate_conv.bias, bias_value)

    def encode(self, *inputs):
        outputs = [x.view(x.shape[0] * self.num_groups, -1, *x.shape[2:]) for x in inputs]
        return outputs

    def decode(self, *inputs):
        outputs = [x.view(x.shape[0] // self.num_groups, -1, *x.shape[2:]) for x in inputs]
        return outputs

    def update_running_cost(self, gate):
        if self.get_running_cost is not None:
            cost = self.get_running_cost(gate)
            if self.running_cost is not None:
                self.running_cost = [(x + y) for x, y in zip(self.running_cost, cost)]
            else:
                self.running_cost = cost

    def clear_running_cost(self):
        self.running_cost = None

    def forward(self, input_0, input_1):
        primals_1 = self.gate_conv.weight
        primals_2 = self.gate_conv.bias
        primals_3 = input_0
        primals_4 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
StevenGrove/DynamicHead
SpatialGate
false
14,455
[ "Apache-2.0" ]
69
d62aa84e1d1c6a0c74d46258ad77b11413c10bef
https://github.com/StevenGrove/DynamicHead/tree/d62aa84e1d1c6a0c74d46258ad77b11413c10bef
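A note on the gate in this row: ReTanH is tanh clamped to the non-negative range, and init_parameters() picks the conv bias so the gate starts almost fully open. A minimal sketch of that inversion (standalone; assumes only torch and math, not part of the dataset row):

import math
import torch

# ReTanH keeps the gate in [0, 1): tanh clamped at zero.
retanh = lambda x: torch.tanh(x).clamp(min=0)

# Solving retanh(b) = g for b gives b = atanh(g) = 0.5 * log((1 + g) / (1 - g)),
# the same expression the 'ReTanH' branch of init_parameters() uses.
g = 0.99
b = 0.5 * math.log((1 + g) / (1 - g))
assert abs(retanh(torch.tensor(b)).item() - g) < 1e-5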
VAE_Kl_Loss
import torch
import torch.nn as nn
import torch.nn.functional
import torch.nn.parallel
import torch.utils.data
import torch.optim
import torch.utils.data.distributed


class VAE_Kl_Loss(nn.Module):

    def __init__(self, if_print=False):
        super(VAE_Kl_Loss, self).__init__()
        self.if_print = if_print

    def forward(self, means, variances):
        loss = self.standard_KL_loss(means, variances)
        if self.if_print:
            None  # no-op placeholder (likely a print statement stripped during extraction)
        return loss

    def standard_KL_loss(self, means, variances):
        loss_KL = torch.mean(torch.sum(0.5 * (means ** 2 + torch.exp(variances) - variances - 1), dim=1))
        return loss_KL


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional
import torch.nn.parallel
import torch.utils.data
import torch.optim
import torch.utils.data.distributed

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_exp_mean_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex % 16
    r1 = rindex // 16
    tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
    tmp2 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
    tmp10 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
    tmp12 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
    tmp19 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
    tmp21 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
    tmp28 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
    tmp30 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
    tmp1 = tmp0 * tmp0
    tmp3 = tl_math.exp(tmp2)
    tmp4 = tmp1 + tmp3
    tmp5 = tmp4 - tmp2
    tmp6 = 1.0
    tmp7 = tmp5 - tmp6
    tmp8 = 0.5
    tmp9 = tmp7 * tmp8
    tmp11 = tmp10 * tmp10
    tmp13 = tl_math.exp(tmp12)
    tmp14 = tmp11 + tmp13
    tmp15 = tmp14 - tmp12
    tmp16 = tmp15 - tmp6
    tmp17 = tmp16 * tmp8
    tmp18 = tmp9 + tmp17
    tmp20 = tmp19 * tmp19
    tmp22 = tl_math.exp(tmp21)
    tmp23 = tmp20 + tmp22
    tmp24 = tmp23 - tmp21
    tmp25 = tmp24 - tmp6
    tmp26 = tmp25 * tmp8
    tmp27 = tmp18 + tmp26
    tmp29 = tmp28 * tmp28
    tmp31 = tl_math.exp(tmp30)
    tmp32 = tmp29 + tmp31
    tmp33 = tmp32 - tmp30
    tmp34 = tmp33 - tmp6
    tmp35 = tmp34 * tmp8
    tmp36 = tmp27 + tmp35
    tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
    tmp39 = tl.sum(tmp37, 1)[:, None]
    tmp40 = 64.0
    tmp41 = tmp39 / tmp40
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp41, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_per_fused_add_exp_mean_mul_pow_sub_sum_0[grid(1)](buf2, arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf2,


class VAE_Kl_LossNew(nn.Module):

    def __init__(self, if_print=False):
        super(VAE_Kl_LossNew, self).__init__()
        self.if_print = if_print

    def standard_KL_loss(self, means, variances):
        loss_KL = torch.mean(torch.sum(0.5 * (means ** 2 + torch.exp(variances) - variances - 1), dim=1))
        return loss_KL

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
TPCD/LifelongReID
VAE_Kl_Loss
false
14,456
[ "MIT" ]
63
cb33f9c29fe398e7546db345fab1c338dda8252f
https://github.com/TPCD/LifelongReID/tree/cb33f9c29fe398e7546db345fab1c338dda8252f
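The loss in this row is the closed-form KL divergence from N(mu, sigma^2) to N(0, 1), with the second argument treated as a log-variance; in LaTeX:

D_{\mathrm{KL}}\big(\mathcal{N}(\mu,\sigma^2)\,\big\|\,\mathcal{N}(0,1)\big) = \frac{1}{2}\sum_i \big(\mu_i^2 + e^{\ell_i} - \ell_i - 1\big), \qquad \ell_i = \log\sigma_i^2

which is exactly the 0.5 * (means ** 2 + exp(variances) - variances - 1) term above, summed over dim=1 and averaged over the batch.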
CategoricalSampler
import torch
import torch.nn as nn


class Sampler(nn.Module):
    """
    args; logits: (batch, n_nodes)
    return; next_node: (batch, 1)
    TopKSampler <=> greedy; sample one with biggest probability
    CategoricalSampler <=> sampling; randomly sample one from possible distribution based on probability
    """

    def __init__(self, n_samples=1, **kwargs):
        super().__init__(**kwargs)
        self.n_samples = n_samples


class CategoricalSampler(Sampler):

    def forward(self, logits):
        return torch.multinomial(logits.exp(), self.n_samples)


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_exp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl_math.exp(tmp0)
    tl.store(out_ptr0 + x0, tmp1, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_exp_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del arg0_1
        buf1 = torch.ops.aten.multinomial.default(buf0, 1)
        del buf0
        buf2 = buf1
        del buf1
    return buf2,


class Sampler(nn.Module):
    """
    args; logits: (batch, n_nodes)
    return; next_node: (batch, 1)
    TopKSampler <=> greedy; sample one with biggest probability
    CategoricalSampler <=> sampling; randomly sample one from possible distribution based on probability
    """

    def __init__(self, n_samples=1, **kwargs):
        super().__init__(**kwargs)
        self.n_samples = n_samples


class CategoricalSamplerNew(Sampler):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
TSLNIHAOGIT/VRP_DRL_MHA
CategoricalSampler
false
14,457
[ "MIT" ]
55
6a59918ffb815fbdab4d75cb78130fc638c64d69
https://github.com/TSLNIHAOGIT/VRP_DRL_MHA/tree/6a59918ffb815fbdab4d75cb78130fc638c64d69
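Why logits.exp() works here: torch.multinomial normalises each weight row, so feeding exp(logits) samples from softmax(logits). A quick check of that proportionality (standalone sketch, not part of the dataset row):

import torch

logits = torch.randn(4, 4)
w = logits.exp()
probs = torch.softmax(logits, dim=-1)
# exp(logits) and softmax(logits) differ only by a per-row constant,
# which multinomial's internal normalisation removes.
assert torch.allclose(w / w.sum(dim=-1, keepdim=True), probs)
sample = torch.multinomial(w, 1)  # one sampled index per row
assert sample.shape == (4, 1)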
Pointnet
import torch
import torch.utils.data
import torch.nn as nn


class Pointnet(nn.Module):

    def __init__(self, in_channels, out_channels, hidden_dim, segmentation=False):
        super().__init__()
        self.fc_in = nn.Conv1d(in_channels, 2 * hidden_dim, 1)
        self.fc_0 = nn.Conv1d(2 * hidden_dim, hidden_dim, 1)
        self.fc_1 = nn.Conv1d(2 * hidden_dim, hidden_dim, 1)
        self.fc_2 = nn.Conv1d(2 * hidden_dim, hidden_dim, 1)
        self.fc_3 = nn.Conv1d(2 * hidden_dim, hidden_dim, 1)
        self.segmentation = segmentation
        if segmentation:
            self.fc_out = nn.Conv1d(2 * hidden_dim, out_channels, 1)
        else:
            self.fc_out = nn.Linear(hidden_dim, out_channels)
        self.activation = nn.ReLU()

    def forward(self, x):
        x = self.fc_in(x)
        x = self.fc_0(self.activation(x))
        x_pool = torch.max(x, dim=2, keepdim=True)[0].expand_as(x)
        x = torch.cat([x, x_pool], dim=1)
        x = self.fc_1(self.activation(x))
        x_pool = torch.max(x, dim=2, keepdim=True)[0].expand_as(x)
        x = torch.cat([x, x_pool], dim=1)
        x = self.fc_2(self.activation(x))
        x_pool = torch.max(x, dim=2, keepdim=True)[0].expand_as(x)
        x = torch.cat([x, x_pool], dim=1)
        x = self.fc_3(self.activation(x))
        if self.segmentation:
            x_pool = torch.max(x, dim=2, keepdim=True)[0].expand_as(x)
            x = torch.cat([x, x_pool], dim=1)
        else:
            x = torch.max(x, dim=2)[0]
        x = self.fc_out(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'hidden_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 8
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_convolution_max_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp36 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp3 + tmp1
    tmp5 = tmp2 > tmp4
    tmp6 = tmp2 == tmp4
    tmp7 = tmp2 != tmp2
    tmp8 = tmp4 != tmp4
    tmp9 = tmp7 > tmp8
    tmp10 = tmp5 | tmp9
    tmp11 = tmp7 & tmp8
    tmp12 = tmp6 | tmp11
    tmp13 = tl.full([1], 0, tl.int64)
    tmp14 = tl.full([1], 1, tl.int64)
    tmp15 = tmp13 < tmp14
    tmp16 = tmp12 & tmp15
    tmp17 = tmp10 | tmp16
    tmp18 = tl.where(tmp17, tmp2, tmp4)
    tmp19 = tl.where(tmp17, tmp13, tmp14)
    tmp21 = tmp20 + tmp1
    tmp22 = tmp18 > tmp21
    tmp23 = tmp18 == tmp21
    tmp24 = tmp18 != tmp18
    tmp25 = tmp21 != tmp21
    tmp26 = tmp24 > tmp25
    tmp27 = tmp22 | tmp26
    tmp28 = tmp24 & tmp25
    tmp29 = tmp23 | tmp28
    tmp30 = tl.full([1], 2, tl.int64)
    tmp31 = tmp19 < tmp30
    tmp32 = tmp29 & tmp31
    tmp33 = tmp27 | tmp32
    tmp34 = tl.where(tmp33, tmp18, tmp21)
    tmp35 = tl.where(tmp33, tmp19, tmp30)
    tmp37 = tmp36 + tmp1
    tmp38 = tmp34 > tmp37
    tmp39 = tmp34 == tmp37
    tmp40 = tmp34 != tmp34
    tmp41 = tmp37 != tmp37
    tmp42 = tmp40 > tmp41
    tmp43 = tmp38 | tmp42
    tmp44 = tmp40 & tmp41
    tmp45 = tmp39 | tmp44
    tmp46 = tl.full([1], 3, tl.int64)
    tmp47 = tmp35 < tmp46
    tmp48 = tmp45 & tmp47
    tmp49 = tmp43 | tmp48
    tl.where(tmp49, tmp34, tmp37)
    tmp51 = tl.where(tmp49, tmp35, tmp46)
    tmp52 = triton_helpers.maximum(tmp2, tmp4)
    tmp53 = triton_helpers.maximum(tmp52, tmp21)
    tmp54 = triton_helpers.maximum(tmp53, tmp37)
    tl.store(out_ptr0 + x2, tmp51, xmask)
    tl.store(out_ptr1 + x2, tmp54, xmask)


@triton.jit
def triton_poi_fused_cat_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 8
    x0 = xindex % 4
    x2 = xindex // 32
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp13 = tl.load(in_ptr2 + (4 * x2 + (-4 + x1)), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
    tmp14 = tl.where(tmp4, tmp9, tmp13)
    tmp15 = tl.full([1], 0, tl.int32)
    tmp16 = triton_helpers.maximum(tmp15, tmp14)
    tl.store(out_ptr0 + x3, tmp16, xmask)


@triton.jit
def triton_poi_fused_convolution_max_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp3 + tmp1
    tmp5 = triton_helpers.maximum(tmp2, tmp4)
    tmp7 = tmp6 + tmp1
    tmp8 = triton_helpers.maximum(tmp5, tmp7)
    tmp10 = tmp9 + tmp1
    tmp11 = triton_helpers.maximum(tmp8, tmp10)
    tmp12 = tmp2 > tmp4
    tmp13 = tmp2 == tmp4
    tmp14 = tmp2 != tmp2
    tmp15 = tmp4 != tmp4
    tmp16 = tmp14 > tmp15
    tmp17 = tmp12 | tmp16
    tmp18 = tmp14 & tmp15
    tmp19 = tmp13 | tmp18
    tmp20 = tl.full([1], 0, tl.int64)
    tmp21 = tl.full([1], 1, tl.int64)
    tmp22 = tmp20 < tmp21
    tmp23 = tmp19 & tmp22
    tmp24 = tmp17 | tmp23
    tmp25 = tl.where(tmp24, tmp2, tmp4)
    tmp26 = tl.where(tmp24, tmp20, tmp21)
    tmp27 = tmp25 > tmp7
    tmp28 = tmp25 == tmp7
    tmp29 = tmp25 != tmp25
    tmp30 = tmp7 != tmp7
    tmp31 = tmp29 > tmp30
    tmp32 = tmp27 | tmp31
    tmp33 = tmp29 & tmp30
    tmp34 = tmp28 | tmp33
    tmp35 = tl.full([1], 2, tl.int64)
    tmp36 = tmp26 < tmp35
    tmp37 = tmp34 & tmp36
    tmp38 = tmp32 | tmp37
    tmp39 = tl.where(tmp38, tmp25, tmp7)
    tmp40 = tl.where(tmp38, tmp26, tmp35)
    tmp41 = tmp39 > tmp10
    tmp42 = tmp39 == tmp10
    tmp43 = tmp39 != tmp39
    tmp44 = tmp10 != tmp10
    tmp45 = tmp43 > tmp44
    tmp46 = tmp41 | tmp45
    tmp47 = tmp43 & tmp44
    tmp48 = tmp42 | tmp47
    tmp49 = tl.full([1], 3, tl.int64)
    tmp50 = tmp40 < tmp49
    tmp51 = tmp48 & tmp50
    tmp52 = tmp46 | tmp51
    tl.where(tmp52, tmp39, tmp10)
    tmp54 = tl.where(tmp52, tmp40, tmp49)
    tl.store(out_ptr0 + x2, tmp11, xmask)
    tl.store(out_ptr1 + x2, tmp54, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args
    args.clear()
    assert_size_stride(primals_1, (8, 4, 1), (4, 1, 1))
    assert_size_stride(primals_2, (8,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 8, 1), (8, 1, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 8, 1), (8, 1, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 8, 1), (8, 1, 1))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4, 8, 1), (8, 1, 1))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (4, 4), (4, 1))
    assert_size_stride(primals_13, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf0, (4, 8, 4), (32, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(128)](buf1, primals_2, 128, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
        buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.int64)
        buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused_convolution_max_1[grid(16)](buf2, primals_5, buf3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
        triton_poi_fused_cat_relu_2[grid(128)](buf2, primals_5, buf4, buf5, 128, XBLOCK=128, num_warps=4, num_stages=1)
        del buf2
        del primals_5
        buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf6, (4, 4, 4), (16, 4, 1))
        buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.int64)
        buf8 = buf4
        del buf4
        triton_poi_fused_convolution_max_1[grid(16)](buf6, primals_7, buf7, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf9 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
        triton_poi_fused_cat_relu_2[grid(128)](buf6, primals_7, buf8, buf9, 128, XBLOCK=128, num_warps=4, num_stages=1)
        del buf6
        del primals_7
        buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf10, (4, 4, 4), (16, 4, 1))
        buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.int64)
        buf12 = buf8
        del buf8
        triton_poi_fused_convolution_max_1[grid(16)](buf10, primals_9, buf11, buf12, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf13 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
        triton_poi_fused_cat_relu_2[grid(128)](buf10, primals_9, buf12, buf13, 128, XBLOCK=128, num_warps=4, num_stages=1)
        del buf10
        del primals_9
        buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf14, (4, 4, 4), (16, 4, 1))
        buf15 = reinterpret_tensor(buf12, (4, 4), (4, 1), 0)
        del buf12
        buf16 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
        triton_poi_fused_convolution_max_3[grid(16)](buf14, primals_11, buf15, buf16, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del buf14
        del primals_11
        buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_13, buf15, reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf17)
        del primals_13
    return (buf17, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7, buf9, buf11, buf13, buf15, primals_12, reinterpret_tensor(buf16, (4, 4, 1), (4, 1, 1), 0))


class PointnetNew(nn.Module):

    def __init__(self, in_channels, out_channels, hidden_dim, segmentation=False):
        super().__init__()
        self.fc_in = nn.Conv1d(in_channels, 2 * hidden_dim, 1)
        self.fc_0 = nn.Conv1d(2 * hidden_dim, hidden_dim, 1)
        self.fc_1 = nn.Conv1d(2 * hidden_dim, hidden_dim, 1)
        self.fc_2 = nn.Conv1d(2 * hidden_dim, hidden_dim, 1)
        self.fc_3 = nn.Conv1d(2 * hidden_dim, hidden_dim, 1)
        self.segmentation = segmentation
        if segmentation:
            self.fc_out = nn.Conv1d(2 * hidden_dim, out_channels, 1)
        else:
            self.fc_out = nn.Linear(hidden_dim, out_channels)
        self.activation = nn.ReLU()

    def forward(self, input_0):
        primals_1 = self.fc_in.weight
        primals_2 = self.fc_in.bias
        primals_4 = self.fc_0.weight
        primals_5 = self.fc_0.bias
        primals_6 = self.fc_1.weight
        primals_7 = self.fc_1.bias
        primals_8 = self.fc_2.weight
        primals_9 = self.fc_2.bias
        primals_10 = self.fc_3.weight
        primals_11 = self.fc_3.bias
        primals_12 = self.fc_out.weight
        primals_13 = self.fc_out.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
        return output[0]
StructuralNeurobiologyLab/LightConvPoint
Pointnet
false
14,458
[ "Apache-2.0" ]
58
3f353f45e9e910fa390a74520dfd478e3e88f104
https://github.com/StructuralNeurobiologyLab/LightConvPoint/tree/3f353f45e9e910fa390a74520dfd478e3e88f104
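The recurring block in Pointnet.forward is per-point features concatenated with a globally max-pooled copy, doubling the channel count before each 1-D conv; a minimal shape sketch (standalone, not part of the dataset row):

import torch

x = torch.rand(4, 8, 16)  # (batch, channels, points)
x_pool = torch.max(x, dim=2, keepdim=True)[0].expand_as(x)
y = torch.cat([x, x_pool], dim=1)
assert y.shape == (4, 16, 16)  # channels doubled, point count unchanged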
ConvPlus
import torch
import torch.nn as nn


class ConvPlus(nn.Module):

    def __init__(self, c1, c2, k=3, s=1, g=1, bias=True):
        super(ConvPlus, self).__init__()
        self.cv1 = nn.Conv2d(c1, c2, (k, 1), s, (k // 2, 0), groups=g, bias=bias)
        self.cv2 = nn.Conv2d(c1, c2, (1, k), s, (0, k // 2), groups=g, bias=bias)

    def forward(self, x):
        return self.cv1(x) + self.cv2(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'c1': 4, 'c2': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x3, xmask)
    tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tl.store(in_out_ptr0 + x3, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 1), (12, 3, 1, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 1, 3), (12, 3, 3, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(0, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_convolution_0[grid(256)](buf2, primals_2, buf1, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf1
        del primals_2
        del primals_5
    return buf2, primals_1, primals_3, primals_4


class ConvPlusNew(nn.Module):

    def __init__(self, c1, c2, k=3, s=1, g=1, bias=True):
        super(ConvPlusNew, self).__init__()
        self.cv1 = nn.Conv2d(c1, c2, (k, 1), s, (k // 2, 0), groups=g, bias=bias)
        self.cv2 = nn.Conv2d(c1, c2, (1, k), s, (0, k // 2), groups=g, bias=bias)

    def forward(self, input_0):
        primals_1 = self.cv1.weight
        primals_2 = self.cv1.bias
        primals_4 = self.cv2.weight
        primals_5 = self.cv2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
Syencil/mobile-yolov5-pruning-distillation
ConvPlus
false
14,459
[ "MIT" ]
554
5d52454bb397ae49677b5da398e4192abc681325
https://github.com/Syencil/mobile-yolov5-pruning-distillation/tree/5d52454bb397ae49677b5da398e4192abc681325
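ConvPlus replaces a full k x k convolution with the sum of a (k, 1) and a (1, k) convolution, i.e. a cross-shaped receptive field at roughly 2/k of the weight count; a minimal shape check (standalone sketch):

import torch
import torch.nn as nn

cv1 = nn.Conv2d(4, 4, (3, 1), 1, (1, 0))  # vertical strip
cv2 = nn.Conv2d(4, 4, (1, 3), 1, (0, 1))  # horizontal strip
x = torch.rand(2, 4, 8, 8)
assert (cv1(x) + cv2(x)).shape == x.shape  # 'same' spatial size preserved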
attentionLayer
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import MultiheadAttention
from itertools import product as product


class attentionLayer(nn.Module):

    def __init__(self, d_model, nhead, dropout=0.1):
        super(attentionLayer, self).__init__()
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
        self.linear1 = nn.Linear(d_model, d_model * 4)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_model * 4, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = F.relu

    def forward(self, src, tar):
        src = src.transpose(0, 1)
        tar = tar.transpose(0, 1)
        src2 = self.self_attn(tar, src, src, attn_mask=None, key_padding_mask=None)[0]
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
        src = src + self.dropout2(src2)
        src = self.norm2(src)
        src = src.transpose(0, 1)
        return src


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'d_model': 4, 'nhead': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import MultiheadAttention
from itertools import product as product

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_out_ptr0, in_ptr0, in_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 4
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
    tmp1 = tl.load(in_out_ptr0 + (x1 + 4 * y0), xmask & ymask)
    tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + (x1 + 4 * y0), tmp4, xmask & ymask)


@triton.jit
def triton_poi_fused_native_layer_norm_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_out_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (12, 4), (4, 1))
    assert_size_stride(primals_4, (12,), (1,))
    assert_size_stride(primals_5, (4, 4), (4, 1))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (16, 4), (4, 1))
    assert_size_stride(primals_10, (16,), (1,))
    assert_size_stride(primals_11, (4, 16), (16, 1))
    assert_size_stride(primals_12, (4,), (1,))
    assert_size_stride(primals_13, (4,), (1,))
    assert_size_stride(primals_14, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0)
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 16), out=buf1)
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 32), out=buf2)
        del primals_3
        buf3 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0)
        del buf2
        get_raw_stream(0)
        triton_poi_fused_add_0[grid(16)](buf3, primals_4, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf4 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
        del buf0
        triton_poi_fused_mul_1[grid(16)](buf4, primals_4, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf5 = reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0)
        del buf1
        triton_poi_fused_add_2[grid(16)](buf5, primals_4, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_4
        buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf4, reinterpret_tensor(buf5, (4, 1, 4), (1, 0, 4), 0), out=buf6)
        buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_3[grid(64)](buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf8 = buf6
        del buf6
        triton_poi_fused__softmax_4[grid(64)](buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf9 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf8, reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 0), 0), out=buf9)
        buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        triton_poi_fused_clone_5[grid(4, 4)](buf9, buf10, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1)
        buf11 = reinterpret_tensor(buf9, (4, 4), (4, 1), 0)
        del buf9
        extern_kernels.mm(reinterpret_tensor(buf10, (4, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf11)
        buf12 = buf11
        del buf11
        triton_poi_fused_add_native_layer_norm_6[grid(4, 4)](buf12, primals_1, primals_6, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1)
        del primals_6
        buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf14 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        triton_poi_fused_native_layer_norm_7[grid(4)](buf12, buf13, buf14, 4, XBLOCK=4, num_warps=1, num_stages=1)
        buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_8[grid(16)](buf12, buf13, buf14, primals_7, primals_8, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_8
        buf16 = reinterpret_tensor(buf7, (4, 16), (16, 1), 0)
        del buf7
        extern_kernels.mm(buf15, reinterpret_tensor(primals_9, (4, 16), (1, 4), 0), out=buf16)
        buf17 = buf16
        del buf16
        triton_poi_fused_relu_9[grid(64)](buf17, primals_10, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_10
        buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf17, reinterpret_tensor(primals_11, (16, 4), (1, 16), 0), out=buf18)
        buf19 = buf18
        del buf18
        triton_poi_fused_add_10[grid(16)](buf19, buf15, primals_12, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_12
        buf20 = buf14
        del buf14
        buf21 = buf13
        del buf13
        triton_poi_fused_native_layer_norm_7[grid(4)](buf19, buf20, buf21, 4, XBLOCK=4, num_warps=1, num_stages=1)
        buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_8[grid(16)](buf19, buf20, buf21, primals_13, primals_14, buf22, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del buf20
        del buf21
        del primals_14
    return (reinterpret_tensor(buf22, (4, 4), (1, 4), 0), primals_7, primals_13, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), buf8, reinterpret_tensor(buf10, (4, 4), (4, 1), 0), buf12, buf15, buf17, buf19, primals_11, primals_9, primals_5, reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf4, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 1), 0))


class attentionLayerNew(nn.Module):

    def __init__(self, d_model, nhead, dropout=0.1):
        super(attentionLayerNew, self).__init__()
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
        self.linear1 = nn.Linear(d_model, d_model * 4)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_model * 4, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = F.relu

    def forward(self, input_0, input_1):
        primals_3 = self.self_attn.in_proj_weight
        primals_4 = self.self_attn.in_proj_bias
        primals_1 = self.self_attn.out_proj.weight
        primals_6 = self.self_attn.out_proj.bias
        primals_9 = self.linear1.weight
        primals_10 = self.linear1.bias
        primals_11 = self.linear2.weight
        primals_7 = self.linear2.bias
        primals_8 = self.norm1.weight
        primals_12 = self.norm1.bias
        primals_13 = self.norm2.weight
        primals_14 = self.norm2.bias
        primals_2 = input_0
        primals_5 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14])
        return output[0]
TaoRuijie/TalkNet_ASD
attentionLayer
false
14,460
[ "MIT" ]
79
4a2bc4859ee192ab450eaf63937a799212f2b021
https://github.com/TaoRuijie/TalkNet_ASD/tree/4a2bc4859ee192ab450eaf63937a799212f2b021
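The layer in this row is a standard post-norm cross-attention block, src <- LayerNorm(src + Attn(q=tar, k=src, v=src)) followed by src <- LayerNorm(src + FFN(src)); the transposes adapt (batch, seq, dim) inputs to MultiheadAttention's (seq, batch, dim) default. A minimal sketch of just the attention call (standalone, not part of the dataset row):

import torch
import torch.nn as nn

attn = nn.MultiheadAttention(embed_dim=4, num_heads=4, dropout=0.0)
src = torch.rand(5, 2, 4)  # (seq, batch, dim)
tar = torch.rand(5, 2, 4)
out, weights = attn(tar, src, src)  # query=tar, key=value=src
assert out.shape == tar.shape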
GlobalLayerNorm
import torch
import torch.nn as nn
from itertools import product as product


class GlobalLayerNorm(nn.Module):

    def __init__(self, channel_size):
        super(GlobalLayerNorm, self).__init__()
        self.gamma = nn.Parameter(torch.Tensor(1, channel_size, 1))
        self.beta = nn.Parameter(torch.Tensor(1, channel_size, 1))
        self.reset_parameters()

    def reset_parameters(self):
        self.gamma.data.fill_(1)
        self.beta.data.zero_()

    def forward(self, y):
        mean = y.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True)
        var = torch.pow(y - mean, 2).mean(dim=1, keepdim=True).mean(dim=2, keepdim=True)
        gLN_y = self.gamma * (y - mean) / torch.pow(var + 1e-08, 0.5) + self.beta
        return gLN_y


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'channel_size': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from itertools import product as product

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mean_pow_sub_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp9 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
    tmp10 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
    tmp12 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
    tmp14 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask)
    tmp18 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask)
    tmp19 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask)
    tmp21 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask)
    tmp23 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask)
    tmp27 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask)
    tmp28 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask)
    tmp30 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask)
    tmp32 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp11 = tmp9 + tmp10
    tmp13 = tmp11 + tmp12
    tmp15 = tmp13 + tmp14
    tmp16 = tmp15 / tmp7
    tmp17 = tmp8 + tmp16
    tmp20 = tmp18 + tmp19
    tmp22 = tmp20 + tmp21
    tmp24 = tmp22 + tmp23
    tmp25 = tmp24 / tmp7
    tmp26 = tmp17 + tmp25
    tmp29 = tmp27 + tmp28
    tmp31 = tmp29 + tmp30
    tmp33 = tmp31 + tmp32
    tmp34 = tmp33 / tmp7
    tmp35 = tmp26 + tmp34
    tmp36 = tmp35 / tmp7
    tmp37 = tmp0 - tmp36
    tmp38 = tmp37 * tmp37
    tmp39 = tmp1 - tmp36
    tmp40 = tmp39 * tmp39
    tmp41 = tmp38 + tmp40
    tmp42 = tmp3 - tmp36
    tmp43 = tmp42 * tmp42
    tmp44 = tmp41 + tmp43
    tmp45 = tmp5 - tmp36
    tmp46 = tmp45 * tmp45
    tmp47 = tmp44 + tmp46
    tmp48 = tmp47 / tmp7
    tmp49 = tmp9 - tmp36
    tmp50 = tmp49 * tmp49
    tmp51 = tmp10 - tmp36
    tmp52 = tmp51 * tmp51
    tmp53 = tmp50 + tmp52
    tmp54 = tmp12 - tmp36
    tmp55 = tmp54 * tmp54
    tmp56 = tmp53 + tmp55
    tmp57 = tmp14 - tmp36
    tmp58 = tmp57 * tmp57
    tmp59 = tmp56 + tmp58
    tmp60 = tmp59 / tmp7
    tmp61 = tmp48 + tmp60
    tmp62 = tmp18 - tmp36
    tmp63 = tmp62 * tmp62
    tmp64 = tmp19 - tmp36
    tmp65 = tmp64 * tmp64
    tmp66 = tmp63 + tmp65
    tmp67 = tmp21 - tmp36
    tmp68 = tmp67 * tmp67
    tmp69 = tmp66 + tmp68
    tmp70 = tmp23 - tmp36
    tmp71 = tmp70 * tmp70
    tmp72 = tmp69 + tmp71
    tmp73 = tmp72 / tmp7
    tmp74 = tmp61 + tmp73
    tmp75 = tmp27 - tmp36
    tmp76 = tmp75 * tmp75
    tmp77 = tmp28 - tmp36
    tmp78 = tmp77 * tmp77
    tmp79 = tmp76 + tmp78
    tmp80 = tmp30 - tmp36
    tmp81 = tmp80 * tmp80
    tmp82 = tmp79 + tmp81
    tmp83 = tmp32 - tmp36
    tmp84 = tmp83 * tmp83
    tmp85 = tmp82 + tmp84
    tmp86 = tmp85 / tmp7
    tmp87 = tmp74 + tmp86
    tmp88 = tmp87 / tmp7
    tl.store(out_ptr0 + x2, tmp36, xmask)
    tl.store(out_ptr1 + x2, tmp88, xmask)


@triton.jit
def triton_poi_fused_add_div_mul_pow_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 4
    x4 = xindex
    x0 = xindex % 4
    x3 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x4, xmask)
    tmp2 = tl.load(in_ptr2 + (x0 + 4 * x3), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + (x0 + 4 * x3), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 - tmp2
    tmp4 = tmp0 * tmp3
    tmp6 = 1e-08
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.sqrt(tmp7)
    tmp9 = tmp4 / tmp8
    tmp11 = tmp9 + tmp10
    tl.store(out_ptr0 + x4, tmp11, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1))
    assert_size_stride(primals_3, (1, 4, 1), (4, 1, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 1, 4), (4, 16, 16, 1), torch.float32)
        buf1 = empty_strided_cuda((4, 1, 1, 4), (4, 16, 16, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mean_pow_sub_0[grid(16)](primals_1, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_div_mul_pow_sub_1[grid(256)](primals_2, primals_1, buf0, buf1, primals_3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf0
        del buf1
        del primals_2
        del primals_3
    return buf2, primals_1


class GlobalLayerNormNew(nn.Module):

    def __init__(self, channel_size):
        super(GlobalLayerNormNew, self).__init__()
        self.gamma = nn.Parameter(torch.Tensor(1, channel_size, 1))
        self.beta = nn.Parameter(torch.Tensor(1, channel_size, 1))
        self.reset_parameters()

    def reset_parameters(self):
        self.gamma.data.fill_(1)
        self.beta.data.zero_()

    def forward(self, input_0):
        primals_2 = self.gamma
        primals_3 = self.beta
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
TaoRuijie/TalkNet_ASD
GlobalLayerNorm
false
14,461
[ "MIT" ]
79
4a2bc4859ee192ab450eaf63937a799212f2b021
https://github.com/TaoRuijie/TalkNet_ASD/tree/4a2bc4859ee192ab450eaf63937a799212f2b021
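Unlike nn.LayerNorm, the gLN above takes its statistics over both the channel and time axes (dims 1 and 2) while keeping per-channel affine parameters; in LaTeX:

\mathrm{gLN}(y) = \gamma \odot \frac{y - \mathrm{E}_{C,T}[y]}{\big(\mathrm{Var}_{C,T}[y] + \varepsilon\big)^{1/2}} + \beta, \qquad \varepsilon = 10^{-8}

The chained mean(dim=1).mean(dim=2) in the source is equivalent to a single mean over (C, T), since each inner mean averages an equally sized, equally weighted axis.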
adaILN
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter


class adaILN(nn.Module):

    def __init__(self, num_features, eps=1e-05):
        super(adaILN, self).__init__()
        self.eps = eps
        self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))
        self.rho.data.fill_(0.9)

    def forward(self, input, gamma, beta):
        in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True), torch.var(input, dim=[2, 3], keepdim=True)
        out_in = (input - in_mean) / torch.sqrt(in_var + self.eps)
        ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True), torch.var(input, dim=[1, 2, 3], keepdim=True)
        out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps)
        out = self.rho.expand(input.shape[0], -1, -1, -1) * out_in + (1 - self.rho.expand(input.shape[0], -1, -1, -1)) * out_ln
        out = out * gamma.unsqueeze(2).unsqueeze(3) + beta.unsqueeze(2).unsqueeze(3)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'num_features': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn.parameter import Parameter

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_add_mean_sqrt_var_0(in_out_ptr0, in_out_ptr1, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp8 = tl.where(xmask, tmp6, 0)
    tmp9 = tl.sum(tmp8, 1)[:, None]
    tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
    tmp11 = tmp10.to(tl.float32)
    tmp12 = tmp9 / tmp11
    tmp13 = tmp1 - tmp12
    tmp14 = tmp13 * tmp13
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
    tmp17 = tl.where(xmask, tmp15, 0)
    tmp18 = tl.sum(tmp17, 1)[:, None]
    tmp19 = 64.0
    tmp20 = tmp4 / tmp19
    tmp21 = 63.0
    tmp22 = tmp18 / tmp21
    tmp23 = 1e-05
    tmp24 = tmp22 + tmp23
    tmp25 = libdevice.sqrt(tmp24)
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp20, xmask)
    tl.debug_barrier()
    tl.store(in_out_ptr1 + x0, tmp25, xmask)


@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    x2 = xindex % 4
    x3 = xindex // 4
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp26 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
    tmp34 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp8 = tl.where(xmask, tmp6, 0)
    tmp9 = tl.sum(tmp8, 1)[:, None]
    tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
    tmp11 = tmp10.to(tl.float32)
    tmp12 = tmp9 / tmp11
    tmp13 = tmp1 - tmp12
    tmp14 = tmp13 * tmp13
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
    tmp17 = tl.where(xmask, tmp15, 0)
    tmp18 = tl.sum(tmp17, 1)[:, None]
    tmp19 = 16.0
    tmp20 = tmp4 / tmp19
    tmp21 = 15.0
    tmp22 = tmp18 / tmp21
    tmp23 = 1e-05
    tmp24 = tmp22 + tmp23
    tmp25 = libdevice.sqrt(tmp24)
    tmp27 = tmp0 - tmp20
    tmp28 = tmp27 / tmp25
    tmp29 = tmp26 * tmp28
    tmp30 = 1.0
    tmp31 = tmp30 - tmp26
    tmp33 = tmp0 - tmp32
    tmp35 = tmp33 / tmp34
    tmp36 = tmp31 * tmp35
    tmp37 = tmp29 + tmp36
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp20, xmask)
    tl.debug_barrier()
    tl.store(in_out_ptr1 + x0, tmp25, xmask)
    tl.store(out_ptr0 + (r1 + 16 * x0), tmp37, xmask)


@triton.jit
def triton_poi_fused_add_div_mul_rsub_sub_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex % 256
    x0 = xindex % 16
    x2 = xindex // 256
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + x3, None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x0 + 16 * x2), None, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp4 = tmp2 + tmp3
    tl.store(out_ptr0 + x4, tmp4, None)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf6 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
        buf7 = reinterpret_tensor(buf6, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf6
        buf11 = reinterpret_tensor(buf9, (4, 1, 1, 1), (1, 1, 1, 1), 0)
        del buf9
        get_raw_stream(0)
        triton_per_fused_add_mean_sqrt_var_0[grid(4)](buf7, buf11, primals_1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
        del buf0
        buf5 = reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0)
        del buf3
        buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1[grid(16)](buf1, buf5, primals_1, primals_2, buf7, buf11, buf12, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
        del primals_2
        buf13 = empty_strided_cuda((4, 4, 4, 4, 4, 4), (1024, 256, 64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_div_mul_rsub_sub_2[grid(4096)](buf12, primals_3, primals_4, buf13, 4096, XBLOCK=256, num_warps=4, num_stages=1)
        del buf12
        del primals_4
    return buf13, primals_1, primals_3, buf1, buf5, buf7, buf11


class adaILNNew(nn.Module):

    def __init__(self, num_features, eps=1e-05):
        super(adaILNNew, self).__init__()
        self.eps = eps
        self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))
        self.rho.data.fill_(0.9)

    def forward(self, input_0, input_1, input_2):
        primals_2 = self.rho
        primals_1 = input_0
        primals_3 = input_1
        primals_4 = input_2
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
SubZero12556/Cats2dogs_ONNX
adaILN
false
14,462
[ "MIT" ]
2,519
52a6a60d519e23b02f0847f0fa9f9ead89ca5f4e
https://github.com/SubZero12556/Cats2dogs_ONNX/tree/52a6a60d519e23b02f0847f0fa9f9ead89ca5f4e
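adaILN blends instance statistics (per channel, over H x W) with layer statistics (over C x H x W) through a learned per-channel gate rho, then applies the externally supplied gamma and beta; in LaTeX:

\mathrm{out} = \Big(\rho \odot \hat{y}_{\mathrm{IN}} + (1-\rho) \odot \hat{y}_{\mathrm{LN}}\Big)\gamma + \beta, \qquad \hat{y} = \frac{y - \mu}{\sqrt{\sigma^2 + \varepsilon}}

Both variances are unbiased (torch.var's default, visible as the /63 and /15 divisors in the kernels above), and rho is initialised to 0.9, so the layer starts close to pure instance normalisation.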
Unit1D
import torch
import torch.nn as nn
import torch.nn.functional as F


class Unit1D(nn.Module):

    def __init__(self, in_channels, output_channels, kernel_shape=1, stride=1, padding='same', activation_fn=F.relu, use_bias=True):
        super(Unit1D, self).__init__()
        self.conv1d = nn.Conv1d(in_channels, output_channels, kernel_shape, stride, padding=0, bias=use_bias)
        self._activation_fn = activation_fn
        self._padding = padding
        self._stride = stride
        self._kernel_shape = kernel_shape

    def compute_pad(self, t):
        if t % self._stride == 0:
            return max(self._kernel_shape - self._stride, 0)
        else:
            return max(self._kernel_shape - t % self._stride, 0)

    def forward(self, x):
        if self._padding == 'same':
            _batch, _channel, t = x.size()
            pad_t = self.compute_pad(t)
            pad_t_f = pad_t // 2
            pad_t_b = pad_t - pad_t_f
            x = F.pad(x, [pad_t_f, pad_t_b])
        x = self.conv1d(x)
        if self._activation_fn is not None:
            x = self._activation_fn(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'output_channels': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x3, tmp4, xmask)
    tl.store(out_ptr0 + x3, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_threshold_backward_0[grid(64)](buf1, primals_3, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_3
    return buf1, primals_1, primals_2, buf2


class Unit1DNew(nn.Module):

    def __init__(self, in_channels, output_channels, kernel_shape=1, stride=1, padding='same', activation_fn=F.relu, use_bias=True):
        super(Unit1DNew, self).__init__()
        self.conv1d = nn.Conv1d(in_channels, output_channels, kernel_shape, stride, padding=0, bias=use_bias)
        self._activation_fn = activation_fn
        self._padding = padding
        self._stride = stride
        self._kernel_shape = kernel_shape

    def compute_pad(self, t):
        if t % self._stride == 0:
            return max(self._kernel_shape - self._stride, 0)
        else:
            return max(self._kernel_shape - t % self._stride, 0)

    def forward(self, input_0):
        primals_2 = self.conv1d.weight
        primals_3 = self.conv1d.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
TencentYoutuResearch/ActionDetection-AFSD
Unit1D
false
14,463
[ "BSD-3-Clause" ]
112
ed86a0df91e58baa7d78c796ed29cff82b1f3fa6
https://github.com/TencentYoutuResearch/ActionDetection-AFSD/tree/ed86a0df91e58baa7d78c796ed29cff82b1f3fa6
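compute_pad() above reproduces TensorFlow-style 'same' padding for 1-D convolutions, split into front and back halves; for the default kernel_shape=1, stride=1 the pad is zero, which is why the compiled graph in this row contains no F.pad at all. A standalone sketch:

def compute_pad(t, kernel_shape=1, stride=1):
    # total padding so the output length equals ceil(t / stride)
    if t % stride == 0:
        return max(kernel_shape - stride, 0)
    return max(kernel_shape - t % stride, 0)

assert compute_pad(4, kernel_shape=1) == 0  # the compiled case above
assert compute_pad(4, kernel_shape=3) == 2  # pads 1 front, 1 back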
ScaleExp
import torch
import torch.nn as nn


class ScaleExp(nn.Module):

    def __init__(self, init_value=1.0):
        super(ScaleExp, self).__init__()
        self.scale = nn.Parameter(torch.FloatTensor([init_value]))

    def forward(self, input):
        return torch.exp(input * self.scale)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_exp_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 * tmp2
    tmp4 = tl_math.exp(tmp3)
    tl.store(out_ptr0 + x0, tmp4, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (1,), (1,))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_exp_mul_0[grid(256)](primals_2, primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
    return buf0, primals_2, buf0


class ScaleExpNew(nn.Module):

    def __init__(self, init_value=1.0):
        super(ScaleExpNew, self).__init__()
        self.scale = nn.Parameter(torch.FloatTensor([init_value]))

    def forward(self, input_0):
        primals_1 = self.scale
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
TencentYoutuResearch/ActionDetection-AFSD
ScaleExp
false
14464
[ "BSD-3-Clause" ]
112
ed86a0df91e58baa7d78c796ed29cff82b1f3fa6
https://github.com/TencentYoutuResearch/ActionDetection-AFSD/tree/ed86a0df91e58baa7d78c796ed29cff82b1f3fa6
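The ScaleExp record fuses the multiply and the exp into a single elementwise kernel; here is a quick eager-mode cross-check — a sketch assuming a CUDA device and that ScaleExpNew from the optimised code above is in scope.
import torch
x = torch.rand(4, 4, 4, 4, device='cuda')
m = ScaleExpNew(init_value=1.0).cuda()
# triton_poi_fused_exp_mul_0 computes exp(input * scale) elementwise.
assert torch.allclose(m(x), torch.exp(x * m.scale))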
GAT
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F class GraphAttentionLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout, alpha, concat=False ): super(GraphAttentionLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(nn.init.xavier_uniform_(torch.Tensor( in_features, out_features).type(torch.FloatTensor if torch.cuda .is_available() else torch.FloatTensor), gain=np.sqrt(2.0)), requires_grad=True) self.a = nn.Parameter(nn.init.xavier_uniform_(torch.Tensor(2 * out_features, 1).type(torch.FloatTensor if torch.cuda. is_available() else torch.FloatTensor), gain=np.sqrt(2.0)), requires_grad=True) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, input, adj): h = torch.mm(input, self.W) N = h.size()[0] a_input = torch.cat([h.repeat(1, N).view(N * N, -1), h.repeat(N, 1) ], dim=1).view(N, -1, 2 * self.out_features) e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2)) zero_vec = torch.zeros_like(e) zero_vec = zero_vec.fill_(9e-15) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=1) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, h) if self.concat: return F.elu(h_prime) else: return h_prime def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GAT(nn.Module): def __init__(self, nfeat, nhid, dropout, alpha, nheads): super(GAT, self).__init__() self.dropout = dropout self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) def forward(self, x, adj): x = F.dropout(x, self.dropout) x = torch.cat([att(x, adj) for att in self.attentions], dim=1) x = F.dropout(x, self.dropout) return x def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'nfeat': 4, 'nhid': 4, 'dropout': 0.5, 'alpha': 4, 'nheads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import numpy as np import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * ((4 * x1 + x0) // 16 % 4) + (4 * x1 + x0) % 16 % 4), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr0 + (4 * (x1 % 4) + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_fill_leaky_relu_where_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp2 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp15 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp22 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp23 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp40 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last').to( tl.int1) tmp41 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last') tmp45 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp46 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp51 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp52 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp57 
= tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp58 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp74 = tl.load(in_ptr5 + 4 * x0, xmask, eviction_policy='evict_last').to( tl.int1) tmp75 = tl.load(in_ptr6 + 4 * x0, xmask, eviction_policy='evict_last') tmp79 = tl.load(in_ptr5 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp80 = tl.load(in_ptr6 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp85 = tl.load(in_ptr5 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp86 = tl.load(in_ptr6 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp91 = tl.load(in_ptr5 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp92 = tl.load(in_ptr6 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp108 = tl.load(in_ptr7 + 4 * x0, xmask, eviction_policy='evict_last').to( tl.int1) tmp109 = tl.load(in_ptr8 + 4 * x0, xmask, eviction_policy='evict_last') tmp113 = tl.load(in_ptr7 + (1 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp114 = tl.load(in_ptr8 + (1 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp119 = tl.load(in_ptr7 + (2 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp120 = tl.load(in_ptr8 + (2 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp125 = tl.load(in_ptr7 + (3 + 4 * x0), xmask, eviction_policy= 'evict_last').to(tl.int1) tmp126 = tl.load(in_ptr8 + (3 + 4 * x0), xmask, eviction_policy= 'evict_last') tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tl.where(tmp1, tmp2, tmp4) tmp6 = 9e-15 tmp7 = tl.where(tmp0, tmp5, tmp6) tmp11 = tmp10 * tmp3 tmp12 = tl.where(tmp9, tmp10, tmp11) tmp13 = tl.where(tmp8, tmp12, tmp6) tmp14 = triton_helpers.maximum(tmp7, tmp13) tmp18 = tmp17 * tmp3 tmp19 = tl.where(tmp16, tmp17, tmp18) tmp20 = tl.where(tmp15, tmp19, tmp6) tmp21 = triton_helpers.maximum(tmp14, tmp20) tmp25 = tmp24 * tmp3 tmp26 = tl.where(tmp23, tmp24, tmp25) tmp27 = tl.where(tmp22, tmp26, tmp6) tmp28 = triton_helpers.maximum(tmp21, tmp27) tmp29 = tmp7 - tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = tmp13 - tmp28 tmp32 = tl_math.exp(tmp31) tmp33 = tmp30 + tmp32 tmp34 = tmp20 - tmp28 tmp35 = tl_math.exp(tmp34) tmp36 = tmp33 + tmp35 tmp37 = tmp27 - tmp28 tmp38 = tl_math.exp(tmp37) tmp39 = tmp36 + tmp38 tmp42 = tmp41 * tmp3 tmp43 = tl.where(tmp40, tmp41, tmp42) tmp44 = tl.where(tmp0, tmp43, tmp6) tmp47 = tmp46 * tmp3 tmp48 = tl.where(tmp45, tmp46, tmp47) tmp49 = tl.where(tmp8, tmp48, tmp6) tmp50 = triton_helpers.maximum(tmp44, tmp49) tmp53 = tmp52 * tmp3 tmp54 = tl.where(tmp51, tmp52, tmp53) tmp55 = tl.where(tmp15, tmp54, tmp6) tmp56 = triton_helpers.maximum(tmp50, tmp55) tmp59 = tmp58 * tmp3 tmp60 = tl.where(tmp57, tmp58, tmp59) tmp61 = tl.where(tmp22, tmp60, tmp6) tmp62 = triton_helpers.maximum(tmp56, tmp61) tmp63 = tmp44 - tmp62 tmp64 = tl_math.exp(tmp63) tmp65 = tmp49 - tmp62 tmp66 = tl_math.exp(tmp65) tmp67 = tmp64 + tmp66 tmp68 = tmp55 - tmp62 tmp69 = tl_math.exp(tmp68) tmp70 = tmp67 + tmp69 tmp71 = tmp61 - tmp62 tmp72 = tl_math.exp(tmp71) tmp73 = tmp70 + tmp72 tmp76 = tmp75 * tmp3 tmp77 = tl.where(tmp74, tmp75, tmp76) tmp78 = tl.where(tmp0, tmp77, tmp6) tmp81 = tmp80 * tmp3 tmp82 = tl.where(tmp79, tmp80, tmp81) tmp83 = tl.where(tmp8, tmp82, tmp6) tmp84 = triton_helpers.maximum(tmp78, tmp83) tmp87 = tmp86 * tmp3 tmp88 = tl.where(tmp85, tmp86, tmp87) tmp89 = tl.where(tmp15, tmp88, tmp6) tmp90 = triton_helpers.maximum(tmp84, tmp89) tmp93 = tmp92 * tmp3 tmp94 = tl.where(tmp91, tmp92, tmp93) tmp95 = tl.where(tmp22, tmp94, tmp6) tmp96 = 
triton_helpers.maximum(tmp90, tmp95) tmp97 = tmp78 - tmp96 tmp98 = tl_math.exp(tmp97) tmp99 = tmp83 - tmp96 tmp100 = tl_math.exp(tmp99) tmp101 = tmp98 + tmp100 tmp102 = tmp89 - tmp96 tmp103 = tl_math.exp(tmp102) tmp104 = tmp101 + tmp103 tmp105 = tmp95 - tmp96 tmp106 = tl_math.exp(tmp105) tmp107 = tmp104 + tmp106 tmp110 = tmp109 * tmp3 tmp111 = tl.where(tmp108, tmp109, tmp110) tmp112 = tl.where(tmp0, tmp111, tmp6) tmp115 = tmp114 * tmp3 tmp116 = tl.where(tmp113, tmp114, tmp115) tmp117 = tl.where(tmp8, tmp116, tmp6) tmp118 = triton_helpers.maximum(tmp112, tmp117) tmp121 = tmp120 * tmp3 tmp122 = tl.where(tmp119, tmp120, tmp121) tmp123 = tl.where(tmp15, tmp122, tmp6) tmp124 = triton_helpers.maximum(tmp118, tmp123) tmp127 = tmp126 * tmp3 tmp128 = tl.where(tmp125, tmp126, tmp127) tmp129 = tl.where(tmp22, tmp128, tmp6) tmp130 = triton_helpers.maximum(tmp124, tmp129) tmp131 = tmp112 - tmp130 tmp132 = tl_math.exp(tmp131) tmp133 = tmp117 - tmp130 tmp134 = tl_math.exp(tmp133) tmp135 = tmp132 + tmp134 tmp136 = tmp123 - tmp130 tmp137 = tl_math.exp(tmp136) tmp138 = tmp135 + tmp137 tmp139 = tmp129 - tmp130 tmp140 = tl_math.exp(tmp139) tmp141 = tmp138 + tmp140 tl.store(out_ptr0 + x0, tmp28, xmask) tl.store(out_ptr1 + x0, tmp39, xmask) tl.store(out_ptr2 + x0, tmp62, xmask) tl.store(out_ptr3 + x0, tmp73, xmask) tl.store(out_ptr4 + x0, tmp96, xmask) tl.store(out_ptr5 + x0, tmp107, xmask) tl.store(out_ptr6 + x0, tmp130, xmask) tl.store(out_ptr7 + x0, tmp141, xmask) @triton.jit def triton_poi_fused__softmax_fill_leaky_relu_where_3(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1) tmp2 = tl.load(in_out_ptr0 + x2, xmask) tmp8 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr4 + x2, xmask).to(tl.int1) tmp14 = tl.load(in_out_ptr1 + x2, xmask) tmp18 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr7 + x2, xmask).to(tl.int1) tmp24 = tl.load(in_out_ptr2 + x2, xmask) tmp28 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr9 + x1, xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr10 + x2, xmask).to(tl.int1) tmp34 = tl.load(in_out_ptr3 + x2, xmask) tmp38 = tl.load(in_ptr11 + x1, xmask, eviction_policy='evict_last') tmp41 = tl.load(in_ptr12 + x1, xmask, eviction_policy='evict_last') tmp3 = 4.0 tmp4 = tmp2 * tmp3 tmp5 = tl.where(tmp1, tmp2, tmp4) tmp6 = 9e-15 tmp7 = tl.where(tmp0, tmp5, tmp6) tmp9 = tmp7 - tmp8 tmp10 = tl_math.exp(tmp9) tmp12 = tmp10 / tmp11 tmp15 = tmp14 * tmp3 tmp16 = tl.where(tmp13, tmp14, tmp15) tmp17 = tl.where(tmp0, tmp16, tmp6) tmp19 = tmp17 - tmp18 tmp20 = tl_math.exp(tmp19) tmp22 = tmp20 / tmp21 tmp25 = tmp24 * tmp3 tmp26 = tl.where(tmp23, tmp24, tmp25) tmp27 = tl.where(tmp0, tmp26, tmp6) tmp29 = tmp27 - tmp28 tmp30 = tl_math.exp(tmp29) tmp32 = tmp30 / tmp31 tmp35 = tmp34 * tmp3 tmp36 = tl.where(tmp33, tmp34, tmp35) tmp37 = tl.where(tmp0, tmp36, tmp6) tmp39 = tmp37 - tmp38 tmp40 = tl_math.exp(tmp39) tmp42 = tmp40 / tmp41 tl.store(in_out_ptr0 + x2, tmp12, xmask) 
tl.store(in_out_ptr1 + x2, tmp22, xmask) tl.store(in_out_ptr2 + x2, tmp32, xmask) tl.store(in_out_ptr3 + x2, tmp42, xmask) @triton.jit def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = 0.0 tmp7 = tmp5 > tmp6 tmp8 = 1.0 tmp9 = tmp5 * tmp8 tmp10 = libdevice.expm1(tmp9) tmp11 = tmp10 * tmp8 tmp12 = tl.where(tmp7, tmp9, tmp11) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp4, tmp12, tmp13) tmp15 = tmp0 >= tmp3 tmp16 = tl.full([1], 8, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tmp15 & tmp17 tmp19 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp18 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tmp19 > tmp6 tmp21 = tmp19 * tmp8 tmp22 = libdevice.expm1(tmp21) tmp23 = tmp22 * tmp8 tmp24 = tl.where(tmp20, tmp21, tmp23) tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype) tmp26 = tl.where(tmp18, tmp24, tmp25) tmp27 = tmp0 >= tmp16 tmp28 = tl.full([1], 12, tl.int64) tmp29 = tmp0 < tmp28 tmp30 = tmp27 & tmp29 tmp31 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp30 & xmask, eviction_policy='evict_last', other=0.0) tmp32 = tmp31 > tmp6 tmp33 = tmp31 * tmp8 tmp34 = libdevice.expm1(tmp33) tmp35 = tmp34 * tmp8 tmp36 = tl.where(tmp32, tmp33, tmp35) tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype) tmp38 = tl.where(tmp30, tmp36, tmp37) tmp39 = tmp0 >= tmp28 tl.full([1], 16, tl.int64) tmp42 = tl.load(in_ptr3 + (4 * x1 + (-12 + x0)), tmp39 & xmask, eviction_policy='evict_last', other=0.0) tmp43 = tmp42 > tmp6 tmp44 = tmp42 * tmp8 tmp45 = libdevice.expm1(tmp44) tmp46 = tmp45 * tmp8 tmp47 = tl.where(tmp43, tmp44, tmp46) tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype) tmp49 = tl.where(tmp39, tmp47, tmp48) tmp50 = tl.where(tmp30, tmp38, tmp49) tmp51 = tl.where(tmp18, tmp26, tmp50) tmp52 = tl.where(tmp4, tmp14, tmp51) tl.store(out_ptr0 + x2, tmp52, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (8, 1), (1, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (8, 1), (1, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (8, 1), (1, 1)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (8, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten.native_dropout.default(primals_1, 0.5, True) del primals_1 buf1 = buf0[0] del buf0 buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, primals_2, out=buf3) del primals_2 buf4 = empty_strided_cuda((16, 8), (8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(128)](buf3, buf4, 128, XBLOCK=128, num_warps=4, num_stages=1) buf5 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf4, primals_3, out=buf5) buf6 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool) 
triton_poi_fused_leaky_relu_1[grid(16)](primals_4, buf7, 16, XBLOCK =16, num_warps=1, num_stages=1) del primals_4 buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, primals_5, out=buf12) del primals_5 buf13 = empty_strided_cuda((16, 8), (8, 1), torch.float32) triton_poi_fused_cat_0[grid(128)](buf12, buf13, 128, XBLOCK=128, num_warps=4, num_stages=1) buf14 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf13, primals_6, out=buf14) buf15 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(16)](buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf20 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, primals_7, out=buf20) del primals_7 buf21 = empty_strided_cuda((16, 8), (8, 1), torch.float32) triton_poi_fused_cat_0[grid(128)](buf20, buf21, 128, XBLOCK=128, num_warps=4, num_stages=1) buf22 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf21, primals_8, out=buf22) buf23 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(16)](buf22, buf23, 16, XBLOCK=16, num_warps=1, num_stages=1) buf28 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf1, primals_9, out=buf28) del primals_9 buf29 = empty_strided_cuda((16, 8), (8, 1), torch.float32) triton_poi_fused_cat_0[grid(128)](buf28, buf29, 128, XBLOCK=128, num_warps=4, num_stages=1) buf30 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(buf29, primals_10, out=buf30) buf31 = empty_strided_cuda((4, 4), (4, 1), torch.bool) triton_poi_fused_leaky_relu_1[grid(16)](buf30, buf31, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf9 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf16 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf17 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf24 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf25 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf32 = empty_strided_cuda((4, 1), (1, 4), torch.float32) buf33 = empty_strided_cuda((4, 1), (1, 4), torch.float32) triton_poi_fused__softmax_fill_leaky_relu_where_2[grid(4)](buf7, buf6, buf5, buf15, buf14, buf23, buf22, buf31, buf30, buf8, buf9, buf16, buf17, buf24, buf25, buf32, buf33, 4, XBLOCK=4, num_warps=1, num_stages=1) buf10 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0) del buf5 buf18 = reinterpret_tensor(buf14, (4, 4), (4, 1), 0) del buf14 buf26 = reinterpret_tensor(buf22, (4, 4), (4, 1), 0) del buf22 buf34 = reinterpret_tensor(buf30, (4, 4), (4, 1), 0) del buf30 triton_poi_fused__softmax_fill_leaky_relu_where_3[grid(16)](buf10, buf18, buf26, buf34, buf7, buf6, buf8, buf9, buf15, buf16, buf17, buf23, buf24, buf25, buf31, buf32, buf33, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf16 del buf17 del buf24 del buf25 del buf32 del buf33 del buf8 del buf9 buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf10, buf3, out=buf11) buf19 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf18, buf12, out=buf19) buf27 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf26, buf20, out=buf27) buf35 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf34, buf28, out=buf35) buf36 = empty_strided_cuda((4, 16), (16, 1), torch.float32) triton_poi_fused_cat_4[grid(64)](buf11, buf19, buf27, buf35, buf36, 64, XBLOCK=64, num_warps=1, num_stages=1) buf37 = torch.ops.aten.native_dropout.default(buf36, 0.5, True) del 
buf36 buf38 = buf37[0] buf39 = buf37[1] del buf37 return (buf38, buf6, buf7, buf10, buf11, buf15, buf18, buf19, buf23, buf26, buf27, buf31, buf34, buf35, buf39, reinterpret_tensor(buf28, (4, 4), (1, 4), 0), reinterpret_tensor(buf29, (8, 16), (1, 8), 0), reinterpret_tensor(primals_10, (1, 8), (1, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), reinterpret_tensor( buf20, (4, 4), (1, 4), 0), reinterpret_tensor(buf21, (8, 16), (1, 8 ), 0), reinterpret_tensor(primals_8, (1, 8), (1, 1), 0), reinterpret_tensor(buf12, (4, 4), (1, 4), 0), reinterpret_tensor( buf13, (8, 16), (1, 8), 0), reinterpret_tensor(primals_6, (1, 8), ( 1, 1), 0), reinterpret_tensor(buf3, (4, 4), (1, 4), 0), reinterpret_tensor(buf4, (8, 16), (1, 8), 0), reinterpret_tensor( primals_3, (1, 8), (1, 1), 0)) class GraphAttentionLayer(nn.Module): """ Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 """ def __init__(self, in_features, out_features, dropout, alpha, concat=False ): super(GraphAttentionLayer, self).__init__() self.dropout = dropout self.in_features = in_features self.out_features = out_features self.alpha = alpha self.concat = concat self.W = nn.Parameter(nn.init.xavier_uniform_(torch.Tensor( in_features, out_features).type(torch.FloatTensor if torch.cuda .is_available() else torch.FloatTensor), gain=np.sqrt(2.0)), requires_grad=True) self.a = nn.Parameter(nn.init.xavier_uniform_(torch.Tensor(2 * out_features, 1).type(torch.FloatTensor if torch.cuda. is_available() else torch.FloatTensor), gain=np.sqrt(2.0)), requires_grad=True) self.leakyrelu = nn.LeakyReLU(self.alpha) def forward(self, input, adj): h = torch.mm(input, self.W) N = h.size()[0] a_input = torch.cat([h.repeat(1, N).view(N * N, -1), h.repeat(N, 1) ], dim=1).view(N, -1, 2 * self.out_features) e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2)) zero_vec = torch.zeros_like(e) zero_vec = zero_vec.fill_(9e-15) attention = torch.where(adj > 0, e, zero_vec) attention = F.softmax(attention, dim=1) attention = F.dropout(attention, self.dropout, training=self.training) h_prime = torch.matmul(attention, h) if self.concat: return F.elu(h_prime) else: return h_prime def __repr__(self): return self.__class__.__name__ + ' (' + str(self.in_features ) + ' -> ' + str(self.out_features) + ')' class GATNew(nn.Module): def __init__(self, nfeat, nhid, dropout, alpha, nheads): super(GATNew, self).__init__() self.dropout = dropout self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in range(nheads)] for i, attention in enumerate(self.attentions): self.add_module('attention_{}'.format(i), attention) def forward(self, input_0, input_1): primals_1 = self.attention_0.W primals_3 = self.attention_0.a primals_2 = self.attention_1.W primals_6 = self.attention_1.a primals_4 = self.attention_2.W primals_8 = self.attention_2.a primals_5 = self.attention_3.W primals_10 = self.attention_3.a primals_7 = input_0 primals_9 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
StrangeTcy/Q-BERT
GAT
false
14465
[ "MIT" ]
57
4e4cd4ddda3036d4bf7d878641592462189245d4
https://github.com/StrangeTcy/Q-BERT/tree/4e4cd4ddda3036d4bf7d878641592462189245d4
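An eager-mode sketch of the masked-softmax scoring that the fused kernels (triton_poi_fused__softmax_fill_leaky_relu_where_2/3) implement once per attention head. The shapes, the alpha=4 slope, and the 9e-15 fill value follow the record's get_init_inputs; everything else here is illustrative.
import torch
import torch.nn.functional as F
N, out_features, alpha = 4, 4, 4.0
h = torch.rand(N, out_features)          # projected node features (input @ W)
a = torch.rand(2 * out_features, 1)
adj = (torch.rand(N, N) > 0.5).float()
# Pairwise concatenation of node features, as in GraphAttentionLayer.forward.
a_input = torch.cat([h.repeat(1, N).view(N * N, -1), h.repeat(N, 1)],
                    dim=1).view(N, N, 2 * out_features)
e = F.leaky_relu(torch.matmul(a_input, a).squeeze(2), negative_slope=alpha)
# Masked softmax: non-edge positions are filled with 9e-15 before normalising.
attention = F.softmax(torch.where(adj > 0, e, torch.full_like(e, 9e-15)), dim=1)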
Unit3D
import torch import torch.nn as nn import torch.nn.functional as F class Unit3D(nn.Module): def __init__(self, in_channels, output_channels, kernel_shape=(1, 1, 1), stride=(1, 1, 1), padding='spatial_valid', activation_fn=F.relu, use_batch_norm=False, use_bias=False): """Initializes Unit3D module.""" super(Unit3D, self).__init__() self._output_channels = output_channels self._kernel_shape = kernel_shape self._stride = stride self._use_batch_norm = use_batch_norm self._activation_fn = activation_fn self._use_bias = use_bias self.padding = padding if self._use_batch_norm: self.bn = nn.BatchNorm3d(self._output_channels, eps=0.001, momentum=0.01) self.conv3d = nn.Conv3d(in_channels=in_channels, out_channels=self. _output_channels, kernel_size=self._kernel_shape, stride=self. _stride, padding=0, bias=self._use_bias) def compute_pad(self, dim, s): if s % self._stride[dim] == 0: return max(self._kernel_shape[dim] - self._stride[dim], 0) else: return max(self._kernel_shape[dim] - s % self._stride[dim], 0) def forward(self, x): if self.padding == 'same': _batch, _channel, t, h, w = x.size() pad_t = self.compute_pad(0, t) pad_h = self.compute_pad(1, h) pad_w = self.compute_pad(2, w) pad_t_f = pad_t // 2 pad_t_b = pad_t - pad_t_f pad_h_f = pad_h // 2 pad_h_b = pad_h - pad_h_f pad_w_f = pad_w // 2 pad_w_b = pad_w - pad_w_f pad = [pad_w_f, pad_w_b, pad_h_f, pad_h_b, pad_t_f, pad_t_b] x = F.pad(x, pad) if self.padding == 'spatial_valid': _batch, _channel, t, h, w = x.size() pad_t = self.compute_pad(0, t) pad_t_f = pad_t // 2 pad_t_b = pad_t - pad_t_f pad = [0, 0, 0, 0, pad_t_f, pad_t_b] x = F.pad(x, pad) x = self.conv3d(x) if self._use_batch_norm: x = self.bn(x) if self._activation_fn is not None: x = self._activation_fn(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'output_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp3 = 0.0 tmp4 = tmp2 <= tmp3 tl.store(in_out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr0 + x0, tmp4, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(1024)](buf1, buf2, 1024, XBLOCK=256, num_warps=4, num_stages=1) return buf1, primals_1, primals_2, buf2 class Unit3DNew(nn.Module): def __init__(self, in_channels, output_channels, kernel_shape=(1, 1, 1), stride=(1, 1, 1), padding='spatial_valid', activation_fn=F.relu, use_batch_norm=False, use_bias=False): """Initializes Unit3D module.""" super(Unit3DNew, self).__init__() self._output_channels = output_channels self._kernel_shape = kernel_shape self._stride = stride self._use_batch_norm = use_batch_norm self._activation_fn = activation_fn self._use_bias = use_bias self.padding = padding if self._use_batch_norm: self.bn = nn.BatchNorm3d(self._output_channels, eps=0.001, momentum=0.01) self.conv3d = nn.Conv3d(in_channels=in_channels, out_channels=self. _output_channels, kernel_size=self._kernel_shape, stride=self. _stride, padding=0, bias=self._use_bias) def compute_pad(self, dim, s): if s % self._stride[dim] == 0: return max(self._kernel_shape[dim] - self._stride[dim], 0) else: return max(self._kernel_shape[dim] - s % self._stride[dim], 0) def forward(self, input_0): primals_2 = self.conv3d.weight primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
TencentYoutuResearch/ActionDetection-AFSD
Unit3D
false
14466
[ "BSD-3-Clause" ]
112
ed86a0df91e58baa7d78c796ed29cff82b1f3fa6
https://github.com/TencentYoutuResearch/ActionDetection-AFSD/tree/ed86a0df91e58baa7d78c796ed29cff82b1f3fa6
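A worked example of Unit3D.compute_pad — the TF-style 'same' padding the module applies before its padding=0 Conv3d. The numbers are purely illustrative; nothing here is specific to the compiled call() above (which was traced for the default 'spatial_valid' path with a size-1 kernel, where the pad is zero).
# For kernel 3 and stride 2 on a length-7 axis:
kernel, stride, t = 3, 2, 7
pad = max(kernel - stride, 0) if t % stride == 0 else max(kernel - t % stride, 0)
# 7 % 2 == 1, so pad = max(3 - 1, 0) = 2, split front/back as in forward():
pad_front, pad_back = pad // 2, pad - pad // 2   # (1, 1)
assert (t + pad_front + pad_back - kernel) // stride + 1 == -(-t // stride)  # ceil(7/2) == 4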
ResidualPointnet
import torch import torch.utils.data import torch.nn as nn class ResidualBlock(nn.Module): def __init__(self, in_channels, out_channels, hidden_dim): super().__init__() self.fc_0 = nn.Conv1d(in_channels, hidden_dim, 1) self.fc_1 = nn.Conv1d(hidden_dim, out_channels, 1) self.activation = nn.ReLU() if in_channels != out_channels: self.shortcut = nn.Conv1d(in_channels, out_channels, 1) else: self.shortcut = nn.Identity() nn.init.zeros_(self.fc_1.weight) def forward(self, x): x_short = self.shortcut(x) x = self.fc_0(x) x = self.fc_1(self.activation(x)) x = self.activation(x + x_short) return x class ResidualPointnet(nn.Module): """ PointNet-based encoder network with ResNet blocks. Args: c_dim (int): dimension of latent code c dim (int): input points dimension hidden_dim (int): hidden dimension of the network """ def __init__(self, in_channels, out_channels, hidden_dim, segmentation= False): super().__init__() self.fc_in = nn.Conv1d(in_channels, 2 * hidden_dim, 1) self.block_0 = ResidualBlock(2 * hidden_dim, hidden_dim, hidden_dim) self.block_1 = ResidualBlock(2 * hidden_dim, hidden_dim, hidden_dim) self.block_2 = ResidualBlock(2 * hidden_dim, hidden_dim, hidden_dim) self.block_3 = ResidualBlock(2 * hidden_dim, hidden_dim, hidden_dim) self.block_4 = ResidualBlock(2 * hidden_dim, hidden_dim, hidden_dim) self.segmentation = segmentation if self.segmentation: self.fc_out = nn.Conv1d(2 * hidden_dim, out_channels, 1) else: self.fc_out = nn.Linear(hidden_dim, out_channels) def forward(self, x): x = self.fc_in(x) x = self.block_0(x) x_pool = torch.max(x, dim=2, keepdim=True)[0].expand_as(x) x = torch.cat([x, x_pool], dim=1) x = self.block_1(x) x_pool = torch.max(x, dim=2, keepdim=True)[0].expand_as(x) x = torch.cat([x, x_pool], dim=1) x = self.block_2(x) x_pool = torch.max(x, dim=2, keepdim=True)[0].expand_as(x) x = torch.cat([x, x_pool], dim=1) x = self.block_3(x) x_pool = torch.max(x, dim=2, keepdim=True)[0].expand_as(x) x = torch.cat([x, x_pool], dim=1) x = self.block_4(x) if self.segmentation: x_pool = torch.max(x, dim=2, keepdim=True)[0].expand_as(x) x = torch.cat([x, x_pool], dim=1) else: x = torch.max(x, dim=2)[0] x = self.fc_out(x) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4, 'hidden_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_add_convolution_max_relu_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + 4 * x2, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr2 + (1 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp18 = tl.load(in_ptr2 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp25 = tl.load(in_ptr2 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.full([1], 0, tl.int32) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = tmp9 + tmp1 tmp12 = tmp11 + tmp4 tmp13 = tmp10 + tmp12 tmp14 = triton_helpers.maximum(tmp7, tmp13) tmp15 = triton_helpers.maximum(tmp8, tmp14) tmp17 = tmp16 + tmp1 tmp19 = tmp18 + tmp4 tmp20 = tmp17 + tmp19 tmp21 = triton_helpers.maximum(tmp7, tmp20) tmp22 = triton_helpers.maximum(tmp15, tmp21) tmp24 = tmp23 + tmp1 tmp26 = tmp25 + tmp4 tmp27 = tmp24 + tmp26 tmp28 = triton_helpers.maximum(tmp7, tmp27) tmp29 = triton_helpers.maximum(tmp22, tmp28) tmp30 = tmp8 > tmp14 tmp31 = tmp8 == tmp14 tmp32 = tmp8 != tmp8 tmp33 = tmp14 != tmp14 tmp34 = tmp32 > tmp33 tmp35 = tmp30 | tmp34 tmp36 = tmp32 & tmp33 tmp37 = tmp31 | tmp36 tmp38 = tl.full([1], 0, tl.int64) tmp39 = tl.full([1], 1, tl.int64) tmp40 = tmp38 < tmp39 tmp41 = tmp37 & tmp40 tmp42 = tmp35 | tmp41 tmp43 = tl.where(tmp42, tmp8, tmp14) tmp44 = tl.where(tmp42, tmp38, tmp39) tmp45 = tmp43 > tmp21 tmp46 = tmp43 == tmp21 tmp47 = tmp43 != tmp43 tmp48 = tmp21 != tmp21 tmp49 = tmp47 > tmp48 
tmp50 = tmp45 | tmp49 tmp51 = tmp47 & tmp48 tmp52 = tmp46 | tmp51 tmp53 = tl.full([1], 2, tl.int64) tmp54 = tmp44 < tmp53 tmp55 = tmp52 & tmp54 tmp56 = tmp50 | tmp55 tmp57 = tl.where(tmp56, tmp43, tmp21) tmp58 = tl.where(tmp56, tmp44, tmp53) tmp59 = tmp57 > tmp28 tmp60 = tmp57 == tmp28 tmp61 = tmp57 != tmp57 tmp62 = tmp28 != tmp28 tmp63 = tmp61 > tmp62 tmp64 = tmp59 | tmp63 tmp65 = tmp61 & tmp62 tmp66 = tmp60 | tmp65 tmp67 = tl.full([1], 3, tl.int64) tmp68 = tmp58 < tmp67 tmp69 = tmp66 & tmp68 tmp70 = tmp64 | tmp69 tl.where(tmp70, tmp57, tmp28) tmp72 = tl.where(tmp70, tmp58, tmp67) tl.store(out_ptr0 + x2, tmp29, xmask) tl.store(out_ptr1 + x2, tmp72, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 8 x0 = xindex % 4 x2 = xindex // 32 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.load(in_ptr2 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp9 = tl.load(in_ptr3 + x1, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tmp8 + tmp9 tmp11 = tmp7 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp4, tmp13, tmp14) tmp16 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp19 = tl.load(in_ptr4 + (4 * x2 + (-4 + x1)), tmp16 & xmask, eviction_policy='evict_last', other=0.0) tmp20 = tl.where(tmp4, tmp15, tmp19) tl.store(out_ptr0 + x3, tmp20, xmask) @triton.jit def triton_poi_fused_add_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x3, xmask) tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp7 = tl.full([1], 0, tl.int32) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp9 = 0.0 tmp10 = tmp8 <= tmp9 tl.store(out_ptr0 + x3, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35) = args args.clear() assert_size_stride(primals_1, (8, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 8, 1), (8, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 8, 1), (8, 1, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (4, 8, 1), (8, 1, 1)) assert_size_stride(primals_11, (4,), (1,)) assert_size_stride(primals_12, (4, 8, 1), (8, 
1, 1)) assert_size_stride(primals_13, (4,), (1,)) assert_size_stride(primals_14, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_15, (4,), (1,)) assert_size_stride(primals_16, (4, 8, 1), (8, 1, 1)) assert_size_stride(primals_17, (4,), (1,)) assert_size_stride(primals_18, (4, 8, 1), (8, 1, 1)) assert_size_stride(primals_19, (4,), (1,)) assert_size_stride(primals_20, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_21, (4,), (1,)) assert_size_stride(primals_22, (4, 8, 1), (8, 1, 1)) assert_size_stride(primals_23, (4,), (1,)) assert_size_stride(primals_24, (4, 8, 1), (8, 1, 1)) assert_size_stride(primals_25, (4,), (1,)) assert_size_stride(primals_26, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_27, (4,), (1,)) assert_size_stride(primals_28, (4, 8, 1), (8, 1, 1)) assert_size_stride(primals_29, (4,), (1,)) assert_size_stride(primals_30, (4, 8, 1), (8, 1, 1)) assert_size_stride(primals_31, (4,), (1,)) assert_size_stride(primals_32, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_33, (4,), (1,)) assert_size_stride(primals_34, (4, 4), (4, 1)) assert_size_stride(primals_35, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 4), (32, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(128)](buf1, primals_2, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = extern_kernels.convolution(buf1, primals_6, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4), (16, 4, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_1[grid(64)](buf4, primals_7, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_7 buf5 = extern_kernels.convolution(buf4, primals_8, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf5, (4, 4, 4), (16, 4, 1)) buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.int64) triton_poi_fused_add_convolution_max_relu_2[grid(16)](buf5, primals_9, buf2, primals_5, buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) triton_poi_fused_cat_3[grid(128)](buf5, primals_9, buf2, primals_5, buf6, buf8, 128, XBLOCK=128, num_warps=4, num_stages=1) buf9 = extern_kernels.convolution(buf8, primals_10, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 4), (16, 4, 1)) buf10 = extern_kernels.convolution(buf8, primals_12, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf10, (4, 4, 4), (16, 4, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_1[grid(64)](buf11, primals_13, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_13 buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf12, (4, 4, 4), (16, 4, 1)) buf13 = buf6 del buf6 buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), 
torch.int64) triton_poi_fused_add_convolution_max_relu_2[grid(16)](buf12, primals_15, buf9, primals_11, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) triton_poi_fused_cat_3[grid(128)](buf12, primals_15, buf9, primals_11, buf13, buf15, 128, XBLOCK=128, num_warps=4, num_stages=1) buf16 = extern_kernels.convolution(buf15, primals_16, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf16, (4, 4, 4), (16, 4, 1)) buf17 = extern_kernels.convolution(buf15, primals_18, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf17, (4, 4, 4), (16, 4, 1)) buf18 = buf17 del buf17 triton_poi_fused_convolution_relu_1[grid(64)](buf18, primals_19, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_19 buf19 = extern_kernels.convolution(buf18, primals_20, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf19, (4, 4, 4), (16, 4, 1)) buf20 = buf13 del buf13 buf21 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.int64) triton_poi_fused_add_convolution_max_relu_2[grid(16)](buf19, primals_21, buf16, primals_17, buf20, buf21, 16, XBLOCK=16, num_warps=1, num_stages=1) buf22 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) triton_poi_fused_cat_3[grid(128)](buf19, primals_21, buf16, primals_17, buf20, buf22, 128, XBLOCK=128, num_warps=4, num_stages=1) buf23 = extern_kernels.convolution(buf22, primals_22, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf23, (4, 4, 4), (16, 4, 1)) buf24 = extern_kernels.convolution(buf22, primals_24, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf24, (4, 4, 4), (16, 4, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_relu_1[grid(64)](buf25, primals_25, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_25 buf26 = extern_kernels.convolution(buf25, primals_26, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf26, (4, 4, 4), (16, 4, 1)) buf27 = buf20 del buf20 buf28 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.int64) triton_poi_fused_add_convolution_max_relu_2[grid(16)](buf26, primals_27, buf23, primals_23, buf27, buf28, 16, XBLOCK=16, num_warps=1, num_stages=1) buf29 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) triton_poi_fused_cat_3[grid(128)](buf26, primals_27, buf23, primals_23, buf27, buf29, 128, XBLOCK=128, num_warps=4, num_stages=1) buf30 = extern_kernels.convolution(buf29, primals_28, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf30, (4, 4, 4), (16, 4, 1)) buf31 = extern_kernels.convolution(buf29, primals_30, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf31, (4, 4, 4), (16, 4, 1)) buf32 = buf31 del buf31 triton_poi_fused_convolution_relu_1[grid(64)](buf32, primals_31, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_31 buf33 = extern_kernels.convolution(buf32, primals_32, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf33, (4, 4, 4), (16, 4, 1)) buf34 = reinterpret_tensor(buf27, (4, 4), (4, 1), 
0) del buf27 buf35 = empty_strided_cuda((4, 4), (4, 1), torch.int64) triton_poi_fused_add_convolution_max_relu_2[grid(16)](buf33, primals_33, buf30, primals_29, buf34, buf35, 16, XBLOCK=16, num_warps=1, num_stages=1) buf36 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_35, buf34, reinterpret_tensor( primals_34, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf36) del primals_35 buf37 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_add_convolution_relu_threshold_backward_4[grid(64)]( buf33, primals_33, buf30, primals_29, buf37, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf30 del buf33 del primals_29 del primals_33 buf38 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_add_convolution_relu_threshold_backward_4[grid(64)]( buf26, primals_27, buf23, primals_23, buf38, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf23 del buf26 del primals_23 del primals_27 buf39 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_add_convolution_relu_threshold_backward_4[grid(64)]( buf19, primals_21, buf16, primals_17, buf39, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf16 del buf19 del primals_17 del primals_21 buf40 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_add_convolution_relu_threshold_backward_4[grid(64)]( buf12, primals_15, buf9, primals_11, buf40, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf12 del buf9 del primals_11 del primals_15 buf41 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_add_convolution_relu_threshold_backward_4[grid(64)]( buf5, primals_9, buf2, primals_5, buf41, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 del buf5 del primals_5 del primals_9 return (buf36, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, buf1, buf4, buf7, buf8, buf11, buf14, buf15, buf18, buf21, buf22, buf25, buf28, buf29, buf32, buf34, primals_34, reinterpret_tensor(buf35, (4, 4, 1), (4, 1, 1), 0), buf37, buf38, buf39, buf40, buf41) class ResidualBlock(nn.Module): def __init__(self, in_channels, out_channels, hidden_dim): super().__init__() self.fc_0 = nn.Conv1d(in_channels, hidden_dim, 1) self.fc_1 = nn.Conv1d(hidden_dim, out_channels, 1) self.activation = nn.ReLU() if in_channels != out_channels: self.shortcut = nn.Conv1d(in_channels, out_channels, 1) else: self.shortcut = nn.Identity() nn.init.zeros_(self.fc_1.weight) def forward(self, x): x_short = self.shortcut(x) x = self.fc_0(x) x = self.fc_1(self.activation(x)) x = self.activation(x + x_short) return x class ResidualPointnetNew(nn.Module): """ PointNet-based encoder network with ResNet blocks. 
Args: c_dim (int): dimension of latent code c dim (int): input points dimension hidden_dim (int): hidden dimension of the network """ def __init__(self, in_channels, out_channels, hidden_dim, segmentation= False): super().__init__() self.fc_in = nn.Conv1d(in_channels, 2 * hidden_dim, 1) self.block_0 = ResidualBlock(2 * hidden_dim, hidden_dim, hidden_dim) self.block_1 = ResidualBlock(2 * hidden_dim, hidden_dim, hidden_dim) self.block_2 = ResidualBlock(2 * hidden_dim, hidden_dim, hidden_dim) self.block_3 = ResidualBlock(2 * hidden_dim, hidden_dim, hidden_dim) self.block_4 = ResidualBlock(2 * hidden_dim, hidden_dim, hidden_dim) self.segmentation = segmentation if self.segmentation: self.fc_out = nn.Conv1d(2 * hidden_dim, out_channels, 1) else: self.fc_out = nn.Linear(hidden_dim, out_channels) def forward(self, input_0): primals_1 = self.fc_in.weight primals_2 = self.fc_in.bias primals_4 = self.block_0.fc_0.weight primals_5 = self.block_0.fc_0.bias primals_8 = self.block_0.fc_1.weight primals_7 = self.block_0.fc_1.bias primals_6 = self.block_0.shortcut.weight primals_9 = self.block_0.shortcut.bias primals_10 = self.block_1.fc_0.weight primals_11 = self.block_1.fc_0.bias primals_14 = self.block_1.fc_1.weight primals_13 = self.block_1.fc_1.bias primals_12 = self.block_1.shortcut.weight primals_15 = self.block_1.shortcut.bias primals_16 = self.block_2.fc_0.weight primals_17 = self.block_2.fc_0.bias primals_20 = self.block_2.fc_1.weight primals_19 = self.block_2.fc_1.bias primals_18 = self.block_2.shortcut.weight primals_21 = self.block_2.shortcut.bias primals_22 = self.block_3.fc_0.weight primals_23 = self.block_3.fc_0.bias primals_26 = self.block_3.fc_1.weight primals_25 = self.block_3.fc_1.bias primals_24 = self.block_3.shortcut.weight primals_27 = self.block_3.shortcut.bias primals_28 = self.block_4.fc_0.weight primals_29 = self.block_4.fc_0.bias primals_32 = self.block_4.fc_1.weight primals_31 = self.block_4.fc_1.bias primals_30 = self.block_4.shortcut.weight primals_33 = self.block_4.shortcut.bias primals_34 = self.fc_out.weight primals_35 = self.fc_out.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35]) return output[0]
StructuralNeurobiologyLab/LightConvPoint
ResidualPointnet
false
14467
[ "Apache-2.0" ]
58
3f353f45e9e910fa390a74520dfd478e3e88f104
https://github.com/StructuralNeurobiologyLab/LightConvPoint/tree/3f353f45e9e910fa390a74520dfd478e3e88f104
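An eager sketch of the pool-and-concat step that triton_poi_fused_add_convolution_max_relu_2 and triton_poi_fused_cat_3 fuse between residual blocks: the per-channel max over points is broadcast back to every point and concatenated along channels. Shapes follow get_inputs(); this is illustration, not the compiled path.
import torch
x = torch.rand(4, 4, 4)                                     # (batch, hidden, points)
x_pool = torch.max(x, dim=2, keepdim=True)[0].expand_as(x)  # per-channel global max
x = torch.cat([x, x_pool], dim=1)                           # (4, 8, 4), fed to the next block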
GroupedChannelNorm
import torch import torch.utils.data import torch import torch.nn as nn class GroupedChannelNorm(nn.Module): def __init__(self, num_groups): super().__init__() self.num_groups = num_groups def forward(self, x): shape = list(x.shape) new_shape = [shape[0], self.num_groups, shape[1] // self.num_groups ] + shape[2:] x = x.view(*new_shape) mean = x.mean(dim=2, keepdim=True) std = x.std(dim=2, keepdim=True) x_norm = (x - mean) / (std + 1e-07) return x_norm.view(*shape) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_groups': 1}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_mean_std_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = libdevice.sqrt(tmp23) tmp25 = 1e-07 tmp26 = tmp24 + tmp25 tmp27 = tmp10 / tmp26 tl.store(out_ptr0 + x3, tmp27, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 1, 16, 4, 1), torch .float32) get_raw_stream(0) triton_poi_fused_add_div_mean_std_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), class GroupedChannelNormNew(nn.Module): def __init__(self, num_groups): super().__init__() self.num_groups = num_groups def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Theomat/colorization-av-enseirb-2020
GroupedChannelNorm
false
14468
[ "Apache-2.0" ]
1422
c54c2388ea39a62289fa2f1c51b4757bf55d3c4f
https://github.com/Theomat/colorization-av-enseirb-2020/tree/c54c2388ea39a62289fa2f1c51b4757bf55d3c4f
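Eager cross-check for the GroupedChannelNorm kernel — a sketch assuming a CUDA device and that GroupedChannelNormNew from the record is in scope. Note that triton_poi_fused_add_div_mean_std_sub_0 divides the summed squared deviations by 3.0, i.e. the unbiased (n-1) estimator that torch.std uses by default.
import torch
x = torch.rand(4, 4, 4, 4, device='cuda')
m = GroupedChannelNormNew(num_groups=1)
g = x.view(4, 1, 4, 4, 4)  # (batch, groups, channels_per_group, H, W)
ref = ((g - g.mean(dim=2, keepdim=True)) /
       (g.std(dim=2, keepdim=True) + 1e-07)).view(4, 4, 4, 4)
assert torch.allclose(m(x), ref, atol=1e-6)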
TransposedConv1d
import torch import torch.nn as nn import torch.nn.functional as F class TransposedConv1d(nn.Module): def __init__(self, in_channels, output_channels, kernel_shape=3, stride =2, padding=1, output_padding=1, activation_fn=F.relu, use_batch_norm=False, use_bias=True): super(TransposedConv1d, self).__init__() self._use_batch_norm = use_batch_norm self._activation_fn = activation_fn self.transposed_conv1d = nn.ConvTranspose1d(in_channels, output_channels, kernel_shape, stride, padding=padding, output_padding=output_padding, bias=use_bias) if self._use_batch_norm: self.bn = nn.BatchNorm3d(self._output_channels, eps=0.001, momentum=0.01) def forward(self, x): x = self.transposed_conv1d(x) if self._use_batch_norm: x = self.bn(x) if self._activation_fn is not None: x = self._activation_fn(x) return x def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'output_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(2,), padding=(1,), dilation=(1,), transposed=True, output_padding=(1,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 8), (32, 8, 1)) buf1 = reinterpret_tensor(buf0, (4, 8), (8, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 8), (8, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(32)](buf1, primals_2, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 return buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), buf2 class TransposedConv1dNew(nn.Module): def __init__(self, in_channels, output_channels, kernel_shape=3, stride =2, padding=1, output_padding=1, activation_fn=F.relu, use_batch_norm=False, use_bias=True): super(TransposedConv1dNew, self).__init__() self._use_batch_norm = use_batch_norm self._activation_fn = activation_fn self.transposed_conv1d = nn.ConvTranspose1d(in_channels, output_channels, kernel_shape, stride, padding=padding, output_padding=output_padding, bias=use_bias) if self._use_batch_norm: self.bn = nn.BatchNorm3d(self._output_channels, eps=0.001, momentum=0.01) def forward(self, input_0): primals_1 = self.transposed_conv1d.weight primals_2 = self.transposed_conv1d.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
TencentYoutuResearch/ActionDetection-AFSD
TransposedConv1d
false
14,469
[ "BSD-3-Clause" ]
112
ed86a0df91e58baa7d78c796ed29cff82b1f3fa6
https://github.com/TencentYoutuResearch/ActionDetection-AFSD/tree/ed86a0df91e58baa7d78c796ed29cff82b1f3fa6
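The compiled graph above hands the transposed convolution to extern_kernels.convolution(..., transposed=True) and fuses only the bias add and ReLU into the Triton kernel. A minimal, self-contained sketch (not part of the record) of the shape arithmetic the assert_size_stride(buf0, (1, 4, 8), ...) guard relies on, namely L_out = (L_in - 1) * stride - 2 * padding + kernel_size + output_padding for ConvTranspose1d:

import torch
import torch.nn as nn

conv = nn.ConvTranspose1d(4, 4, kernel_size=3, stride=2, padding=1, output_padding=1)
x = torch.rand(1, 4, 4)  # the record views its (4, 4) test input as (1, 4, 4)
y = torch.relu(conv(x))  # bias add + ReLU is what the fused kernel applies in-place
assert y.shape == (1, 4, (4 - 1) * 2 - 2 * 1 + 3 + 1)  # length 8, matching buf0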
PoolingF
import torch import torch.utils.data import torch import torch.nn as nn class Normalize(nn.Module): def __init__(self, power=2): super(Normalize, self).__init__() self.power = power def forward(self, x): norm = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power) out = x.div(norm + 1e-07) return out class PoolingF(nn.Module): def __init__(self): super(PoolingF, self).__init__() model = [nn.AdaptiveMaxPool2d(1)] self.model = nn.Sequential(*model) self.l2norm = Normalize(2) def forward(self, x): return self.l2norm(self.model(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_adaptive_max_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tl.store(out_ptr0 + x0, tmp30, xmask) @triton.jit def triton_poi_fused_add_div_pow_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-07 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): arg0_1, = args args.clear() 
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) get_raw_stream(0) triton_poi_fused_adaptive_max_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused_add_div_pow_sum_1[grid(16)](buf0, buf1, 16, XBLOCK =16, num_warps=1, num_stages=1) del buf0 return buf1, class Normalize(nn.Module): def __init__(self, power=2): super(Normalize, self).__init__() self.power = power def forward(self, x): norm = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power) out = x.div(norm + 1e-07) return out class PoolingFNew(nn.Module): def __init__(self): super(PoolingFNew, self).__init__() model = [nn.AdaptiveMaxPool2d(1)] self.model = nn.Sequential(*model) self.l2norm = Normalize(2) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Theomat/colorization-av-enseirb-2020
PoolingF
false
14,470
[ "Apache-2.0" ]
1,422
c54c2388ea39a62289fa2f1c51b4757bf55d3c4f
https://github.com/Theomat/colorization-av-enseirb-2020/tree/c54c2388ea39a62289fa2f1c51b4757bf55d3c4f
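Kernel 0 above unrolls the 4x4 global max pool into a chain of pairwise maximums, and kernel 1 fuses the square, channel sum, square root, epsilon add, and division of the L2 normalization. A minimal eager-mode sketch (not part of the record) of the same two steps:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
pooled = F.adaptive_max_pool2d(x, 1)              # global max over each 4x4 plane (kernel 0)
norm = pooled.pow(2).sum(1, keepdim=True).sqrt()  # channel-wise L2 norm (kernel 1)
out = pooled / (norm + 1e-07)
assert out.shape == (4, 4, 1, 1)
assert torch.allclose(out.pow(2).sum(1).sqrt(), torch.ones(4, 1, 1), atol=1e-4)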
TransposedConv3d
import torch import torch.nn as nn import torch.nn.functional as F class TransposedConv3d(nn.Module): def __init__(self, in_channels, output_channels, kernel_shape=(3, 3, 3), stride=(2, 1, 1), padding=(1, 1, 1), output_padding=(1, 0, 0), activation_fn=F.relu, use_batch_norm=False, use_bias=True): super(TransposedConv3d, self).__init__() self._use_batch_norm = use_batch_norm self._activation_fn = activation_fn self.transposed_conv3d = nn.ConvTranspose3d(in_channels, output_channels, kernel_shape, stride, padding=padding, output_padding=output_padding, bias=use_bias) if self._use_batch_norm: self.bn = nn.BatchNorm3d(output_channels, eps=0.001, momentum=0.01) def forward(self, x): x = self.transposed_conv3d(x) if self._use_batch_norm: x = self.bn(x) if self._activation_fn is not None: x = self._activation_fn(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'output_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(2, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=True, output_padding=(1, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 8, 4, 4), (512, 128, 16, 4, 1)) buf1 = reinterpret_tensor(buf0, (4, 8, 4, 4), (128, 16, 4, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(512)](buf1, primals_2, buf2, 512, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf2 class TransposedConv3dNew(nn.Module): def __init__(self, in_channels, output_channels, kernel_shape=(3, 3, 3), stride=(2, 1, 1), padding=(1, 1, 1), output_padding=(1, 0, 0), activation_fn=F.relu, use_batch_norm=False, use_bias=True): super(TransposedConv3dNew, self).__init__() self._use_batch_norm = use_batch_norm self._activation_fn = activation_fn self.transposed_conv3d = nn.ConvTranspose3d(in_channels, output_channels, kernel_shape, stride, padding=padding, output_padding=output_padding, bias=use_bias) if self._use_batch_norm: self.bn = nn.BatchNorm3d(output_channels, eps=0.001, momentum=0.01) def forward(self, input_0): primals_1 = self.transposed_conv3d.weight primals_2 = self.transposed_conv3d.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
TencentYoutuResearch/ActionDetection-AFSD
TransposedConv3d
false
14,471
[ "BSD-3-Clause" ]
112
ed86a0df91e58baa7d78c796ed29cff82b1f3fa6
https://github.com/TencentYoutuResearch/ActionDetection-AFSD/tree/ed86a0df91e58baa7d78c796ed29cff82b1f3fa6
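With stride=(2, 1, 1) and output_padding=(1, 0, 0), only the leading (temporal) axis is upsampled; height and width pass through unchanged. A self-contained sketch (not part of the record) of the per-axis arithmetic behind the assert_size_stride(buf0, (1, 4, 8, 4, 4), ...) guard:

import torch
import torch.nn as nn

conv = nn.ConvTranspose3d(4, 4, (3, 3, 3), stride=(2, 1, 1),
                          padding=(1, 1, 1), output_padding=(1, 0, 0))
x = torch.rand(1, 4, 4, 4, 4)  # the record reinterprets its (4, 4, 4, 4) input this way
y = torch.relu(conv(x))
assert y.shape == (1, 4, (4 - 1) * 2 - 2 + 3 + 1, 4, 4)  # temporal axis: 4 -> 8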
SM
import torch from torch import nn def autopad(k, p=None): if p is None: p = k // 2 if isinstance(k, int) else [(x // 2) for x in k] return p class SM(nn.Module): def __init__(self, k=3, s=1): super(SM, self).__init__() self.avg = nn.AvgPool2d(k, stride=s, padding=autopad(k)) self.max = nn.MaxPool2d(k, stride=s, padding=autopad(k)) def forward(self, x): x = self.max(self.avg(x)) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-5 + x4), tmp10 & xmask, other=0.0) tmp12 = x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-4 + x4), tmp16 & xmask, other=0.0) tmp18 = tmp17 + tmp11 tmp19 = 1 + x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-3 + x4), tmp23 & xmask, other=0.0) tmp25 = tmp24 + tmp18 tmp26 = x1 tmp27 = tmp26 >= tmp1 tmp28 = tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + (-1 + x4), tmp30 & xmask, other=0.0) tmp32 = tmp31 + tmp25 tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + x4, tmp33 & xmask, other=0.0) tmp35 = tmp34 + tmp32 tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (1 + x4), tmp36 & xmask, other=0.0) tmp38 = tmp37 + tmp35 tmp39 = 1 + x1 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (3 + x4), tmp43 & xmask, other=0.0) tmp45 = tmp44 + tmp38 tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (4 + x4), tmp46 & xmask, other=0.0) tmp48 = tmp47 + tmp45 tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (5 + x4), tmp49 & xmask, other=0.0) tmp51 = tmp50 + tmp48 tmp52 = 1 + -1 * x0 + -1 * x1 + x0 * x1 + (5 * (5 <= 2 + x0) + (2 + x0) * (2 + x0 < 5)) * (5 * (5 <= 2 + x1) + (2 + x1) * (2 + x1 < 5) ) + -1 * x0 * (5 * (5 <= 2 + x1) + (2 + x1) * (2 + x1 < 5) ) + -1 * x1 * (5 * (5 <= 2 + x0) + (2 + x0) * (2 + x0 < 5)) + (5 * (5 <= 2 + x0) + (2 + x0) * (2 + x0 < 5)) + (5 * (5 <= 2 + x1) + (2 + x1) * (2 + x1 < 5)) tmp53 = tmp51 / tmp52 tl.store(out_ptr0 + x4, tmp53, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x4 = xindex tmp0 = -1 + x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = -1 + x0 tmp7 = tmp6 >= tmp1 tmp8 = tmp6 < tmp3 tmp9 = tmp7 & tmp8 tmp10 = tmp5 & tmp9 tmp11 = tl.load(in_ptr0 + (-5 + x4), tmp10 & xmask, other=float('-inf')) tmp12 = x0 tmp13 = tmp12 >= tmp1 tmp14 = tmp12 < tmp3 tmp15 = tmp13 & tmp14 tmp16 = tmp5 & tmp15 tmp17 = tl.load(in_ptr0 + (-4 + x4), tmp16 & xmask, other=float('-inf')) tmp18 = triton_helpers.maximum(tmp17, tmp11) tmp19 = 1 + x0 tmp20 = tmp19 >= tmp1 tmp21 = tmp19 < tmp3 tmp22 = tmp20 & tmp21 tmp23 = tmp5 & tmp22 tmp24 = tl.load(in_ptr0 + (-3 + x4), tmp23 & xmask, other=float('-inf')) tmp25 = triton_helpers.maximum(tmp24, tmp18) tmp26 = x1 tmp27 = tmp26 >= tmp1 tmp28 = 
tmp26 < tmp3 tmp29 = tmp27 & tmp28 tmp30 = tmp29 & tmp9 tmp31 = tl.load(in_ptr0 + (-1 + x4), tmp30 & xmask, other=float('-inf')) tmp32 = triton_helpers.maximum(tmp31, tmp25) tmp33 = tmp29 & tmp15 tmp34 = tl.load(in_ptr0 + x4, tmp33 & xmask, other=float('-inf')) tmp35 = triton_helpers.maximum(tmp34, tmp32) tmp36 = tmp29 & tmp22 tmp37 = tl.load(in_ptr0 + (1 + x4), tmp36 & xmask, other=float('-inf')) tmp38 = triton_helpers.maximum(tmp37, tmp35) tmp39 = 1 + x1 tmp40 = tmp39 >= tmp1 tmp41 = tmp39 < tmp3 tmp42 = tmp40 & tmp41 tmp43 = tmp42 & tmp9 tmp44 = tl.load(in_ptr0 + (3 + x4), tmp43 & xmask, other=float('-inf')) tmp45 = triton_helpers.maximum(tmp44, tmp38) tmp46 = tmp42 & tmp15 tmp47 = tl.load(in_ptr0 + (4 + x4), tmp46 & xmask, other=float('-inf')) tmp48 = triton_helpers.maximum(tmp47, tmp45) tmp49 = tmp42 & tmp22 tmp50 = tl.load(in_ptr0 + (5 + x4), tmp49 & xmask, other=float('-inf')) tmp51 = triton_helpers.maximum(tmp50, tmp48) tl.store(out_ptr0 + x4, tmp51, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(256)](arg0_1, buf0, 256, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_1[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 return buf1, def autopad(k, p=None): if p is None: p = k // 2 if isinstance(k, int) else [(x // 2) for x in k] return p class SMNew(nn.Module): def __init__(self, k=3, s=1): super(SMNew, self).__init__() self.avg = nn.AvgPool2d(k, stride=s, padding=autopad(k)) self.max = nn.MaxPool2d(k, stride=s, padding=autopad(k)) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
TarikToha/NWPU-Crowd-Sample-Code-for-Localization
SM
false
14,472
[ "MIT" ]
132
0e348b99ea41d4469eff2a78a75648454128d49a
https://github.com/TarikToha/NWPU-Crowd-Sample-Code-for-Localization/tree/0e348b99ea41d4469eff2a78a75648454128d49a
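With k=3, s=1, and autopad(3) == 1, both pools preserve the 4x4 spatial size, and the window-count expression in the fused avg-pool kernel evaluates to the constant 9 at every output position, because each 3x3 window fits entirely inside the zero-padded 6x6 map and AvgPool2d counts padded zeros by default (count_include_pad=True). A minimal eager sketch (not part of the record):

import torch
import torch.nn as nn

avg = nn.AvgPool2d(3, stride=1, padding=1)  # divisor is 9 everywhere for this shape
mx = nn.MaxPool2d(3, stride=1, padding=1)
x = torch.rand(4, 4, 4, 4)
y = mx(avg(x))
assert y.shape == x.shape  # k=3, s=1, p=1 keeps H and W at 4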
SpatialConv3D
import torch import torch.nn as nn class SpatialConv3D(nn.Module): """ Apply 3D conv. over an input signal composed of several input planes with distinct spatial and time axes, by performing 3D convolution over the spatiotemporal axes Args: in_channels (int): number of channels in the input tensor out_channels (int): number of channels produced by the convolution kernel_size (int or tuple): size of the convolution kernel stride (int or tuple): stride padding (int or tuple): zero-padding """ def __init__(self, in_channels, out_channels, kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)): super(SpatialConv3D, self).__init__() self.conv1 = nn.Conv3d(in_channels, 16, kernel_size, stride, padding) self.reLu1 = nn.LeakyReLU(inplace=True) self.conv2 = nn.Conv3d(16, out_channels, kernel_size, stride, padding) self.reLu2 = nn.LeakyReLU(inplace=True) def forward(self, x): x = self.conv1(x) x = self.reLu1(x) x = self.conv2(x) x = self.reLu2(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_leaky_relu_leaky_relu_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.01 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(in_out_ptr0 + x2, tmp7, xmask) tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (16, 4, 1, 3, 3), (36, 9, 9, 3, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 16, 1, 3, 3), (144, 9, 9, 3, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 2, 2), padding=(0, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf0, (1, 16, 4, 2, 2), (256, 16, 4, 2, 1)) buf1 = reinterpret_tensor(buf0, (16, 4, 2, 2), (16, 4, 2, 1), 0) del buf0 buf5 = empty_strided_cuda((16, 4, 2, 2), (16, 4, 2, 1), torch.bool) get_raw_stream(0) triton_poi_fused_leaky_relu_leaky_relu_backward_0[grid(256)](buf1, primals_2, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 16, 4, 2, 2), (0, 16, 4, 2, 1), 0), primals_4, stride=(1, 2, 2), padding=(0, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None) assert_size_stride(buf2, (1, 4, 4, 1, 1), (16, 4, 1, 1, 1)) buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1), (4, 1, 16, 16), 0) del buf2 buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool) triton_poi_fused_leaky_relu_leaky_relu_backward_1[grid(16)](buf3, primals_5, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 return reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0 ), primals_1, primals_4, reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), reinterpret_tensor(buf1, (1, 16, 4, 2, 2), (256, 16, 4, 2, 1), 0), buf4, buf5 class SpatialConv3DNew(nn.Module): """ Apply 3D conv. 
over an input signal composed of several input planes with distinct spatial and time axes, by performing 3D convolution over the spatiotemporal axes Args: in_channels (int): number of channels in the input tensor out_channels (int): number of channels produced by the convolution kernel_size (int or tuple): size of the convolution kernel stride (int or tuple): stride padding (int or tuple): zero-padding """ def __init__(self, in_channels, out_channels, kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)): super(SpatialConv3DNew, self).__init__() self.conv1 = nn.Conv3d(in_channels, 16, kernel_size, stride, padding) self.reLu1 = nn.LeakyReLU(inplace=True) self.conv2 = nn.Conv3d(16, out_channels, kernel_size, stride, padding) self.reLu2 = nn.LeakyReLU(inplace=True) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Tencent/DVQA
SpatialConv3D
false
14,473
[ "BSD-3-Clause" ]
408
21727333a6b41d54ad1a8beca1fcbe00a69ed347
https://github.com/Tencent/DVQA/tree/21727333a6b41d54ad1a8beca1fcbe00a69ed347
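kernel_size=(1, 3, 3) with stride=(1, 2, 2) convolves and downsamples the spatial axes only, leaving the time axis untouched, which is why the two fused kernels here merely add the bias and apply LeakyReLU after the extern convolutions. A self-contained sketch (not part of the record) of the two shape transitions the assert_size_stride guards check:

import torch
import torch.nn as nn
import torch.nn.functional as F

conv1 = nn.Conv3d(4, 16, (1, 3, 3), (1, 2, 2), (0, 1, 1))
conv2 = nn.Conv3d(16, 4, (1, 3, 3), (1, 2, 2), (0, 1, 1))
x = torch.rand(1, 4, 4, 4, 4)       # the record views its (4, 4, 4, 4) input this way
h = F.leaky_relu(conv1(x))
y = F.leaky_relu(conv2(h))
assert h.shape == (1, 16, 4, 2, 2)  # H, W: floor((4 + 2 - 3) / 2) + 1 = 2
assert y.shape == (1, 4, 4, 1, 1)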
ModulatedConv2d
import math import torch import torch.utils.data import torch import torch.nn as nn import torch.nn.functional as F def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if len(k.shape) == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, minor, in_h, in_w = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, minor, in_h, 1, in_w, 1) out = F.pad(out, [0, up_x - 1, 0, 0, 0, up_y - 1, 0, 0]) out = out.view(-1, minor, in_h * up_y, in_w * up_x) out = F.pad(out, [max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max( pad_y1, 0)]) out = out[:, :, max(-pad_y0, 0):out.shape[2] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[3] - max(-pad_x1, 0)] out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) return out[:, :, ::down_y, ::down_x] def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): return upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[ 1], pad[0], pad[1]) def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return F.leaky_relu(input + bias, negative_slope) * scale class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = math.sqrt(1) / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class ModulatedConv2d(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = math.sqrt(1) / math.sqrt(fan_in) self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) if style_dim is not None and style_dim > 0: self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return 
( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input, style): batch, in_channel, height, width = input.shape if style is not None: style = self.modulation(style).view(batch, 1, in_channel, 1, 1) else: style = torch.ones(batch, 1, in_channel, 1, 1, device=input.device, dtype=input.dtype) weight = self.scale * self.weight * style if self.demodulate: demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08) weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) weight = weight.view(batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size) if self.upsample: input = input.view(1, batch * in_channel, height, width) weight = weight.view(batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size) weight = weight.transpose(1, 2).reshape(batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size) out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) out = self.blur(out) elif self.downsample: input = self.blur(input) _, _, height, width = input.shape input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) else: input = input.view(1, batch * in_channel, height, width) out = F.conv2d(input, weight, padding=self.padding, groups=batch) _, _, height, width = out.shape out = out.view(batch, self.out_channel, height, width) return out def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4, 'style_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math import torch.utils.data import torch import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r5 = rindex x0 = xindex % 4 r3 = rindex // 16 x1 = xindex // 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy= 'evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 0.125 tmp2 = tmp0 * tmp1 tmp4 = tmp2 * tmp3 tmp5 = tmp4 * tmp4 tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp8 = tl.where(xmask, tmp6, 0) tmp9 = tl.sum(tmp8, 1)[:, None] tmp10 = 1e-08 tmp11 = tmp9 + tmp10 tmp12 = libdevice.rsqrt(tmp11) tmp13 = tmp4 * tmp12 tl.debug_barrier() tl.store(in_out_ptr0 + x4, tmp12, xmask) tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_0[grid(16)](primals_3, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_4, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_2, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 buf3 = buf0 del buf0 buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_per_fused_add_mul_pow_rsqrt_sum_2[grid(16)](buf4, primals_5, buf2, buf5, 16, 64, XBLOCK=8, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 
1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1)) return reinterpret_tensor(buf6, (4, 4, 5, 5), (100, 25, 5, 1), 0 ), primals_2, primals_5, buf2, buf4, reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0) def make_kernel(k): k = torch.tensor(k, dtype=torch.float32) if len(k.shape) == 1: k = k[None, :] * k[:, None] k /= k.sum() return k def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, minor, in_h, in_w = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, minor, in_h, 1, in_w, 1) out = F.pad(out, [0, up_x - 1, 0, 0, 0, up_y - 1, 0, 0]) out = out.view(-1, minor, in_h * up_y, in_w * up_x) out = F.pad(out, [max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max( pad_y1, 0)]) out = out[:, :, max(-pad_y0, 0):out.shape[2] - max(-pad_y1, 0), max(- pad_x0, 0):out.shape[3] - max(-pad_x1, 0)] out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1) return out[:, :, ::down_y, ::down_x] def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): return upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[ 1], pad[0], pad[1]) def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): return F.leaky_relu(input + bias, negative_slope) * scale class Blur(nn.Module): def __init__(self, kernel, pad, upsample_factor=1): super().__init__() kernel = make_kernel(kernel) if upsample_factor > 1: kernel = kernel * upsample_factor ** 2 self.register_buffer('kernel', kernel) self.pad = pad def forward(self, input): out = upfirdn2d(input, self.kernel, pad=self.pad) return out class EqualLinear(nn.Module): def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None): super().__init__() self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) if bias: self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) else: self.bias = None self.activation = activation self.scale = math.sqrt(1) / math.sqrt(in_dim) * lr_mul self.lr_mul = lr_mul def forward(self, input): if self.activation: out = F.linear(input, self.weight * self.scale) out = fused_leaky_relu(out, self.bias * self.lr_mul) else: out = F.linear(input, self.weight * self.scale, bias=self.bias * self.lr_mul) return out def __repr__(self): return ( f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' ) class ModulatedConv2dNew(nn.Module): def __init__(self, in_channel, out_channel, kernel_size, style_dim, demodulate=True, upsample=False, downsample=False, blur_kernel=[1, 3, 3, 1]): super().__init__() self.eps = 1e-08 self.kernel_size = kernel_size self.in_channel = in_channel self.out_channel = out_channel self.upsample = upsample self.downsample = downsample if upsample: factor = 2 p = len(blur_kernel) - factor - (kernel_size - 1) pad0 = (p + 1) // 2 + factor - 1 pad1 = p // 2 + 1 self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor =factor) if downsample: factor = 2 p = len(blur_kernel) - factor + (kernel_size - 1) pad0 = (p + 1) // 2 pad1 = p // 2 self.blur = Blur(blur_kernel, pad=(pad0, pad1)) fan_in = in_channel * kernel_size ** 2 self.scale = math.sqrt(1) / math.sqrt(fan_in) 
self.padding = kernel_size // 2 self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)) if style_dim is not None and style_dim > 0: self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) self.demodulate = demodulate def __repr__(self): return ( f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})' ) def forward(self, input_0, input_1): primals_5 = self.weight primals_2 = self.modulation.weight primals_4 = self.modulation.bias primals_1 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Theomat/colorization-av-enseirb-2020
ModulatedConv2d
false
14,474
[ "Apache-2.0" ]
1,422
c54c2388ea39a62289fa2f1c51b4757bf55d3c4f
https://github.com/Theomat/colorization-av-enseirb-2020/tree/c54c2388ea39a62289fa2f1c51b4757bf55d3c4f
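triton_per_fused_add_mul_pow_rsqrt_sum_2 fuses the StyleGAN2 modulate/demodulate step (the 0.125 constant is 1/sqrt(in_channel * kernel_size**2)), after which the per-sample convolution runs as a single grouped conv with groups=batch on the input viewed as (1, batch * in_channel, H, W). A minimal sketch (not part of the record) of just the weight math that kernel computes:

import torch

batch, cin, cout, k = 4, 4, 4, 4
w = torch.randn(1, cout, cin, k, k)         # shared weight, as in the module
style = torch.randn(batch, 1, cin, 1, 1)    # per-sample modulation factors
weight = (cin * k * k) ** -0.5 * w * style  # scale = 1/sqrt(fan_in) = 0.125 here
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
weight = weight * demod.view(batch, cout, 1, 1, 1)
# After demodulation every per-sample output filter has (nearly) unit L2 norm:
assert torch.allclose(weight.pow(2).sum([2, 3, 4]).sqrt(),
                      torch.ones(batch, cout), atol=1e-3)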
TokenEmbedding
import torch import torch.nn as nn class TokenEmbedding(nn.Module): def __init__(self, c_in, d_model): super(TokenEmbedding, self).__init__() padding = 1 if tuple(int(v) for v in torch.__version__.split('+')[0].split('.')[:2]) >= (1, 5) else 2 self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model, kernel_size=3, padding=padding, padding_mode='circular') for m in self.modules(): if isinstance(m, nn.Conv1d): nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='leaky_relu') def forward(self, x): x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2) return x def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'c_in': 4, 'd_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_copy_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 24 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel y0 = yindex % 6 x2 = xindex y1 = yindex // 6 tmp0 = y0 tmp1 = tl.full([1, 1], 5, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]) tmp4 = tl.full([1, 1], 1, tl.int64) tmp5 = tmp3 < tmp4 tmp6 = tmp5 & tmp2 tmp7 = tl.broadcast_to(y0, [XBLOCK, YBLOCK]) tmp8 = tmp7 >= tmp4 tmp9 = tmp7 < tmp1 tmp10 = tmp8 & tmp9 tmp11 = tmp10 & tmp6 tmp12 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp11 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp13 = float('nan') tmp14 = tl.where(tmp10, tmp12, tmp13) tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp6, tmp14, tmp15) tmp17 = tmp3 >= tmp4 tmp18 = tmp3 < tmp1 tmp19 = tmp17 & tmp18 tmp20 = tmp19 & tmp2 tmp21 = tl.load(in_ptr0 + (-20 + x2 + 4 * y0 + 16 * y1), tmp20 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp22 = tl.where(tmp19, tmp21, tmp13) tmp23 = tl.where(tmp5, tmp16, tmp22) tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype) tmp25 = tl.where(tmp2, tmp23, tmp24) tmp26 = tmp0 < tmp4 tmp27 = tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK]) tmp28 = tmp27 >= tmp4 tmp29 = tmp27 < tmp1 tmp30 = tmp28 & tmp29 tmp31 = tmp30 & tmp26 tmp32 = tl.load(in_ptr0 + (12 + x2 + 4 * y0 + 16 * y1), tmp31 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp33 = tl.where(tmp30, tmp32, tmp13) tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype) tmp35 = tl.where(tmp26, tmp33, tmp34) tmp36 = tmp0 >= tmp4 tmp37 = tmp0 < tmp1 tmp38 = tmp36 & tmp37 tmp39 = tl.load(in_ptr0 + (-4 + x2 + 4 * y0 + 16 * y1), tmp38 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp40 = tl.where(tmp38, tmp39, tmp13) tmp41 = tl.where(tmp26, tmp35, tmp40) tmp42 = tl.where(tmp2, tmp25, tmp41) tl.store(out_ptr0 + (y0 + 6 * x2 + 24 * y1), tmp42, xmask & ymask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3), (12, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf1 = empty_strided_cuda((4, 4, 6), (24, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_copy_0[grid(24, 4)](primals_1, buf1, 24, 4, XBLOCK =4, YBLOCK=32, num_warps=4, num_stages=1) del primals_1 buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1,), padding=(0,), dilation=(1,), 
transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4), (16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(64)](buf3, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0), primals_2, buf1 class TokenEmbeddingNew(nn.Module): def __init__(self, c_in, d_model): super(TokenEmbeddingNew, self).__init__() padding = 1 if tuple(int(v) for v in torch.__version__.split('+')[0].split('.')[:2]) >= (1, 5) else 2 self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model, kernel_size=3, padding=padding, padding_mode='circular') for m in self.modules(): if isinstance(m, nn.Conv1d): nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='leaky_relu') def forward(self, input_0): primals_2 = self.tokenConv.weight primals_3 = self.tokenConv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
TheaperDeng/Informer2020
TokenEmbedding
false
14,475
[ "Apache-2.0" ]
2,296
90e080593e9c345f5f9676359bb3d1618e9aa735
https://github.com/TheaperDeng/Informer2020/tree/90e080593e9c345f5f9676359bb3d1618e9aa735
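triton_poi_fused_copy_0 materializes the circular padding explicitly, turning the permuted (4, 4, 4) input into the (4, 4, 6) buffer buf1, so the extern convolution can then run with padding=(0,). A self-contained sketch (not part of the record) of that padding step:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4)                     # (batch, channels, length), already permuted
padded = F.pad(x, (1, 1), mode='circular')  # what the fused copy kernel builds
assert padded.shape == (4, 4, 6)            # matches buf1
assert torch.equal(padded[..., 0], x[..., -1])  # left pad wraps from the end
assert torch.equal(padded[..., -1], x[..., 0])  # right pad wraps from the start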
Normalize
import torch import torch.utils.data import torch import torch.nn as nn class Normalize(nn.Module): def __init__(self, power=2): super(Normalize, self).__init__() self.power = power def forward(self, x): norm = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power) out = x.div(norm + 1e-07) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_pow_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-07 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_pow_sum_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, class NormalizeNew(nn.Module): def __init__(self, power=2): super(NormalizeNew, self).__init__() self.power = power def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Theomat/colorization-av-enseirb-2020
Normalize
false
14,476
[ "Apache-2.0" ]
1,422
c54c2388ea39a62289fa2f1c51b4757bf55d3c4f
https://github.com/Theomat/colorization-av-enseirb-2020/tree/c54c2388ea39a62289fa2f1c51b4757bf55d3c4f
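For power=2 the single fused kernel squares the four channel values at each spatial position, sums them, takes the square root, adds the 1e-07 epsilon, and divides. A minimal sketch (not part of the record); the +0.5 offset is only there to keep the norm comfortably above the epsilon so the unit-norm check stays tight:

import torch

x = torch.rand(4, 4, 4, 4) + 0.5
norm = x.pow(2).sum(1, keepdim=True).pow(0.5)  # per-position norm over dim 1, as in the kernel
out = x / (norm + 1e-07)
assert torch.allclose(out.pow(2).sum(1).sqrt(), torch.ones(4, 4, 4), atol=1e-5)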
Linear3D
import math import torch import torch as th from torch.nn import Parameter def functional_linear3d(input, weight, bias=None): """ Apply a linear transformation to the incoming data: :math:`y = xA^T + b`. Shape: - Input: :math:`(N, *, in\\_features)` where `*` means any number of additional dimensions - Weight: :math:`(out\\_features, in\\_features)` - Bias: :math:`(out\\_features)` - Output: :math:`(N, *, out\\_features)` """ output = input.transpose(0, 1).matmul(weight) if bias is not None: output += bias.unsqueeze(1) return output.transpose(0, 1) class Linear3D(th.nn.Module): """Applies a linear transformation to the incoming data: :math:`y = Ax + b`. Args: in_features: size of each input sample out_features: size of each output sample bias: If set to False, the layer will not learn an additive bias. Default: ``True`` Shape: - Input: :math:`(N, *, in\\_features)` where :math:`*` means any number of additional dimensions - Output: :math:`(N, *, out\\_features)` where all but the last dimension are the same shape as the input. Attributes: weight: the learnable weights of the module of shape `(out_features x in_features)` bias: the learnable bias of the module of shape `(out_features)` Examples:: >>> m = nn.Linear(3, 20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) """ def __init__(self, channels, in_features, out_features, batch_size=-1, bias=True, noise=False): super(Linear3D, self).__init__() self.in_features = in_features self.out_features = out_features self.channels = channels if noise: self.in_features += 1 self.weight = Parameter(th.Tensor(channels, self.in_features, out_features)) if bias: self.bias = Parameter(th.Tensor(channels, out_features)) else: self.register_parameter('bias', None) if noise: self.register_buffer('noise', th.Tensor(batch_size, channels, 1)) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def forward(self, input, adj_matrix=None, permutation_matrix=None): input_ = [input] if input.dim() == 2: if permutation_matrix is not None: input_.append(input.unsqueeze(1).expand([input.shape[0], self.channels, permutation_matrix.shape[1]])) elif hasattr(self, 'noise'): input_.append(input.unsqueeze(1).expand([input.shape[0], self.channels, self.in_features - 1])) else: input_.append(input.unsqueeze(1).expand([input.shape[0], self.channels, self.in_features])) if adj_matrix is not None and permutation_matrix is not None: input_.append((input_[-1].transpose(0, 1) @ (adj_matrix.t(). unsqueeze(2) * permutation_matrix)).transpose(0, 1)) elif adj_matrix is not None: input_.append(input_[-1] * adj_matrix.t().unsqueeze(0)) elif permutation_matrix is not None: input_.append((input_[-1].transpose(0, 1) @ permutation_matrix).t() ) if hasattr(self, 'noise'): self.noise.normal_() input_.append(th.cat([input_[-1], self.noise], 2)) return functional_linear3d(input_[-1], self.weight, self.bias) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format(self. in_features, self.out_features, self.bias is not None) def apply_filter(self, permutation_matrix): transpose_weight = self.weight.transpose(1, 2) @ permutation_matrix self.weight = Parameter(transpose_weight.transpose(1, 2)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'channels': 4, 'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch as th from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 % 4 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2 + 64 * x1), xmask) tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 64 x2 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x2 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x4, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(256)](primals_1, buf0, 256, XBLOCK= 128, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(256)](primals_2, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2) del buf1 buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused_add_2[grid(256)](buf3, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return reinterpret_tensor(buf3, (4, 4, 4, 4), (16, 64, 4, 1), 0 ), reinterpret_tensor(buf0, (16, 4, 4), (16, 1, 4), 0) def functional_linear3d(input, weight, bias=None): """ Apply a linear transformation to the incoming data: :math:`y = xA^T + b`. Shape: - Input: :math:`(N, *, in\\_features)` where `*` means any number of additional dimensions - Weight: :math:`(out\\_features, in\\_features)` - Bias: :math:`(out\\_features)` - Output: :math:`(N, *, out\\_features)` """ output = input.transpose(0, 1).matmul(weight) if bias is not None: output += bias.unsqueeze(1) return output.transpose(0, 1) class Linear3DNew(th.nn.Module): """Applies a linear transformation to the incoming data: :math:`y = Ax + b`. 
Args: in_features: size of each input sample out_features: size of each output sample bias: If set to False, the layer will not learn an additive bias. Default: ``True`` Shape: - Input: :math:`(N, *, in\\_features)` where :math:`*` means any number of additional dimensions - Output: :math:`(N, *, out\\_features)` where all but the last dimension are the same shape as the input. Attributes: weight: the learnable weights of the module of shape `(out_features x in_features)` bias: the learnable bias of the module of shape `(out_features)` Examples:: >>> m = nn.Linear(3, 20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) """ def __init__(self, channels, in_features, out_features, batch_size=-1, bias=True, noise=False): super(Linear3DNew, self).__init__() self.in_features = in_features self.out_features = out_features self.channels = channels if noise: self.in_features += 1 self.weight = Parameter(th.Tensor(channels, self.in_features, out_features)) if bias: self.bias = Parameter(th.Tensor(channels, out_features)) else: self.register_parameter('bias', None) if noise: self.register_buffer('noise', th.Tensor(batch_size, channels, 1)) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.weight.size(1)) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def extra_repr(self): return 'in_features={}, out_features={}, bias={}'.format(self. in_features, self.out_features, self.bias is not None) def apply_filter(self, permutation_matrix): transpose_weight = self.weight.transpose(1, 2) @ permutation_matrix self.weight = Parameter(transpose_weight.transpose(1, 2)) def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
TheSignPainter/CausalDiscoveryToolbox
Linear3D
false
14,477
[ "MIT" ]
528
33eae18184905e505be978b08003b9477bf38e0c
https://github.com/TheSignPainter/CausalDiscoveryToolbox/tree/33eae18184905e505be978b08003b9477bf38e0c
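functional_linear3d applies one independent (in_features x out_features) matrix per channel; the compiled graph realizes it as a clone into a bmm-friendly layout, one batched matrix multiply, and a fused bias add. The docstring's m = nn.Linear(3, 20, 30) example is inherited wording and does not match this three-argument signature. A self-contained sketch (not part of the record) of the 3-D case, with an einsum spelling of the same contraction:

import math
import torch

channels, in_f, out_f, n = 4, 4, 4, 8
w = torch.empty(channels, in_f, out_f).uniform_(-1, 1) / math.sqrt(in_f)
b = torch.zeros(channels, out_f)
x = torch.rand(n, channels, in_f)
ref = (x.transpose(0, 1).matmul(w) + b.unsqueeze(1)).transpose(0, 1)  # functional_linear3d
ein = torch.einsum('ncf,cfo->nco', x, w) + b                          # same contraction
assert torch.allclose(ref, ein, atol=1e-5)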
TemporalEmbedding
import math import torch import torch.nn as nn class FixedEmbedding(nn.Module): def __init__(self, c_in, d_model): super(FixedEmbedding, self).__init__() w = torch.zeros(c_in, d_model).float() w.require_grad = False position = torch.arange(0, c_in).float().unsqueeze(1) div_term = (torch.arange(0, d_model, 2).float() * -(math.log( 10000.0) / d_model)).exp() w[:, 0::2] = torch.sin(position * div_term) w[:, 1::2] = torch.cos(position * div_term) self.emb = nn.Embedding(c_in, d_model) self.emb.weight = nn.Parameter(w, requires_grad=False) def forward(self, x): return self.emb(x).detach() class TemporalEmbedding(nn.Module): def __init__(self, d_model, embed_type='fixed', freq='h'): super(TemporalEmbedding, self).__init__() minute_size = 4 hour_size = 24 weekday_size = 7 day_size = 32 month_size = 13 Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding if freq == 't': self.minute_embed = Embed(minute_size, d_model) self.hour_embed = Embed(hour_size, d_model) self.weekday_embed = Embed(weekday_size, d_model) self.day_embed = Embed(day_size, d_model) self.month_embed = Embed(month_size, d_model) def forward(self, x): x = x.long() minute_x = self.minute_embed(x[:, :, 4]) if hasattr(self, 'minute_embed') else 0.0 hour_x = self.hour_embed(x[:, :, 3]) weekday_x = self.weekday_embed(x[:, :, 2]) day_x = self.day_embed(x[:, :, 1]) month_x = self.month_embed(x[:, :, 0]) return hour_x + weekday_x + day_x + month_x + minute_x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_embedding_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x2 = xindex // 16 x0 = xindex % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (12 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (8 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (4 + x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp26 = tl.load(in_ptr0 + (x1 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp1 = tmp0.to(tl.int64) tmp2 = tl.full([XBLOCK], 24, tl.int32) tmp3 = tmp1 + tmp2 tmp4 = tmp1 < 0 tmp5 = tl.where(tmp4, tmp3, tmp1) tl.device_assert((0 <= tmp5) & (tmp5 < 24) | ~xmask, 'index out of bounds: 0 <= tmp5 < 24') tmp7 = tl.load(in_ptr1 + (x0 + 4 * tmp5), xmask) tmp9 = tmp8.to(tl.int64) tmp10 = tl.full([XBLOCK], 7, tl.int32) tmp11 = tmp9 + tmp10 tmp12 = tmp9 < 0 tmp13 = tl.where(tmp12, tmp11, tmp9) tl.device_assert((0 <= tmp13) & (tmp13 < 7) | ~xmask, 'index out of bounds: 0 <= tmp13 < 7') tmp15 = tl.load(in_ptr2 + (x0 + 4 * tmp13), xmask) tmp16 = tmp7 + tmp15 tmp18 = tmp17.to(tl.int64) tmp19 = tl.full([XBLOCK], 32, tl.int32) tmp20 = tmp18 + tmp19 tmp21 = tmp18 < 0 tmp22 = tl.where(tmp21, tmp20, tmp18) tl.device_assert((0 <= tmp22) & (tmp22 < 32) | ~xmask, 'index out of bounds: 0 <= tmp22 < 32') tmp24 = tl.load(in_ptr3 + (x0 + 4 * tmp22), xmask) tmp25 = tmp16 + tmp24 tmp27 = tmp26.to(tl.int64) tmp28 = tl.full([XBLOCK], 13, tl.int32) tmp29 = tmp27 + tmp28 tmp30 = tmp27 < 0 tmp31 = tl.where(tmp30, tmp29, tmp27) tl.device_assert((0 <= tmp31) & (tmp31 < 13) | ~xmask, 'index out of bounds: 0 <= tmp31 < 13') tmp33 = tl.load(in_ptr4 + (x0 + 4 * tmp31), xmask) tmp34 = tmp25 + tmp33 tmp35 = 0.0 tmp36 = tmp34 + tmp35 tl.store(out_ptr0 + x4, tmp36, xmask) def call(args): arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (24, 4), (4, 1)) assert_size_stride(arg2_1, (7, 4), (4, 1)) assert_size_stride(arg3_1, (32, 4), (4, 1)) assert_size_stride(arg4_1, (13, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_embedding_0[grid(256)](arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 del arg2_1 del arg3_1 del arg4_1 return buf0, class FixedEmbedding(nn.Module): def __init__(self, c_in, d_model): super(FixedEmbedding, self).__init__() w = torch.zeros(c_in, d_model).float() w.require_grad = False position = torch.arange(0, c_in).float().unsqueeze(1) div_term = (torch.arange(0, d_model, 2).float() * -(math.log( 10000.0) / d_model)).exp() w[:, 0::2] = torch.sin(position * div_term) w[:, 1::2] = torch.cos(position * div_term) self.emb = nn.Embedding(c_in, d_model) self.emb.weight = nn.Parameter(w, requires_grad=False) def forward(self, x): return self.emb(x).detach() class TemporalEmbeddingNew(nn.Module): def __init__(self, d_model, embed_type='fixed', 
freq='h'): super(TemporalEmbeddingNew, self).__init__() minute_size = 4 hour_size = 24 weekday_size = 7 day_size = 32 month_size = 13 Embed = FixedEmbedding if embed_type == 'fixed' else nn.Embedding if freq == 't': self.minute_embed = Embed(minute_size, d_model) self.hour_embed = Embed(hour_size, d_model) self.weekday_embed = Embed(weekday_size, d_model) self.day_embed = Embed(day_size, d_model) self.month_embed = Embed(month_size, d_model) def forward(self, input_0): arg1_1 = self.hour_embed.emb.weight arg2_1 = self.weekday_embed.emb.weight arg3_1 = self.day_embed.emb.weight arg4_1 = self.month_embed.emb.weight arg0_1 = input_0 output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1]) return output[0]
TheaperDeng/Informer2020
TemporalEmbedding
false
14,478
[ "Apache-2.0" ]
2,296
90e080593e9c345f5f9676359bb3d1618e9aa735
https://github.com/TheaperDeng/Informer2020/tree/90e080593e9c345f5f9676359bb3d1618e9aa735
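Because embed_type='fixed' freezes the sinusoidal tables as non-trainable parameters, the compiled kernel can gather from all four tables and sum in a single pass; the trailing + 0.0 in the kernel is the absent minute embedding for freq='h'. (In FixedEmbedding, w.require_grad = False is a typo for requires_grad and is a no-op; the nn.Parameter(w, requires_grad=False) call is what actually freezes the table.) A self-contained sketch (not part of the record) that rebuilds the tables with an illustrative helper, fixed_table, and replays the gather-and-sum:

import math
import torch

def fixed_table(c_in, d_model=4):
    # Illustrative helper: same sinusoidal construction as FixedEmbedding's buffer.
    pos = torch.arange(c_in).float().unsqueeze(1)
    div = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()
    w = torch.zeros(c_in, d_model)
    w[:, 0::2] = torch.sin(pos * div)
    w[:, 1::2] = torch.cos(pos * div)
    return w

hour, weekday, day, month = (fixed_table(n) for n in (24, 7, 32, 13))
x = torch.rand(4, 4, 4, 4).long()  # rand in [0, 1) floors to all-zero indices
out = hour[x[:, :, 3]] + weekday[x[:, :, 2]] + day[x[:, :, 1]] + month[x[:, :, 0]]
assert out.shape == (4, 4, 4, 4)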