Dataset schema (for string and list columns the reported min/max are value lengths; for int64 columns they are value ranges; `synthetic` is a bool with a single observed class):

| Column | Dtype | Min | Max |
| --- | --- | --- | --- |
| entry_point | string (lengths) | 1 | 65 |
| original_triton_python_code | string (lengths) | 208 | 619k |
| optimised_triton_code | string (lengths) | 1.15k | 275k |
| repo_name | string (lengths) | 7 | 115 |
| module_name | string (lengths) | 1 | 65 |
| synthetic | bool (1 class) | n/a | n/a |
| uuid | int64 | 0 | 18.5k |
| licenses | list (lengths) | 1 | 6 |
| stars | int64 | 0 | 19.8k |
| sha | string (lengths) | 40 | 40 |
| repo_link | string (lengths) | 72 | 180 |
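Each record below pairs an eager PyTorch module (`original_triton_python_code`) with the Triton code TorchInductor generates for it (`optimised_triton_code`). A minimal loading sketch with the Hugging Face `datasets` library; the dataset id here is a placeholder assumption, not the real repository path:

```python
# Sketch only: "your-org/pytorch-to-triton-pairs" is a hypothetical dataset id.
from datasets import load_dataset

ds = load_dataset("your-org/pytorch-to-triton-pairs", split="train")
row = ds[0]
print(row["entry_point"], row["repo_name"], row["stars"])
# Both code columns hold plain Python source stored as strings.
print(row["original_triton_python_code"][:200])
```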
---

entry_point: CrossEn

original_triton_python_code:

```python
import torch
from torch import nn
import torch.nn.functional as F
import torch.cuda


class CrossEn(nn.Module):

    def forward(self, sim_matrix):
        logpt = F.log_softmax(sim_matrix, dim=-1)
        logpt = torch.diag(logpt)
        nce_loss = -logpt
        sim_loss = nce_loss.mean()
        return sim_loss


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.cuda

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # First half of log-softmax: subtract each row's max for numerical stability.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_per_fused__log_softmax_diagonal_copy_mean_neg_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Second half: log-sum-exp per row, gather the diagonal (stride 5 on a 4x4),
    # negate, and reduce to the mean.
    RBLOCK: tl.constexpr = 4
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + 5 * r0, None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp7 = tl_math.exp(tmp6)
    tmp8 = tmp5 + tmp7
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp12 = tl_math.log(tmp11)
    tmp13 = tmp0 - tmp12
    tmp14 = -tmp13
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
    tmp17 = tl.sum(tmp15, 1)[:, None]
    tmp18 = 4.0
    tmp19 = tmp17 / tmp18
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp19, None)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__log_softmax_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        triton_per_fused__log_softmax_diagonal_copy_mean_neg_1[grid(1)](buf2, buf0, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del buf0
    return buf2,


class CrossEnNew(nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
```
repo_name: LoveEachDay/towhee
module_name: CrossEn
synthetic: false
uuid: 11,651
licenses: ["Apache-2.0"]
stars: 0
sha: 513c9c2626676cadaaf0a16ac3c828d96bec91a1
repo_link: https://github.com/LoveEachDay/towhee/tree/513c9c2626676cadaaf0a16ac3c828d96bec91a1
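The two code columns of any record can be checked for numerical agreement. A minimal sketch, assuming a CUDA device and that this record's two code strings have been exec'd so `CrossEn`, `CrossEnNew`, and `get_inputs` are in scope; the tolerances are arbitrary float32 choices, not values from the dataset:

```python
import torch

x = get_inputs()[0].cuda()
eager = CrossEn()(x)
triton_out = CrossEnNew()(x.clone())  # call() empties its arg list; clone just to be safe
assert torch.allclose(eager, triton_out, rtol=1e-5, atol=1e-6)
```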
---

entry_point: HardMish

original_triton_python_code:

```python
import torch
from torch import nn
import torch.cuda


def hard_mish(x, inplace: 'bool'=False):
    if inplace:
        return x.mul_(0.5 * (x + 2).clamp(min=0, max=2))
    else:
        return 0.5 * x * (x + 2).clamp(min=0, max=2)


class HardMish(nn.Module):
    """
    Hard Mish
    Experimental, based on notes by Mish author Diganta Misra at
    https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md

    Args:
        inplace (`Bool`): whether use inplace version.

    Returns:
        (`torch.Tensor`): output tensor after activation.
    """

    def __init__(self, inplace: 'bool'=False):
        super().__init__()
        self.inplace = inplace

    def forward(self, x):
        return hard_mish(x, self.inplace)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.cuda

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_clamp_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 2.0
    tmp4 = tmp0 + tmp3
    tmp5 = 0.0
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tmp7 = triton_helpers.minimum(tmp6, tmp3)
    tmp8 = tmp2 * tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_clamp_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


def hard_mish(x, inplace: 'bool'=False):
    if inplace:
        return x.mul_(0.5 * (x + 2).clamp(min=0, max=2))
    else:
        return 0.5 * x * (x + 2).clamp(min=0, max=2)


class HardMishNew(nn.Module):
    """
    Hard Mish
    Experimental, based on notes by Mish author Diganta Misra at
    https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md

    Args:
        inplace (`Bool`): whether use inplace version.

    Returns:
        (`torch.Tensor`): output tensor after activation.
    """

    def __init__(self, inplace: 'bool'=False):
        super().__init__()
        self.inplace = inplace

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
```
repo_name: LoveEachDay/towhee
module_name: HardMish
synthetic: false
uuid: 11,652
licenses: ["Apache-2.0"]
stars: 0
sha: 513c9c2626676cadaaf0a16ac3c828d96bec91a1
repo_link: https://github.com/LoveEachDay/towhee/tree/513c9c2626676cadaaf0a16ac3c828d96bec91a1
---

entry_point: Conv2dSame

original_triton_python_code:

```python
import math
import torch
from typing import List
from typing import Union
from torch import nn
import torch.nn.functional as F
from typing import Tuple
import torch.cuda
from typing import Optional
from torch.nn.common_types import _size_2_t


def get_same_padding(x: 'int', k: 'int', s: 'int', d: 'int') -> int:
    """
    Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution.

    Args:
        x (`Int`): Input tensor shape.
        k (`Int`): Convolution kernel size.
        s (`Int`): Convolution stride parameter.
        d (`Int`): Convolution dilation parameter.

    Returns:
        (`Int`): Padding value for 'SAME' padding.
    """
    return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)


def pad_same(x: 'torch.Tensor', k: 'List[int]', s: 'List[int]', d: 'List[int]'=(1, 1), value: 'float'=0) -> torch.Tensor:
    """
    Dynamically pad input x with 'SAME' padding for conv with specified args.

    Args:
        x (`torch.Tensor`): Input tensor.
        k (`List[Int]`): Convolution kernel sizes.
        s (`List[Int]`): Convolution stride parameters.
        d (`List[Int]`): Convolution dilation parameter.
        value (`Float`): Value for padding.

    Returns:
        (`torch.Tensor`): Output Tensor for conv with 'SAME' padding.
    """
    ih, iw = x.size()[-2:]
    pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1])
    if pad_h > 0 or pad_w > 0:
        x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value)
    return x


def conv2d_same(x: 'torch.Tensor', weight: 'torch.Tensor', bias: 'Optional[torch.Tensor]'=None, stride: 'Tuple[int, int]'=(1, 1), padding: 'Tuple[int, int]'=(0, 0), dilation: 'Tuple[int, int]'=(1, 1), groups: 'int'=1):
    """
    Tensorflow like 'SAME' convolution function for 2D convolutions.
    """
    x = pad_same(x, weight.shape[-2:], stride, dilation)
    _ = padding
    return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups)


class Conv2dSame(nn.Conv2d):
    """
    Tensorflow like 'SAME' convolution wrapper for 2D convolutions.

    Args:
        in_channels (`int`): Number of channels in the input image.
        out_channels (`int`): Number of channels produced by the convolution.
        kernel_size (`Union[int, Tuple]`): Size of the convolving kernel.
        stride (`Union[int, Tuple]`): Stride of the convolution.
        padding (`Union[int, Tuple, str]`): Padding added to all four sides of the input.
        dilation (`int`): Spacing between kernel elements.
        groups (`int`): Number of blocked connections from input channels to output channels.
        bias (`bool`): If True, adds a learnable bias to the output.
    """

    def __init__(self, in_channels: 'int', out_channels: 'int', kernel_size: '_size_2_t', stride: '_size_2_t'=1, padding: 'Union[str, _size_2_t]'=0, dilation: '_size_2_t'=1, groups: 'int'=1, bias: 'bool'=True) -> None:
        super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
        _ = padding

    def forward(self, x: 'torch.Tensor') -> torch.Tensor:
        return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
```

optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from typing import List
from typing import Union
from torch import nn
import torch.nn.functional as F
from typing import Tuple
import torch.cuda
from typing import Optional
from torch.nn.common_types import _size_2_t

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Materialize the asymmetric 'SAME' pad: 4x4 input -> 7x7 buffer
    # (1 row/col before, 2 after), zeros outside the source bounds.
    xnumel = 784
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 7 % 7
    x0 = xindex % 7
    x2 = xindex // 49
    x4 = xindex
    tmp0 = -1 + x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = -1 + x0
    tmp6 = tmp5 >= tmp1
    tmp7 = tmp5 < tmp3
    tmp8 = tmp2 & tmp4
    tmp9 = tmp8 & tmp6
    tmp10 = tmp9 & tmp7
    tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask, other=0.0)
    tl.store(out_ptr0 + x4, tmp11, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Add the convolution bias in place.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_0[grid(784)](primals_3, buf0, 784, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
        buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_1[grid(256)](buf2, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
    return buf2, primals_1, buf0


def get_same_padding(x: 'int', k: 'int', s: 'int', d: 'int') -> int:
    """
    Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution.

    Args:
        x (`Int`): Input tensor shape.
        k (`Int`): Convolution kernel size.
        s (`Int`): Convolution stride parameter.
        d (`Int`): Convolution dilation parameter.

    Returns:
        (`Int`): Padding value for 'SAME' padding.
    """
    return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)


def pad_same(x: 'torch.Tensor', k: 'List[int]', s: 'List[int]', d: 'List[int]'=(1, 1), value: 'float'=0) -> torch.Tensor:
    """
    Dynamically pad input x with 'SAME' padding for conv with specified args.

    Args:
        x (`torch.Tensor`): Input tensor.
        k (`List[Int]`): Convolution kernel sizes.
        s (`List[Int]`): Convolution stride parameters.
        d (`List[Int]`): Convolution dilation parameter.
        value (`Float`): Value for padding.

    Returns:
        (`torch.Tensor`): Output Tensor for conv with 'SAME' padding.
    """
    ih, iw = x.size()[-2:]
    pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1])
    if pad_h > 0 or pad_w > 0:
        x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value)
    return x


def conv2d_same(x: 'torch.Tensor', weight: 'torch.Tensor', bias: 'Optional[torch.Tensor]'=None, stride: 'Tuple[int, int]'=(1, 1), padding: 'Tuple[int, int]'=(0, 0), dilation: 'Tuple[int, int]'=(1, 1), groups: 'int'=1):
    """
    Tensorflow like 'SAME' convolution function for 2D convolutions.
    """
    x = pad_same(x, weight.shape[-2:], stride, dilation)
    _ = padding
    return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups)


class Conv2dSameNew(nn.Conv2d):
    """
    Tensorflow like 'SAME' convolution wrapper for 2D convolutions.

    Args:
        in_channels (`int`): Number of channels in the input image.
        out_channels (`int`): Number of channels produced by the convolution.
        kernel_size (`Union[int, Tuple]`): Size of the convolving kernel.
        stride (`Union[int, Tuple]`): Stride of the convolution.
        padding (`Union[int, Tuple, str]`): Padding added to all four sides of the input.
        dilation (`int`): Spacing between kernel elements.
        groups (`int`): Number of blocked connections from input channels to output channels.
        bias (`bool`): If True, adds a learnable bias to the output.
    """

    def __init__(self, in_channels: 'int', out_channels: 'int', kernel_size: '_size_2_t', stride: '_size_2_t'=1, padding: 'Union[str, _size_2_t]'=0, dilation: '_size_2_t'=1, groups: 'int'=1, bias: 'bool'=True) -> None:
        super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
        _ = padding

    def forward(self, input_0):
        primals_1 = self.weight
        primals_2 = self.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
```
repo_name: LoveEachDay/towhee
module_name: Conv2dSame
synthetic: false
uuid: 11,653
licenses: ["Apache-2.0"]
stars: 0
sha: 513c9c2626676cadaaf0a16ac3c828d96bec91a1
repo_link: https://github.com/LoveEachDay/towhee/tree/513c9c2626676cadaaf0a16ac3c828d96bec91a1
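The 7x7 padded buffer allocated in `call` follows directly from `get_same_padding`: with input size 4, kernel 4, stride 1, dilation 1 it returns pad = max((ceil(4/1) - 1) * 1 + (4 - 1) * 1 + 1 - 4, 0) = 3, split asymmetrically into 1 before and 2 after, hence 4 + 3 = 7 per spatial dim and the `-1 + x0` / `-1 + x1` offsets in the pad kernel. A quick check in plain Python:

```python
import math

def get_same_padding(x, k, s, d):
    return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)

pad = get_same_padding(4, 4, 1, 1)
before, after = pad // 2, pad - pad // 2
print(pad, before, after, 4 + pad)  # 3 1 2 7
```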
---

entry_point: HardSwish

original_triton_python_code:

```python
import torch
from torch import nn
import torch.nn.functional as F
import torch.cuda


def hard_swish(x: 'torch.Tensor', inplace: 'bool'=False) -> torch.Tensor:
    inner = F.relu6(x + 3.0).div_(6.0)
    return x.mul_(inner) if inplace else x.mul(inner)


class HardSwish(nn.Module):
    """
    HardSwish activation layer.
    Applies the hardswish function, element-wise.
    Described in: https://arxiv.org/abs/1905.02244.

    Args:
        inplace (`Bool`): whether use inplace version.

    Returns:
        (`torch.Tensor`): output tensor after activation.
    """

    def __init__(self, inplace: 'bool'=False) -> None:
        super().__init__()
        self.inplace = inplace

    def forward(self, x: 'torch.Tensor') -> torch.Tensor:
        return hard_swish(x, self.inplace)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.nn.functional as F
import torch.cuda

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Fused hardswish: x * clamp(x + 3, 0, 6) * (1/6); 0.1666... is the folded 1/6.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 3.0
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 6.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = 0.16666666666666666
    tmp8 = tmp6 * tmp7
    tmp9 = tmp0 * tmp8
    tl.store(out_ptr0 + x0, tmp9, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


def hard_swish(x: 'torch.Tensor', inplace: 'bool'=False) -> torch.Tensor:
    inner = F.relu6(x + 3.0).div_(6.0)
    return x.mul_(inner) if inplace else x.mul(inner)


class HardSwishNew(nn.Module):
    """
    HardSwish activation layer.
    Applies the hardswish function, element-wise.
    Described in: https://arxiv.org/abs/1905.02244.

    Args:
        inplace (`Bool`): whether use inplace version.

    Returns:
        (`torch.Tensor`): output tensor after activation.
    """

    def __init__(self, inplace: 'bool'=False) -> None:
        super().__init__()
        self.inplace = inplace

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
```
repo_name: LoveEachDay/towhee
module_name: HardSwish
synthetic: false
uuid: 11,654
licenses: ["Apache-2.0"]
stars: 0
sha: 513c9c2626676cadaaf0a16ac3c828d96bec91a1
repo_link: https://github.com/LoveEachDay/towhee/tree/513c9c2626676cadaaf0a16ac3c828d96bec91a1
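`hard_swish` here computes the same function as PyTorch's built-in hardswish, x * relu6(x + 3) / 6. A small sanity check, assuming the row's eager code has been exec'd so `hard_swish` is in scope:

```python
import torch
import torch.nn.functional as F

x = torch.randn(64)
# clone because hard_swish's relu6(...).div_ chain is safe, but we keep x pristine
assert torch.allclose(hard_swish(x.clone()), F.hardswish(x))
```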
---

entry_point: CMlp

original_triton_python_code:

```python
import torch
from torch import nn
import torch.cuda


def conv_1x1x1(inp, oup, groups=1):
    return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups)


class CMlp(nn.Module):
    """
    CMlp
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = conv_1x1x1(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = conv_1x1x1(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4}]
```

optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.cuda

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_gelu_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Conv bias add fused with exact (erf-based) GELU; 0.7071... is 1/sqrt(2).
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 64
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.5
    tmp4 = tmp2 * tmp3
    tmp5 = 0.7071067811865476
    tmp6 = tmp2 * tmp5
    tmp7 = libdevice.erf(tmp6)
    tmp8 = 1.0
    tmp9 = tmp7 + tmp8
    tmp10 = tmp4 * tmp9
    tl.store(in_out_ptr0 + x2, tmp2, xmask)
    tl.store(out_ptr0 + x2, tmp10, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Bias add for the second 1x1x1 conv.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 64
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 1, 1, 1), (4, 1, 1, 1, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
        buf1 = buf0
        del buf0
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_gelu_0[grid(256)](buf1, primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf3 = extern_kernels.convolution(reinterpret_tensor(buf2, (1, 4, 4, 4, 4), (0, 64, 16, 4, 1), 0), primals_4, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
        buf4 = buf3
        del buf3
        triton_poi_fused_convolution_1[grid(256)](buf4, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
    return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
            primals_1, primals_4,
            reinterpret_tensor(primals_3, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0),
            buf1,
            reinterpret_tensor(buf2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0))


def conv_1x1x1(inp, oup, groups=1):
    return nn.Conv3d(inp, oup, (1, 1, 1), (1, 1, 1), (0, 0, 0), groups=groups)


class CMlpNew(nn.Module):
    """
    CMlp
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = conv_1x1x1(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = conv_1x1x1(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
```
repo_name: LoveEachDay/towhee
module_name: CMlp
synthetic: false
uuid: 11,655
licenses: ["Apache-2.0"]
stars: 0
sha: 513c9c2626676cadaaf0a16ac3c828d96bec91a1
repo_link: https://github.com/LoveEachDay/towhee/tree/513c9c2626676cadaaf0a16ac3c828d96bec91a1
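The fused kernel above inlines exact (erf-based) GELU, 0.5 * x * (1 + erf(x / sqrt(2))), which is what `nn.GELU` computes by default. A quick standalone check:

```python
import torch

x = torch.randn(32)
manual = 0.5 * x * (1 + torch.erf(x * 0.7071067811865476))  # 1/sqrt(2)
assert torch.allclose(manual, torch.nn.functional.gelu(x), atol=1e-6)
```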
---

entry_point: Upsampler

original_triton_python_code:

```python
import math
import torch
import torch.utils.data
from torchvision.transforms import *


class ConvBlock(torch.nn.Module):

    def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None):
        super(ConvBlock, self).__init__()
        self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
        self.norm = norm
        if self.norm == 'batch':
            self.bn = torch.nn.BatchNorm2d(output_size)
        elif self.norm == 'instance':
            self.bn = torch.nn.InstanceNorm2d(output_size)
        self.activation = activation
        if self.activation == 'relu':
            self.act = torch.nn.ReLU(True)
        elif self.activation == 'prelu':
            self.act = torch.nn.PReLU()
        elif self.activation == 'lrelu':
            self.act = torch.nn.LeakyReLU(0.2, True)
        elif self.activation == 'tanh':
            self.act = torch.nn.Tanh()
        elif self.activation == 'sigmoid':
            self.act = torch.nn.Sigmoid()

    def forward(self, x):
        if self.norm is not None:
            out = self.bn(self.conv(x))
        else:
            out = self.conv(x)
        if self.activation is not None:
            return self.act(out)
        else:
            return out


class Upsampler(torch.nn.Module):

    def __init__(self, scale, n_feat, bn=False, act='prelu', bias=True):
        super(Upsampler, self).__init__()
        modules = []
        for _ in range(int(math.log(scale, 2))):
            modules.append(ConvBlock(n_feat, 4 * n_feat, 3, 1, 1, bias, activation=None, norm=None))
            modules.append(torch.nn.PixelShuffle(2))
            if bn:
                modules.append(torch.nn.BatchNorm2d(n_feat))
        self.up = torch.nn.Sequential(*modules)
        self.activation = act
        if self.activation == 'relu':
            self.act = torch.nn.ReLU(True)
        elif self.activation == 'prelu':
            self.act = torch.nn.PReLU()
        elif self.activation == 'lrelu':
            self.act = torch.nn.LeakyReLU(0.2, True)
        elif self.activation == 'tanh':
            self.act = torch.nn.Tanh()
        elif self.activation == 'sigmoid':
            self.act = torch.nn.Sigmoid()

    def forward(self, x):
        out = self.up(x)
        if self.activation is not None:
            out = self.act(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'scale': 1.0, 'n_feat': 4}]
```

optimised_triton_code:

```python
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.utils.data
from torchvision.transforms import *

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__prelu_kernel_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp3 = tl.load(in_ptr1 + 0)
    tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp5 = tmp4 * tmp0
    tmp6 = tl.where(tmp2, tmp0, tmp5)
    tl.store(out_ptr0 + x0, tmp6, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__prelu_kernel_0[grid(256)](primals_1, primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
    return buf0, primals_1


class ConvBlock(torch.nn.Module):

    def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None):
        super(ConvBlock, self).__init__()
        self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
        self.norm = norm
        if self.norm == 'batch':
            self.bn = torch.nn.BatchNorm2d(output_size)
        elif self.norm == 'instance':
            self.bn = torch.nn.InstanceNorm2d(output_size)
        self.activation = activation
        if self.activation == 'relu':
            self.act = torch.nn.ReLU(True)
        elif self.activation == 'prelu':
            self.act = torch.nn.PReLU()
        elif self.activation == 'lrelu':
            self.act = torch.nn.LeakyReLU(0.2, True)
        elif self.activation == 'tanh':
            self.act = torch.nn.Tanh()
        elif self.activation == 'sigmoid':
            self.act = torch.nn.Sigmoid()

    def forward(self, x):
        if self.norm is not None:
            out = self.bn(self.conv(x))
        else:
            out = self.conv(x)
        if self.activation is not None:
            return self.act(out)
        else:
            return out


class UpsamplerNew(torch.nn.Module):

    def __init__(self, scale, n_feat, bn=False, act='prelu', bias=True):
        super(UpsamplerNew, self).__init__()
        modules = []
        for _ in range(int(math.log(scale, 2))):
            modules.append(ConvBlock(n_feat, 4 * n_feat, 3, 1, 1, bias, activation=None, norm=None))
            modules.append(torch.nn.PixelShuffle(2))
            if bn:
                modules.append(torch.nn.BatchNorm2d(n_feat))
        self.up = torch.nn.Sequential(*modules)
        self.activation = act
        if self.activation == 'relu':
            self.act = torch.nn.ReLU(True)
        elif self.activation == 'prelu':
            self.act = torch.nn.PReLU()
        elif self.activation == 'lrelu':
            self.act = torch.nn.LeakyReLU(0.2, True)
        elif self.activation == 'tanh':
            self.act = torch.nn.Tanh()
        elif self.activation == 'sigmoid':
            self.act = torch.nn.Sigmoid()

    def forward(self, input_0):
        primals_2 = self.act.weight
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0]
```
repo_name: HamsterBiz/iSeeBetter
module_name: Upsampler
synthetic: false
uuid: 11,656
licenses: ["MIT"]
stars: 0
sha: a71cee61583bdedab1f3b368e2cb7dc5ad969aed
repo_link: https://github.com/HamsterBiz/iSeeBetter/tree/a71cee61583bdedab1f3b368e2cb7dc5ad969aed
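Why the generated code for `Upsampler` is a single PReLU kernel: this row traces with `scale=1.0`, so `int(math.log(1.0, 2)) == 0`, the ConvBlock/PixelShuffle loop body never runs, and `self.up` is an empty `nn.Sequential`; only `self.act` (a PReLU with one shared weight, hence the scalar load from `in_ptr1 + 0`) survives into the graph. One-line check:

```python
import math
print(int(math.log(1.0, 2)))  # 0 -> no upsampling stages are appended
```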
---

entry_point: NextSentencePrediction

original_triton_python_code:

```python
import torch
import torch.nn as nn


class NextSentencePrediction(nn.Module):
    """
    2-class classification model : is_next, is_not_next
    """

    def __init__(self, hidden):
        """
        :param hidden: BERT model output size
        """
        super().__init__()
        self.linear = nn.Linear(hidden, 2)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, x):
        return self.softmax(self.linear(x[:, 0]))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'hidden': 4}]
```

optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Materialize the x[:, 0] slice as a contiguous copy.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tl.store(out_ptr0 + x2, tmp0, xmask)


@triton.jit
def triton_poi_fused__log_softmax_add_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Bias add fused with the max-subtraction half of log-softmax (2 classes).
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 2
    x1 = xindex // 2
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + 0)
    tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
    tmp7 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + 1)
    tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
    tmp2 = tmp0 + tmp1
    tmp6 = tmp3 + tmp5
    tmp10 = tmp7 + tmp9
    tmp11 = triton_helpers.maximum(tmp6, tmp10)
    tmp12 = tmp2 - tmp11
    tl.store(out_ptr0 + x2, tmp12, xmask)


@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Finish log-softmax: subtract log(sum(exp(...))) per row.
    xnumel = 32
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 2
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tl_math.exp(tmp1)
    tmp4 = tl_math.exp(tmp3)
    tmp5 = tmp2 + tmp4
    tmp6 = tl_math.log(tmp5)
    tmp7 = tmp0 - tmp6
    tl.store(out_ptr0 + x2, tmp7, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (2, 4), (4, 1))
    assert_size_stride(primals_3, (2,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf1)
        del primals_2
        buf2 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
        triton_poi_fused__log_softmax_add_1[grid(32)](buf1, primals_3, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1)
        del primals_3
        buf3 = reinterpret_tensor(buf1, (4, 4, 2), (8, 2, 1), 0)
        del buf1
        triton_poi_fused__log_softmax_2[grid(32)](buf2, buf3, 32, XBLOCK=32, num_warps=1, num_stages=1)
        del buf2
    return buf3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf3


class NextSentencePredictionNew(nn.Module):
    """
    2-class classification model : is_next, is_not_next
    """

    def __init__(self, hidden):
        """
        :param hidden: BERT model output size
        """
        super().__init__()
        self.linear = nn.Linear(hidden, 2)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, input_0):
        primals_2 = self.linear.weight
        primals_3 = self.linear.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
```
repo_name: JacobTyo/Syntax-Encoding_EMNLP2018
module_name: NextSentencePrediction
synthetic: false
uuid: 11,657
licenses: ["MIT"]
stars: 0
sha: 5aed2fdd01dc7d0baebbd555c97a25fedbde0c39
repo_link: https://github.com/JacobTyo/Syntax-Encoding_EMNLP2018/tree/5aed2fdd01dc7d0baebbd555c97a25fedbde0c39
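`triton_poi_fused_clone_0` is the `x[:, 0]` slice materialized as a contiguous copy: for the contiguous (4, 4, 4, 4) input with strides (64, 16, 4, 1), the offset `x0 + 64 * x1` with `x0` in [0, 16) walks exactly the first 16 floats of each batch item, i.e. the `[:, 0]` plane. Eager-mode equivalent of that addressing:

```python
import torch

x = torch.rand(4, 4, 4, 4)
sliced = x.reshape(4, 64)[:, :16].reshape(4, 4, 4)  # x0 + 64 * x1 addressing
assert torch.equal(sliced, x[:, 0])
```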
---

entry_point: Attention

original_triton_python_code:

```python
import math
import torch
import torch.nn as nn
import torch.nn.functional as F


class Attention(nn.Module):
    """
    Compute 'Scaled Dot Product Attention'
    """

    def forward(self, query, key, value, mask=None, dropout=None):
        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(query.size(-1))
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1000000000.0)
        p_attn = F.softmax(scores, dim=-1)
        if dropout is not None:
            p_attn = dropout(p_attn)
        return torch.matmul(p_attn, value), p_attn


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Softmax part 1: subtract the row max, fold in the 1/sqrt(d_k) = 0.5
    # scale (d_k = 4), and exponentiate.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = 0.5
    tmp16 = tmp14 * tmp15
    tmp17 = tl_math.exp(tmp16)
    tl.store(out_ptr0 + x2, tmp17, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Softmax part 2: normalize each row by its sum.
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 1, 4), 0), out=buf0)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
        buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
        del buf1
        extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3)
        del arg2_1
    return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2


class AttentionNew(nn.Module):
    """
    Compute 'Scaled Dot Product Attention'
    """

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0], output[1]
```
repo_name: JacobTyo/Syntax-Encoding_EMNLP2018
module_name: Attention
synthetic: false
uuid: 11,658
licenses: ["MIT"]
stars: 0
sha: 5aed2fdd01dc7d0baebbd555c97a25fedbde0c39
repo_link: https://github.com/JacobTyo/Syntax-Encoding_EMNLP2018/tree/5aed2fdd01dc7d0baebbd555c97a25fedbde0c39
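Note where the 1/sqrt(d_k) of scaled dot-product attention went: with d_k = 4 the divide-by-2 is folded into `triton_poi_fused__softmax_0` as the `tmp15 = 0.5` multiply applied after max subtraction, which is legal because softmax is invariant to a constant shift. Demonstrated on one row of raw scores:

```python
import torch

s = torch.randn(4)                    # one row of Q @ K^T, d_k = 4
direct = torch.softmax(s / 2, dim=0)  # reference: scale, then softmax
num = torch.exp((s - s.max()) * 0.5)  # what the first kernel computes
assert torch.allclose(direct, num / num.sum())  # second kernel normalizes
```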
---

entry_point: ConvMlp

original_triton_python_code:

```python
import torch
from torch import nn
import torch.cuda


class ConvMlp(nn.Module):
    """
    MLP using 1x1 convs that keeps spatial dims
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, drop=0.0):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=True)
        self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
        self.act = act_layer()
        self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=True)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.norm(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4}]
```

optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.cuda

assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_1[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
    return buf3, primals_1, primals_3, primals_4, buf1


class ConvMlpNew(nn.Module):
    """
    MLP using 1x1 convs that keeps spatial dims
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, drop=0.0):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=True)
        self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
        self.act = act_layer()
        self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=True)
        self.drop = nn.Dropout(drop)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
```
repo_name: LoveEachDay/towhee
module_name: ConvMlp
synthetic: false
uuid: 11,659
licenses: ["Apache-2.0"]
stars: 0
sha: 513c9c2626676cadaaf0a16ac3c828d96bec91a1
repo_link: https://github.com/LoveEachDay/towhee/tree/513c9c2626676cadaaf0a16ac3c828d96bec91a1
---

entry_point: InnerProductDecoder

original_triton_python_code:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class InnerProductDecoder(nn.Module):

    def __init__(self, activation=torch.sigmoid, dropout=0.1):
        super(InnerProductDecoder, self).__init__()
        self.dropout = dropout
        self.activation = activation

    def forward(self, z):
        z = F.dropout(z, self.dropout)
        adj = self.activation(torch.mm(z, z.t()))
        return adj


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
```

optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.sigmoid(tmp0)
    tl.store(in_out_ptr0 + x0, tmp1, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = torch.ops.aten.native_dropout.default(arg0_1, 0.1, True)
        del arg0_1
        buf1 = buf0[0]
        del buf0
        buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf1, reinterpret_tensor(buf1, (4, 4), (1, 4), 0), out=buf3)
        del buf1
        buf4 = buf3
        del buf3
        get_raw_stream(0)
        triton_poi_fused_sigmoid_0[grid(16)](buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
    return buf4,


class InnerProductDecoderNew(nn.Module):

    def __init__(self, activation=torch.sigmoid, dropout=0.1):
        super(InnerProductDecoderNew, self).__init__()
        self.dropout = dropout
        self.activation = activation

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
```
repo_name: LymanSong/suwon_bus_stop_competition
module_name: InnerProductDecoder
synthetic: false
uuid: 11,660
licenses: ["MIT"]
stars: 0
sha: 42297c8cfb0f109f28d8aeead097a57bb5d6be53
repo_link: https://github.com/LymanSong/suwon_bus_stop_competition/tree/42297c8cfb0f109f28d8aeead097a57bb5d6be53
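A subtlety this trace preserves: `F.dropout(z, p)` defaults to `training=True`, so the eager module applies dropout even at inference, and the generated `call` hard-codes that as `torch.ops.aten.native_dropout.default(arg0_1, 0.1, True)`. Both versions are therefore stochastic:

```python
import torch
import torch.nn.functional as F

z = torch.rand(4, 4)
a, b = F.dropout(z, 0.1), F.dropout(z, 0.1)
print(torch.equal(a, b))  # almost certainly False: dropout is always live here
```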
---

entry_point: CrossAttention

original_triton_python_code:

```python
import torch
from torch import nn
import torch.cuda


class MultiHeadAttention(nn.Module):
    """
    Multi head attention for Perceiver https://arxiv.org/pdf/2103.03206.pdf.

    Args:
        num_q_channels (`int`): Number of q channels.
        num_kv_channels (`int`): Number of k or v channels. k has the same channels as v.
        num_heads (`int`): Number of parallel attention heads.
        dropout (`nn.Module`): Dropout probability.
    """

    def __init__(self, num_q_channels: 'int', num_kv_channels: 'int', num_heads: 'int', dropout: 'float'):
        super().__init__()
        self.attention = nn.MultiheadAttention(embed_dim=num_q_channels, num_heads=num_heads, kdim=num_kv_channels, vdim=num_kv_channels, dropout=dropout, batch_first=True)

    def forward(self, x_q, x_kv, pad_mask=None, attn_mask=None):
        """
        Forward function.

        Args:
            x_q (`Tensor`): Query embeddings.
            x_kv (`Tensor`): Key embeddings. Key equals value.
            pad_mask (`int`): Padding mask.
            attn_mask (`nn.Module`): Attention mask.
        """
        return self.attention(x_q, x_kv, x_kv, key_padding_mask=pad_mask, attn_mask=attn_mask)[0]


class CrossAttention(nn.Module):
    """
    Cross attention for Perceiver https://arxiv.org/pdf/2103.03206.pdf.

    Args:
        num_q_channels (`int`): Number of q channels.
        num_kv_channels (`int`): Number of k or v channels. k has the same channels as v.
        num_heads (`int`): Number of parallel attention heads.
        dropout (`nn.Module`): Dropout probability.
    """

    def __init__(self, num_q_channels: 'int', num_kv_channels: 'int', num_heads: 'int', dropout: 'float'):
        super().__init__()
        self.q_norm = nn.LayerNorm(num_q_channels)
        self.kv_norm = nn.LayerNorm(num_kv_channels)
        self.attention = MultiHeadAttention(num_q_channels=num_q_channels, num_kv_channels=num_kv_channels, num_heads=num_heads, dropout=dropout)

    def forward(self, x_q, x_kv, pad_mask=None, attn_mask=None):
        x_q = self.q_norm(x_q)
        x_kv = self.kv_norm(x_kv)
        return self.attention(x_q, x_kv, pad_mask=pad_mask, attn_mask=attn_mask)


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'num_q_channels': 4, 'num_kv_channels': 4, 'num_heads': 4, 'dropout': 0.5}]
```

optimised_triton_code:

```python
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
import torch.cuda

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    # LayerNorm statistics: per-row mean and rsqrt(var + eps).
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Apply LayerNorm: normalize with the precomputed stats, then scale and shift.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Q bias add and query scaling; with embed_dim 4 and 4 heads, head_dim is 1,
    # so the 1/sqrt(head_dim) scale is 1.0.
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Softmax part 1: subtract the row max and exponentiate.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    # Softmax part 2: normalize each row by its sum.
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    # Transpose back to (seq, head) layout before the output projection.
    ynumel = 4
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x1 = xindex
    y0 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
    tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7, primals_8, primals_9, primals_10) = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (12, 4), (4, 1))
    assert_size_stride(primals_8, (12,), (1,))
    assert_size_stride(primals_9, (4, 4), (4, 1))
    assert_size_stride(primals_10, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_native_layer_norm_0[grid(4)](primals_3, buf0, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
        triton_poi_fused_native_layer_norm_0[grid(4)](primals_6, buf2, buf3, 4, XBLOCK=4, num_warps=1, num_stages=1)
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_1[grid(16)](primals_3, buf0, buf1, primals_1, primals_2, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del buf0
        del buf1
        del primals_1
        del primals_2
        buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf5)
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_1[grid(16)](primals_6, buf2, buf3, primals_4, primals_5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del buf2
        del buf3
        del primals_4
        del primals_5
        buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(reinterpret_tensor(primals_8, (4,), (1,), 4), buf6, reinterpret_tensor(primals_7, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf7)
        buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(reinterpret_tensor(primals_8, (4,), (1,), 8), buf6, reinterpret_tensor(primals_7, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf8)
        buf9 = reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 16), 0)
        del buf5
        triton_poi_fused_mul_2[grid(16)](buf9, primals_8, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del primals_8
        buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf9, reinterpret_tensor(buf7, (4, 1, 4), (1, 1, 4), 0), out=buf10)
        buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_3[grid(64)](buf10, buf11, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf12 = buf10
        del buf10
        triton_poi_fused__softmax_4[grid(64)](buf11, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del buf11
        buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(buf12, reinterpret_tensor(buf8, (4, 4, 1), (1, 4, 1), 0), out=buf13)
        buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
        triton_poi_fused_clone_5[grid(4, 4)](buf13, buf14, 4, 4, XBLOCK=4, YBLOCK=4, num_warps=1, num_stages=1)
        buf15 = reinterpret_tensor(buf13, (4, 4), (4, 1), 0)
        del buf13
        extern_kernels.addmm(primals_10, reinterpret_tensor(buf14, (4, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf15)
        del primals_10
    return (buf15, primals_3, primals_6, buf4, buf6, buf12,
            reinterpret_tensor(buf14, (4, 4), (4, 1), 0), primals_9,
            reinterpret_tensor(buf8, (4, 1, 4), (1, 1, 4), 0),
            reinterpret_tensor(buf9, (4, 1, 4), (1, 1, 4), 0),
            reinterpret_tensor(buf7, (4, 4, 1), (1, 4, 1), 0),
            reinterpret_tensor(primals_7, (4, 4), (4, 1), 32),
            reinterpret_tensor(primals_7, (4, 4), (4, 1), 16),
            reinterpret_tensor(primals_7, (4, 4), (4, 1), 0))


class MultiHeadAttention(nn.Module):
    """
    Multi head attention for Perceiver https://arxiv.org/pdf/2103.03206.pdf.

    Args:
        num_q_channels (`int`): Number of q channels.
        num_kv_channels (`int`): Number of k or v channels. k has the same channels as v.
        num_heads (`int`): Number of parallel attention heads.
        dropout (`nn.Module`): Dropout probability.
    """

    def __init__(self, num_q_channels: 'int', num_kv_channels: 'int', num_heads: 'int', dropout: 'float'):
        super().__init__()
        self.attention = nn.MultiheadAttention(embed_dim=num_q_channels, num_heads=num_heads, kdim=num_kv_channels, vdim=num_kv_channels, dropout=dropout, batch_first=True)

    def forward(self, x_q, x_kv, pad_mask=None, attn_mask=None):
        """
        Forward function.

        Args:
            x_q (`Tensor`): Query embeddings.
            x_kv (`Tensor`): Key embeddings. Key equals value.
            pad_mask (`int`): Padding mask.
            attn_mask (`nn.Module`): Attention mask.
        """
        return self.attention(x_q, x_kv, x_kv, key_padding_mask=pad_mask, attn_mask=attn_mask)[0]


class CrossAttentionNew(nn.Module):
    """
    Cross attention for Perceiver https://arxiv.org/pdf/2103.03206.pdf.

    Args:
        num_q_channels (`int`): Number of q channels.
        num_kv_channels (`int`): Number of k or v channels. k has the same channels as v.
        num_heads (`int`): Number of parallel attention heads.
        dropout (`nn.Module`): Dropout probability.
    """

    def __init__(self, num_q_channels: 'int', num_kv_channels: 'int', num_heads: 'int', dropout: 'float'):
        super().__init__()
        self.q_norm = nn.LayerNorm(num_q_channels)
        self.kv_norm = nn.LayerNorm(num_kv_channels)
        self.attention = MultiHeadAttention(num_q_channels=num_q_channels, num_kv_channels=num_kv_channels, num_heads=num_heads, dropout=dropout)

    def forward(self, input_0, input_1):
        primals_1 = self.q_norm.weight
        primals_2 = self.q_norm.bias
        primals_4 = self.kv_norm.weight
        primals_5 = self.kv_norm.bias
        primals_7 = self.attention.attention.in_proj_weight
        primals_8 = self.attention.attention.in_proj_bias
        primals_3 = self.attention.attention.out_proj.weight
        primals_10 = self.attention.attention.out_proj.bias
        primals_6 = input_0
        primals_9 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
        return output[0]
```
LoveEachDay/towhee
CrossAttention
false
11661
[ "Apache-2.0" ]
0
513c9c2626676cadaaf0a16ac3c828d96bec91a1
https://github.com/LoveEachDay/towhee/tree/513c9c2626676cadaaf0a16ac3c828d96bec91a1
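A quick smoke test of the row above is sketched below. It is not part of the dataset row: the constructor arguments (4 heads of head_dim 1, dropout 0) and the unbatched (4, 4) query/key shapes are assumptions inferred from the buffer shapes in call() (e.g. the (4, 4, 4) attention-score buffers buf10/buf11), not recorded values.

import torch

# Hypothetical smoke test, assuming the CrossAttentionNew definition above is
# in scope and a CUDA device is available (call() pins device 0).
m = CrossAttentionNew(num_q_channels=4, num_kv_channels=4, num_heads=4,
                      dropout=0.0).cuda().eval()
q = torch.rand(4, 4, device='cuda')   # assumed unbatched (seq, embed) query
kv = torch.rand(4, 4, device='cuda')  # assumed key/value input
with torch.no_grad():
    out = m(q, kv)
print(out.shape)  # expected torch.Size([4, 4]), matching buf15 in call()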
TVLoss
import torch from torch import nn from torch.nn import functional as F class TVLoss(nn.Module): """L2 total variation loss, as in Mahendran et al.""" def forward(self, input): input = F.pad(input, (0, 1, 0, 1), 'replicate') x_diff = input[..., :-1, 1:] - input[..., :-1, :-1] y_diff = input[..., 1:, :-1] - input[..., :-1, :-1] return (x_diff ** 2 + y_diff ** 2).mean() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_mean_pow_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex % 4 r1 = rindex // 4 % 4 r2 = rindex // 16 tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= r1) + r1 * (r1 < 3)) + 16 * r2 + (3 * (3 <= 1 + r0) + (1 + r0) * (1 + r0 < 3))), None) tmp1 = tl.load(in_ptr0 + (4 * (3 * (3 <= r1) + r1 * (r1 < 3)) + 16 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None) tmp4 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + r1) + (1 + r1) * (1 + r1 < 3)) + 16 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp5 = tmp4 - tmp1 tmp6 = tmp5 * tmp5 tmp7 = tmp3 + tmp6 tmp8 = tl.broadcast_to(tmp7, [RBLOCK]) tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0)) tmp11 = 256.0 tmp12 = tmp10 / tmp11 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_mean_pow_sub_0[grid(1)](buf1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 return buf1, class TVLossNew(nn.Module): """L2 total variation loss, as in Mahendran et al.""" def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
MED-YAHYAOUI/style-transfer-pytorch
TVLoss
false
11662
[ "MIT" ]
0
867a6a45d964c151d6b94f50153cf535385c9078
https://github.com/MED-YAHYAOUI/style-transfer-pytorch/tree/867a6a45d964c151d6b94f50153cf535385c9078
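The fused kernel above folds the replicate padding, both squared differences, and the mean into one reduction: clamp expressions such as `3 * (3 <= 1 + r0) + (1 + r0) * (1 + r0 < 3)` realize the replicate pad by clamping indices to the last row/column, and the constant 256.0 is N*C*H*W for the 4x4x4x4 test input. A minimal equivalence sketch, assuming a CUDA device and the two class definitions above in scope:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
ref = TVLoss()(x)      # eager reference
opt = TVLossNew()(x)   # Triton-fused version
assert torch.allclose(ref, opt, atol=1e-6)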
AttentionPool2d
import torch from torch import nn import torch.cuda class AttentionPool2d(nn.Module): """ Attention """ def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads: 'int', output_dim: 'int'=None): super().__init__() self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5) self.k_proj = nn.Linear(embed_dim, embed_dim) self.q_proj = nn.Linear(embed_dim, embed_dim) self.v_proj = nn.Linear(embed_dim, embed_dim) self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) self.num_heads = num_heads def forward(self, x): x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute( 2, 0, 1) x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) x = x + self.positional_embedding[:, None, :] x, _ = nn.functional.multi_head_attention_forward(query=x, key=x, value=x, embed_dim_to_check=x.shape[-1], num_heads=self. num_heads, q_proj_weight=self.q_proj.weight, k_proj_weight=self .k_proj.weight, v_proj_weight=self.v_proj.weight, in_proj_weight=None, in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]), bias_k=None, bias_v=None, add_zero_attn=False, dropout_p=0, out_proj_weight=self.c_proj. weight, out_proj_bias=self.c_proj.bias, use_separate_proj_weight=True, training=self.training, need_weights=False) return x[0] def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'spacial_dim': 4, 'embed_dim': 4, 'num_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn import torch.cuda assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mean_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl. constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_add_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex % 16 x0 = xindex % 4 x4 = xindex tmp15 = tl.load(in_ptr2 + (x0 + 4 * x2), xmask, eviction_policy= 'evict_last') tmp0 = x2 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x3, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = 16.0 tmp7 = tmp5 / tmp6 tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tmp0 >= tmp3 tl.full([1], 17, tl.int64) tmp13 = tl.load(in_ptr1 + (16 * x3 + (-1 + x2)), tmp10 & xmask, eviction_policy='evict_last', other=0.0) tmp14 = tl.where(tmp4, tmp9, tmp13) tmp16 = tmp14 + tmp15 tl.store(out_ptr0 + x4, tmp16, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (-4 + x0), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (-8 + x0), tmp11 & xmask, eviction_policy= 'evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x0, tmp16, xmask) @triton.jit def triton_poi_fused_mul_transpose_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): ynumel = 16 xnumel = 17 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask, eviction_policy ='evict_last') tmp1 = y0 tl.full([1, 1], 0, tl.int64) tmp4 = tl.full([1, 1], 4, tl.int64) tmp5 = tmp1 < tmp4 tmp6 = tl.load(in_ptr1 + tl.broadcast_to(y0, [XBLOCK, YBLOCK]), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp7 = tmp1 >= tmp4 tmp8 = tl.full([1, 1], 8, tl.int64) tmp9 = tmp1 < tmp8 tmp10 = tmp7 & tmp9 tmp11 = tl.load(in_ptr2 + tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp12 = tmp1 >= tmp8 tl.full([1, 1], 12, tl.int64) tmp15 = tl.load(in_ptr3 + tl.broadcast_to(-8 + y0, [XBLOCK, YBLOCK]), tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp10, tmp11, tmp15) tmp17 = tl.where(tmp5, tmp6, tmp16) tmp18 = tmp0 + tmp17 tmp19 = 1.0 tmp20 = tmp18 * tmp19 tl.store(out_ptr0 + (x2 + 17 * y3), tmp20, xmask & ymask) tl.store(out_ptr1 + (y3 + 16 * x2), tmp20, xmask & ymask) @triton.jit def triton_poi_fused_mul_transpose_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. constexpr): ynumel = 16 xnumel = 17 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 tmp0 = tl.load(in_ptr0 + (y3 + 16 * x2), xmask & ymask, eviction_policy ='evict_last') tmp1 = 4 + y0 tl.full([1, 1], 0, tl.int64) tmp4 = tl.full([1, 1], 4, tl.int64) tmp5 = tmp1 < tmp4 tmp6 = tl.load(in_ptr1 + tl.broadcast_to(4 + y0, [XBLOCK, YBLOCK]), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp7 = tmp1 >= tmp4 tmp8 = tl.full([1, 1], 8, tl.int64) tmp9 = tmp1 < tmp8 tmp10 = tmp7 & tmp9 tmp11 = tl.load(in_ptr2 + tl.broadcast_to(y0, [XBLOCK, YBLOCK]), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp12 = tmp1 >= tmp8 tl.full([1, 1], 12, tl.int64) tmp15 = tl.load(in_ptr3 + tl.broadcast_to(-4 + y0, [XBLOCK, YBLOCK]), tmp12 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp10, tmp11, tmp15) tmp17 = tl.where(tmp5, tmp6, tmp16) tmp18 = tmp0 + tmp17 tmp19 = 1.0 tmp20 = tmp18 * tmp19 tl.store(out_ptr0 + (x2 + 17 * y3), tmp20, xmask & ymask) tl.store(out_ptr1 + (y3 + 16 * x2), tmp20, xmask & ymask) @triton.jit def triton_per_fused__safe_softmax_5(in_ptr0, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 272 rnumel = 17 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex x2 = xindex % 68 x3 = xindex // 68 tmp0 = tl.load(in_ptr0 + (r1 + 17 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = float('-inf') tmp12 = tmp0 == tmp11 tmp13 = tmp12 == 0 tmp14 = tmp13.to(tl.int64) tmp15 = tmp14 != 0 
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.where(rmask & xmask, tmp16, 0) tmp19 = triton_helpers.any(tmp18, 1)[:, None] tmp20 = tmp19 == 0 tmp21 = tmp6 / tmp10 tmp22 = 0.0 tmp23 = tl.where(tmp20, tmp22, tmp21) tl.store(out_ptr3 + (r1 + 17 * x2 + 1184 * x3), tmp23, rmask & xmask) @triton.jit def triton_poi_fused_bmm_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4624 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 289 x1 = xindex // 289 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 289 * (x1 % 4) + 1184 * (x1 // 4)), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 17 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 17 * x1), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (17, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4), (4, 1)) assert_size_stride(primals_10, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_mean_0[grid(16)](primals_1, buf0, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) buf1 = empty_strided_cuda((17, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_cat_1[grid(272)](buf0, primals_1, primals_2, buf1, 272, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_1 del primals_2 buf2 = empty_strided_cuda((68, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2) buf3 = empty_strided_cuda((68, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3) buf4 = empty_strided_cuda((12,), (1,), torch.float32) triton_poi_fused_cat_2[grid(12)](primals_6, primals_7, primals_8, buf4, 12, XBLOCK=16, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((68, 4), (4, 1), torch.float32) extern_kernels.addmm(reinterpret_tensor(buf4, (4,), (1,), 8), reinterpret_tensor(buf1, (68, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta =1, out=buf5) del buf4 buf6 = empty_strided_cuda((4, 4, 17, 1), (68, 17, 1, 1), torch.float32) buf17 = empty_strided_cuda((16, 1, 17), (1, 1, 16), torch.float32) triton_poi_fused_mul_transpose_3[grid(16, 17)](buf2, primals_6, primals_7, primals_8, buf6, buf17, 16, 17, XBLOCK=32, YBLOCK=8, num_warps=4, num_stages=1) buf7 = reinterpret_tensor(buf2, (4, 4, 1, 17), (68, 17, 17, 1), 0) del buf2 buf18 = empty_strided_cuda((16, 17, 1), (1, 16, 1), torch.float32) triton_poi_fused_mul_transpose_4[grid(16, 17)](buf3, primals_6, primals_7, 
primals_8, buf7, buf18, 16, 17, XBLOCK=32, YBLOCK=8, num_warps=4, num_stages=1) del buf3 del primals_6 del primals_7 del primals_8 buf8 = empty_strided_cuda((16, 17, 17), (289, 17, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf6, (16, 17, 1), (17, 1, 0), 0), reinterpret_tensor(buf7, (16, 1, 17), (17, 0, 1), 0), out=buf8) buf12 = empty_strided_cuda((4, 4, 17, 17), (1184, 289, 17, 1), torch.float32) triton_per_fused__safe_softmax_5[grid(272)](buf8, buf12, 272, 17, XBLOCK=8, num_warps=2, num_stages=1) buf13 = buf8 del buf8 triton_poi_fused_bmm_6[grid(4624)](buf12, buf13, 4624, XBLOCK=256, num_warps=4, num_stages=1) buf14 = reinterpret_tensor(buf7, (16, 17, 1), (17, 1, 1), 0) del buf7 extern_kernels.bmm(buf13, reinterpret_tensor(buf5, (16, 17, 1), (1, 16, 0), 0), out=buf14) del buf13 buf15 = reinterpret_tensor(buf6, (17, 4, 4, 1), (16, 4, 1, 1), 0) del buf6 triton_poi_fused_clone_7[grid(17, 16)](buf14, buf15, 17, 16, XBLOCK =16, YBLOCK=32, num_warps=4, num_stages=1) buf16 = reinterpret_tensor(buf14, (68, 4), (4, 1), 0) del buf14 extern_kernels.addmm(primals_10, reinterpret_tensor(buf15, (68, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf16) del primals_10 return reinterpret_tensor(buf16, (4, 4), (4, 1), 0), reinterpret_tensor( buf1, (68, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (68, 4), (4, 1), 0), primals_9, reinterpret_tensor(buf5, (16, 1, 17), (1, 1, 16), 0), buf17, buf18, primals_5, primals_4, primals_3 class AttentionPool2dNew(nn.Module): """ Attention """ def __init__(self, spacial_dim: 'int', embed_dim: 'int', num_heads: 'int', output_dim: 'int'=None): super().__init__() self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5) self.k_proj = nn.Linear(embed_dim, embed_dim) self.q_proj = nn.Linear(embed_dim, embed_dim) self.v_proj = nn.Linear(embed_dim, embed_dim) self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) self.num_heads = num_heads def forward(self, input_0): primals_2 = self.positional_embedding primals_3 = self.k_proj.weight primals_6 = self.k_proj.bias primals_4 = self.q_proj.weight primals_7 = self.q_proj.bias primals_5 = self.v_proj.weight primals_8 = self.v_proj.bias primals_9 = self.c_proj.weight primals_10 = self.c_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
LoveEachDay/towhee
AttentionPool2d
false
11663
[ "Apache-2.0" ]
0
513c9c2626676cadaaf0a16ac3c828d96bec91a1
https://github.com/LoveEachDay/towhee/tree/513c9c2626676cadaaf0a16ac3c828d96bec91a1
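Because both versions of the module hold identically named parameters, the compiled wrapper can be checked against the eager one by sharing a state dict. A sketch using this row's get_inputs/get_init_inputs values; the CUDA requirement comes from the hard-coded device guard in call():

import torch

ref = AttentionPool2d(spacial_dim=4, embed_dim=4, num_heads=4).cuda()
opt = AttentionPool2dNew(spacial_dim=4, embed_dim=4, num_heads=4).cuda()
opt.load_state_dict(ref.state_dict())  # same parameter names in both classes

x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    assert torch.allclose(ref(x), opt(x), atol=1e-5)
# Both return one pooled embedding per image: shape (4, 4).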
ConvNetsModel
import torch import torch.nn as nn import torch.nn.functional as F class ConvNetsModel(nn.Module): def __init__(self, num_classes, cross_entropy_loss=False, kernel_size=3, channel_size1=32, channel_size2=64, dropout=False): super(ConvNetsModel, self).__init__() self.cross_entropy_loss = cross_entropy_loss self.kernel_size = kernel_size self.w_dropout = dropout self.channel_size1 = channel_size1 self.channel_size2 = channel_size2 self.cn1 = nn.Conv2d(in_channels=3, out_channels=channel_size1, kernel_size=(kernel_size, kernel_size)) self.cn2 = nn.Conv2d(in_channels=channel_size1, out_channels= channel_size1, kernel_size=(kernel_size, kernel_size)) self.pool = nn.MaxPool2d((2, 2)) if dropout: self.dropout = nn.Dropout(p=0.3) self.dropout2 = nn.Dropout(p=0.6) self.cn3 = nn.Conv2d(in_channels=channel_size1, out_channels= channel_size2, kernel_size=(kernel_size, kernel_size)) self.cn4 = nn.Conv2d(in_channels=channel_size2, out_channels= channel_size2, kernel_size=(kernel_size, kernel_size)) self.new_image_dim = ((32 + 2 * (-kernel_size + 1)) // 2 + 2 * (- kernel_size + 1)) // 2 self.fc1 = nn.Linear(channel_size2 * self.new_image_dim * self. new_image_dim, 512) self.fc2 = nn.Linear(512, num_classes) if not self.cross_entropy_loss: self.softmax = nn.Softmax() def forward(self, x): x = self.cn1(x) x = F.relu(x) x = self.cn2(x) x = self.pool(x) if self.w_dropout: x = self.dropout(x) x = F.relu(x) x = self.cn3(x) x = F.relu(x) x = self.cn4(x) x = self.pool(x) if self.w_dropout: x = self.dropout(x) x = F.relu(x) x = x.view(-1, self.channel_size2 * self.new_image_dim * self. new_image_dim) x = self.fc1(x) if self.w_dropout: x = self.dropout2(x) x = F.relu(x) x = self.fc2(x) if not self.cross_entropy_loss: x = self.softmax(x) return x def get_inputs(): return [torch.rand([4, 3, 32, 32])] def get_init_inputs(): return [[], {'num_classes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 115200 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 900 % 32 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 784 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 25088 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 14 x1 = xindex // 14 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x1), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x1), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tl.store(out_ptr0 + x2, tmp15, xmask) tl.store(out_ptr1 + x2, tmp18, xmask) @triton.jit def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 144 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 25600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 100 % 64 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') 
tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_5(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 6400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 5 x3 = xindex // 5 x2 = xindex // 1600 x4 = xindex % 1600 x5 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x3), xmask, eviction_policy ='evict_last') tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x3), xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x3), xmask, eviction_policy='evict_last') tmp2 = tmp1 > tmp0 tmp3 = tl.full([1], 1, tl.int8) tmp4 = tl.full([1], 0, tl.int8) tmp5 = tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tmp17 = tl.full([1], 0, tl.int32) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp19 = 0.0 tmp20 = tmp18 <= tmp19 tl.store(out_ptr0 + (x4 + 1664 * x2), tmp15, xmask) tl.store(out_ptr1 + x5, tmp18, xmask) tl.store(out_ptr2 + (x4 + 1664 * x2), tmp20, xmask) @triton.jit def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 512 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused__softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args 
args.clear() assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (32,), (1,)) assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1)) assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (512, 1600), (1600, 1)) assert_size_stride(primals_11, (512,), (1,)) assert_size_stride(primals_12, (4, 512), (512, 1)) assert_size_stride(primals_13, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 32, 30, 30), (28800, 900, 30, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(115200)](buf1, primals_2, 115200, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 28, 28), (25088, 784, 28, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(100352)](buf3, primals_5, 100352, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 32, 14, 14), (6272, 196, 14, 1), torch.int8) buf5 = empty_strided_cuda((4, 32, 14, 14), (6272, 196, 14, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_relu_2[grid(25088)](buf3, buf4, buf5, 25088, XBLOCK=128, num_warps=4, num_stages=1) buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 64, 12, 12), (9216, 144, 12, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_3[grid(36864)](buf7, primals_7, 36864, XBLOCK=512, num_warps=4, num_stages=1) del primals_7 buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 64, 10, 10), (6400, 100, 10, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_4[grid(25600)](buf9, primals_9, 25600, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf10 = empty_strided_cuda((4, 64, 5, 5), (1664, 25, 5, 1), torch.int8) buf11 = empty_strided_cuda((4, 64, 5, 5), (1600, 25, 5, 1), torch. 
float32) buf17 = empty_strided_cuda((4, 64, 5, 5), (1664, 25, 5, 1), torch.bool) triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_5[grid (6400)](buf9, buf10, buf11, buf17, 6400, XBLOCK=256, num_warps= 4, num_stages=1) buf12 = empty_strided_cuda((4, 512), (512, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf11, (4, 1600), (1600, 1), 0 ), reinterpret_tensor(primals_10, (1600, 512), (1, 1600), 0), out=buf12) buf13 = buf12 del buf12 triton_poi_fused_relu_6[grid(2048)](buf13, primals_11, 2048, XBLOCK =256, num_warps=4, num_stages=1) del primals_11 buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_13, buf13, reinterpret_tensor( primals_12, (512, 4), (1, 512), 0), alpha=1, beta=1, out=buf14) del primals_13 buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused__softmax_7[grid(16)](buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1) buf16 = buf14 del buf14 triton_poi_fused__softmax_8[grid(16)](buf15, buf16, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf15 return (buf16, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf4, buf5, buf7, buf9, buf10, reinterpret_tensor(buf11, (4, 1600), (1600, 1), 0), buf13, buf16, primals_12, primals_10, buf17) class ConvNetsModelNew(nn.Module): def __init__(self, num_classes, cross_entropy_loss=False, kernel_size=3, channel_size1=32, channel_size2=64, dropout=False): super(ConvNetsModelNew, self).__init__() self.cross_entropy_loss = cross_entropy_loss self.kernel_size = kernel_size self.w_dropout = dropout self.channel_size1 = channel_size1 self.channel_size2 = channel_size2 self.cn1 = nn.Conv2d(in_channels=3, out_channels=channel_size1, kernel_size=(kernel_size, kernel_size)) self.cn2 = nn.Conv2d(in_channels=channel_size1, out_channels= channel_size1, kernel_size=(kernel_size, kernel_size)) self.pool = nn.MaxPool2d((2, 2)) if dropout: self.dropout = nn.Dropout(p=0.3) self.dropout2 = nn.Dropout(p=0.6) self.cn3 = nn.Conv2d(in_channels=channel_size1, out_channels= channel_size2, kernel_size=(kernel_size, kernel_size)) self.cn4 = nn.Conv2d(in_channels=channel_size2, out_channels= channel_size2, kernel_size=(kernel_size, kernel_size)) self.new_image_dim = ((32 + 2 * (-kernel_size + 1)) // 2 + 2 * (- kernel_size + 1)) // 2 self.fc1 = nn.Linear(channel_size2 * self.new_image_dim * self. new_image_dim, 512) self.fc2 = nn.Linear(512, num_classes) if not self.cross_entropy_loss: self.softmax = nn.Softmax() def forward(self, input_0): primals_1 = self.cn1.weight primals_2 = self.cn1.bias primals_4 = self.cn2.weight primals_5 = self.cn2.bias primals_6 = self.cn3.weight primals_7 = self.cn3.bias primals_8 = self.cn4.weight primals_9 = self.cn4.bias primals_10 = self.fc1.weight primals_11 = self.fc1.bias primals_12 = self.fc2.weight primals_13 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
LidiaAlecci/ConvNet
ConvNetsModel
false
11664
[ "MIT" ]
0
23bc0919edfa346440588f79bc86d9c5f5fcc4d2
https://github.com/LidiaAlecci/ConvNet/tree/23bc0919edfa346440588f79bc86d9c5f5fcc4d2
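With dropout disabled (the default), the forward pass is deterministic, so the compiled graph can be validated directly against the eager module. A sketch under the same assumptions as the other rows (CUDA device, definitions above in scope; the tolerance is a judgment call):

import torch

ref = ConvNetsModel(num_classes=4).cuda().eval()
opt = ConvNetsModelNew(num_classes=4).cuda().eval()
opt.load_state_dict(ref.state_dict())

x = torch.rand(4, 3, 32, 32, device='cuda')
with torch.no_grad():
    assert torch.allclose(ref(x), opt(x), atol=1e-5)
# new_image_dim = ((32 - 2*(k-1)) // 2 - 2*(k-1)) // 2 = 5 for k=3, so fc1
# expects 64 * 5 * 5 = 1600 input features, matching primals_10 (512, 1600).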
ClassificationModel
import torch import torch.nn as nn class ClassificationModel(nn.Module): def __init__(self, num_features_in, num_anchors=9, num_classes=80, prior=0.01, feature_size=256): super(ClassificationModel, self).__init__() self.num_classes = num_classes self.num_anchors = num_anchors self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1) self.act1 = nn.ReLU() self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) self.act2 = nn.ReLU() self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) self.act3 = nn.ReLU() self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) self.act4 = nn.ReLU() self.output = nn.Conv2d(feature_size, num_anchors * num_classes, kernel_size=3, padding=1) self.output_act = nn.Sigmoid() def forward(self, x): out = self.conv1(x) out = self.act1(out) out = self.conv2(out) out = self.act2(out) out = self.conv3(out) out = self.act3(out) out = self.conv4(out) out = self.act4(out) out = self.output(out) out = self.output_act(out) out1 = out.permute(0, 2, 3, 1) batch_size, width, height, _channels = out1.shape out2 = out1.view(batch_size, width, height, self.num_anchors, self. num_classes) return out2.contiguous().view(x.shape[0], -1, self.num_classes) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_features_in': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 4 * x2 + 36 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 4 y1 = yindex // 4 tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask) tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1) ) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_clone_convolution_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 46080 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 720 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.sigmoid(tmp2) tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (256, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_9, (256,), (1,)) assert_size_stride(primals_10, (720, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_11, (720,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((256, 4, 3, 3), (36, 1, 12, 4), torch.float32 ) get_raw_stream(0) triton_poi_fused_0[grid(1024, 9)](primals_1, buf0, 1024, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32) triton_poi_fused_1[grid(16, 16)](primals_3, buf1, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_4, buf2, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_6, buf3, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_2[grid(65536, 9)](primals_8, buf4, 65536, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = empty_strided_cuda((720, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_3[grid(184320, 9)](primals_10, buf5, 184320, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 
buf6 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 256, 4, 4), (4096, 1, 1024, 256)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_4[grid(16384)](buf7, primals_2, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf8 = extern_kernels.convolution(buf7, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf8, (4, 256, 4, 4), (4096, 1, 1024, 256)) buf9 = buf8 del buf8 triton_poi_fused_convolution_relu_4[grid(16384)](buf9, primals_5, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf10 = extern_kernels.convolution(buf9, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 256, 4, 4), (4096, 1, 1024, 256)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_4[grid(16384)](buf11, primals_7, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf12 = extern_kernels.convolution(buf11, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 256, 4, 4), (4096, 1, 1024, 256)) buf13 = buf12 del buf12 triton_poi_fused_convolution_relu_4[grid(16384)](buf13, primals_9, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf14 = extern_kernels.convolution(buf13, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 720, 4, 4), (11520, 1, 2880, 720)) buf15 = buf14 del buf14 buf16 = empty_strided_cuda((4, 4, 4, 9, 80), (11520, 2880, 720, 80, 1), torch.float32) triton_poi_fused_clone_convolution_5[grid(46080)](buf15, primals_11, buf16, 46080, XBLOCK=512, num_warps=4, num_stages=1) del primals_11 return reinterpret_tensor(buf16, (4, 144, 80), (11520, 80, 1), 0 ), buf0, buf1, buf2, buf3, buf4, buf5, buf7, buf9, buf11, buf13, buf15 class ClassificationModelNew(nn.Module): def __init__(self, num_features_in, num_anchors=9, num_classes=80, prior=0.01, feature_size=256): super(ClassificationModelNew, self).__init__() self.num_classes = num_classes self.num_anchors = num_anchors self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1) self.act1 = nn.ReLU() self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) self.act2 = nn.ReLU() self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) self.act3 = nn.ReLU() self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1) self.act4 = nn.ReLU() self.output = nn.Conv2d(feature_size, num_anchors * num_classes, kernel_size=3, padding=1) self.output_act = nn.Sigmoid() def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.output.weight primals_11 = self.output.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
Hyojin021/auto_labeling
ClassificationModel
false
11665
[ "Apache-2.0" ]
0
1ccf0cd1c5adf34692751553a988aa0fcf4efefb
https://github.com/Hyojin021/auto_labeling/tree/1ccf0cd1c5adf34692751553a988aa0fcf4efefb
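The permute/view tail of the eager forward is what the fused clone/sigmoid kernel plus the final reinterpret_tensor reproduce: per image, the 720-channel map (9 anchors x 80 classes) at each of the 4x4 locations is flattened to 144 anchor boxes. A shape sketch, assuming a CUDA device and the definition above:

import torch

m = ClassificationModelNew(num_features_in=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
out = m(x)
print(out.shape)  # torch.Size([4, 144, 80]): 4*4 locations * 9 anchors, 80 classes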
TemporalDecay
import math import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.parameter import Parameter class TemporalDecay(nn.Module): def __init__(self, input_size, rnn_hid_size): super(TemporalDecay, self).__init__() self.rnn_hid_size = rnn_hid_size self.build(input_size) def build(self, input_size): self.W = Parameter(torch.Tensor(self.rnn_hid_size, input_size)) self.b = Parameter(torch.Tensor(self.rnn_hid_size)) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.W.size(0)) self.W.data.uniform_(-stdv, stdv) if self.b is not None: self.b.data.uniform_(-stdv, stdv) def forward(self, d): gamma = F.relu(F.linear(d, self.W, self.b)) gamma = torch.exp(-gamma) return gamma def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'rnn_hid_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_exp_neg_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = -tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = 0.0 tmp8 = tmp4 <= tmp7 tl.store(out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr1 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_exp_neg_relu_threshold_backward_0[grid(256)](buf0, primals_2, buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_2 return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf2 class TemporalDecayNew(nn.Module): def __init__(self, input_size, rnn_hid_size): super(TemporalDecayNew, self).__init__() self.rnn_hid_size = rnn_hid_size self.build(input_size) def build(self, input_size): self.W = Parameter(torch.Tensor(self.rnn_hid_size, input_size)) self.b = Parameter(torch.Tensor(self.rnn_hid_size)) self.reset_parameters() def reset_parameters(self): stdv = 1.0 / math.sqrt(self.W.size(0)) self.W.data.uniform_(-stdv, stdv) if self.b is not None: self.b.data.uniform_(-stdv, stdv) def forward(self, input_0): primals_1 = self.W primals_2 = self.b primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
LyapunovStability/BRITS
TemporalDecay
false
11666
[ "MIT" ]
0
92a889dd5946aae215d61b1854d9767c6f7fcf2c
https://github.com/LyapunovStability/BRITS/tree/92a889dd5946aae215d61b1854d9767c6f7fcf2c
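The single fused kernel above computes gamma = exp(-relu(W d + b)) elementwise after the extern matmul, and also emits the relu threshold mask (buf2) that autograd needs for the backward pass. An equivalence sketch, assuming a CUDA device:

import torch

ref = TemporalDecay(input_size=4, rnn_hid_size=4).cuda()
opt = TemporalDecayNew(input_size=4, rnn_hid_size=4).cuda()
opt.load_state_dict(ref.state_dict())  # copies the shared W and b

d = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(ref(d), opt(d), atol=1e-6)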
CSDN_Tem
import torch import torch.nn as nn class CSDN_Tem(nn.Module): def __init__(self, in_ch, out_ch): super(CSDN_Tem, self).__init__() self.depth_conv = nn.Conv2d(in_channels=in_ch, out_channels=in_ch, kernel_size=3, padding=1, groups=in_ch) self.point_conv = nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=1) def forward(self, input): out = self.depth_conv(input) out = self.point_conv(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_ch': 4, 'out_ch': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_0[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1 class CSDN_TemNew(nn.Module): def __init__(self, in_ch, out_ch): super(CSDN_TemNew, self).__init__() self.depth_conv = nn.Conv2d(in_channels=in_ch, out_channels=in_ch, kernel_size=3, padding=1, groups=in_ch) self.point_conv = nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=1) def forward(self, input_0): primals_1 = self.depth_conv.weight primals_2 = self.depth_conv.bias primals_4 = self.point_conv.weight primals_5 = self.point_conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Lundez/londogard-backend
CSDN_Tem
false
11667
[ "MIT" ]
0
90d9e405b832c2157e6fde00f58b9312cfc4ddbc
https://github.com/Lundez/londogard-backend/tree/90d9e405b832c2157e6fde00f58b9312cfc4ddbc
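CSDN_Tem is a depthwise-separable convolution: a 3x3 depthwise stage (groups=in_ch) followed by a 1x1 pointwise stage. The compiled graph keeps both convolutions as extern kernels and only fuses the two bias additions into one reused Triton kernel. A sketch of an equivalence check, assuming a CUDA device:

import torch

ref = CSDN_Tem(in_ch=4, out_ch=4).cuda()
opt = CSDN_TemNew(in_ch=4, out_ch=4).cuda()
opt.load_state_dict(ref.state_dict())

x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(ref(x), opt(x), atol=1e-6)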
MultinomialCELoss
import torch import torch.nn as nn class MultinomialCELoss(nn.Module): def __init__(self): super(MultinomialCELoss, self).__init__() def forward(self, x, y): x = x + 1e-08 x = torch.log(x) zlogz = y * x loss = -zlogz.sum() loss /= x.shape[0] * x.shape[2] * x.shape[3] return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_log_mul_neg_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = 1e-08 tmp3 = tmp1 + tmp2 tmp4 = tl_math.log(tmp3) tmp5 = tmp0 * tmp4 tmp6 = tl.broadcast_to(tmp5, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = -tmp8 tmp10 = 0.015625 tmp11 = tmp9 * tmp10 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_log_mul_neg_sum_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class MultinomialCELossNew(nn.Module): def __init__(self): super(MultinomialCELossNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
MMujtabaRoohani/FlowerColorizer-PyTorch
MultinomialCELoss
false
11668
[ "MIT" ]
0
4c9c4c954a38babe1f10f816f8406eb4ab998842
https://github.com/MMujtabaRoohani/FlowerColorizer-PyTorch/tree/4c9c4c954a38babe1f10f816f8406eb4ab998842
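In the fused kernel above, the hard-coded 0.015625 is 1/64 = 1/(N*H*W) for the 4x4x4x4 test input, i.e. the divisor x.shape[0] * x.shape[2] * x.shape[3] from the eager forward baked in as a compile-time constant. A sketch of a check, assuming a CUDA device:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
y = torch.rand(4, 4, 4, 4, device='cuda')
ref = MultinomialCELoss()(x, y)
opt = MultinomialCELossNew()(x, y)
assert torch.allclose(ref, opt, atol=1e-6)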
DownBlock
import torch import torch.utils.data from torchvision.transforms import * class ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None): super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.conv(x)) else: out = self.conv(x) if self.activation is not None: return self.act(out) else: return out class DeconvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='prelu', norm=None): super(DeconvBlock, self).__init__() self.deconv = torch.nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.deconv(x)) else: out = self.deconv(x) if self.activation is not None: return self.act(out) else: return out class DownBlock(torch.nn.Module): def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, bias =True, activation='prelu', norm=None): super(DownBlock, self).__init__() self.down_conv1 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) self.down_conv2 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) self.down_conv3 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) def forward(self, x): l0 = self.down_conv1(x) h0 = self.down_conv2(l0) l1 = self.down_conv3(h0 - x) return l1 + l0 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_filter': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr2 + x3, xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tmp10 = tmp8 - tmp9 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused__prelu_kernel_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr2 + x2, xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tmp10 = tmp8 + tmp9 tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (4, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused__prelu_kernel_convolution_0[grid(16)](buf1, 
primals_2, primals_4, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_5, stride=(4, 4), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_sub_1[grid(256)](buf4, primals_6, primals_7, primals_3, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(4, 4), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 1, 1), (4, 1, 1, 1)) buf7 = buf6 del buf6 buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused__prelu_kernel_add_convolution_2[grid(16)](buf7, primals_9, primals_10, buf2, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_9 return (buf8, primals_1, primals_3, primals_4, primals_5, primals_7, primals_8, primals_10, buf1, buf2, buf4, buf5, buf7) class ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None): super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.conv(x)) else: out = self.conv(x) if self.activation is not None: return self.act(out) else: return out class DeconvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='prelu', norm=None): super(DeconvBlock, self).__init__() self.deconv = torch.nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.deconv(x)) else: out = self.deconv(x) if self.activation is not None: return self.act(out) else: return out class DownBlockNew(torch.nn.Module): def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, bias =True, activation='prelu', norm=None): super(DownBlockNew, self).__init__() self.down_conv1 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) self.down_conv2 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) self.down_conv3 = ConvBlock(num_filter, num_filter, kernel_size, 
stride, padding, activation, norm=None) def forward(self, input_0): primals_1 = self.down_conv1.conv.weight primals_2 = self.down_conv1.conv.bias primals_4 = self.down_conv1.act.weight primals_5 = self.down_conv2.deconv.weight primals_6 = self.down_conv2.deconv.bias primals_7 = self.down_conv2.act.weight primals_8 = self.down_conv3.conv.weight primals_9 = self.down_conv3.conv.bias primals_10 = self.down_conv3.act.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
HamsterBiz/iSeeBetter
DownBlock
false
11,669
[ "MIT" ]
0
a71cee61583bdedab1f3b368e2cb7dc5ad969aed
https://github.com/HamsterBiz/iSeeBetter/tree/a71cee61583bdedab1f3b368e2cb7dc5ad969aed
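Reviewer's note on the DownBlock record above: in DownBlock.__init__ the activation string is passed positionally into ConvBlock/DeconvBlock, whose sixth positional parameter is bias, not activation. The sub-blocks therefore keep the default 'prelu' activation, and the truthy string merely enables the bias term; behaviour is unchanged only because 'prelu' happens to be the default (keyword arguments, e.g. activation=activation, would make the intent explicit). Below is a minimal parity sketch, assuming both class definitions from this record are importable in one module and that a CUDA device is available, since the generated call() pins device 0.

import torch

# Hedged parity check: the Triton-compiled DownBlockNew should reproduce
# the eager DownBlock once both share the same weights.
torch.manual_seed(0)
ref = DownBlock(num_filter=4).cuda()
opt = DownBlockNew(num_filter=4).cuda()
opt.load_state_dict(ref.state_dict())  # copy weights so outputs are comparable
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(opt(x), ref(x), rtol=1e-4, atol=1e-4)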
BoundSoftmaxImpl
import torch import torch.nn as nn class BoundSoftmaxImpl(nn.Module): def __init__(self, axis): super().__init__() self.axis = axis def forward(self, x): max_x = torch.max(x, dim=self.axis).values assert self.axis == int(self.axis) x = torch.exp(x - max_x.unsqueeze(self.axis)) s = torch.sum(x, dim=self.axis, keepdim=True) return x / s def get_inputs(): return [torch.rand([4, 4, 4, 4, 4])] def get_init_inputs(): return [[], {'axis': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_exp_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused_div_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_exp_sub_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_div_sum_1[grid(1024)](buf0, buf1, 1024, XBLOCK=128, num_warps=4, num_stages=1) del buf0 return buf1, class BoundSoftmaxImplNew(nn.Module): def __init__(self, axis): super().__init__() self.axis = axis def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Mahoumaru/auto_LiRPA
BoundSoftmaxImpl
false
11,670
[ "BSD-3-Clause" ]
0
b03a6c36eb1b921726778359d6d2b94e0cd7e480
https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480
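Note on the BoundSoftmaxImpl record above: the module is a numerically stabilised softmax, exp(x - max(x)) / sum(exp(x - max(x))), which equals softmax(x) because the max shift cancels in the quotient; the two Triton kernels mirror exactly this split (max-subtract-exp, then sum-and-divide). A small sanity check against torch.softmax, runnable on CPU for the eager module:

import torch

x = torch.rand(4, 4, 4, 4, 4)
m = BoundSoftmaxImpl(axis=4)
# The stabilised pipeline and the library softmax should agree to float tolerance.
torch.testing.assert_close(m(x), torch.softmax(x, dim=4))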
RegressionHead
import abc import torch import torch.nn as nn import torch.utils.data.dataset class BaseHead(nn.Module, metaclass=abc.ABCMeta): pass class RegressionHead(BaseHead): def __init__(self, hidden_size, hidden_dropout_prob): """From RobertaClassificationHead""" super().__init__() self.dense = nn.Linear(hidden_size, hidden_size) self.dropout = nn.Dropout(hidden_dropout_prob) self.out_proj = nn.Linear(hidden_size, 1) def forward(self, pooled): x = self.dropout(pooled) x = self.dense(x) x = torch.tanh(x) x = self.dropout(x) scores = self.out_proj(x) return scores def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4, 'hidden_dropout_prob': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import abc import torch.nn as nn import torch.utils.data.dataset assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (1, 4), (4, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](buf1, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_5 return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, primals_4 class BaseHead(nn.Module, metaclass=abc.ABCMeta): pass class RegressionHeadNew(BaseHead): def __init__(self, hidden_size, hidden_dropout_prob): """From RobertaClassificationHead""" super().__init__() self.dense = nn.Linear(hidden_size, hidden_size) self.dropout = nn.Dropout(hidden_dropout_prob) self.out_proj = nn.Linear(hidden_size, 1) def forward(self, input_0): primals_2 = self.dense.weight primals_3 = self.dense.bias primals_4 = self.out_proj.weight primals_5 = self.out_proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
HarshTrivedi/jiant-fork
RegressionHead
false
11,671
[ "MIT" ]
0
6b0150a8d923b0fca33f244a25e1bf2c74ea5f30
https://github.com/HarshTrivedi/jiant-fork/tree/6b0150a8d923b0fca33f244a25e1bf2c74ea5f30
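Note on the RegressionHead record above: the compiled call() contains no dropout operations at all; nn.Dropout is the identity outside training, so the traced graph corresponds to eval-mode execution, and RegressionHeadNew only matches the eager module after model.eval(). A parity sketch under that assumption (CUDA required, since call() pins device 0):

import torch

torch.manual_seed(0)
ref = RegressionHead(hidden_size=4, hidden_dropout_prob=0.5).cuda().eval()  # dropout disabled
opt = RegressionHeadNew(hidden_size=4, hidden_dropout_prob=0.5).cuda()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(opt(x), ref(x))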
BertLayerNormNoVar
import torch import torch.nn as nn class BertLayerNormNoVar(nn.Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNormNoVar, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) x = x - u return self.weight * x + self.bias def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mean_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp0 * tmp11 tmp14 = tmp12 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mean_mul_sub_0[grid(256)](primals_2, primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class BertLayerNormNoVarNew(nn.Module): def __init__(self, hidden_size, eps=1e-12): super(BertLayerNormNoVarNew, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Mahoumaru/auto_LiRPA
BertLayerNormNoVar
false
11,672
[ "BSD-3-Clause" ]
0
b03a6c36eb1b921726778359d6d2b94e0cd7e480
https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480
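Note on the BertLayerNormNoVar record above: the forward computes y = weight * (x - mean(x, dim=-1)) + bias, i.e. mean-centring with an elementwise affine but no division by a standard deviation; consequently the stored variance_epsilon is never used. The single fused kernel recomputes the four-element row mean inline for every output element. A CPU check of the closed form:

import torch

m = BertLayerNormNoVar(hidden_size=4)
x = torch.rand(4, 4, 4, 4)
# eps plays no role here: nothing is divided, so there is nothing to stabilise.
expected = m.weight * (x - x.mean(-1, keepdim=True)) + m.bias
torch.testing.assert_close(m(x), expected)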
Transition
import torch import torch.nn as nn import torch.nn.functional as F class Transition(nn.Module): def __init__(self, in_planes, out_planes): super(Transition, self).__init__() self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=True) def forward(self, x): out = self.conv(F.relu(x)) out = F.avg_pool2d(out, 2) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_planes': 4, 'out_planes': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_avg_pool2d_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf3 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused_avg_pool2d_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) return buf3, primals_2, buf0, buf2 class TransitionNew(nn.Module): def __init__(self, in_planes, out_planes): super(TransitionNew, self).__init__() self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=True) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Mahoumaru/auto_LiRPA
Transition
false
11,673
[ "BSD-3-Clause" ]
0
b03a6c36eb1b921726778359d6d2b94e0cd7e480
https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480
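Note on the Transition record above: the block is relu -> 1x1 convolution -> 2x2 average pooling, and the third Triton kernel spells the pooling out as (a + b + c + d) * 0.25 per output element, halving each spatial dimension. A tiny worked example of that reduction:

import torch
import torch.nn.functional as F

x = torch.arange(16.0).reshape(1, 1, 4, 4)
print(F.avg_pool2d(x, 2))
# tensor([[[[ 2.5000,  4.5000],
#           [10.5000, 12.5000]]]])  -- each entry is the mean of one 2x2 block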
UpBlock
import torch import torch.utils.data from torchvision.transforms import * class ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None): super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.conv(x)) else: out = self.conv(x) if self.activation is not None: return self.act(out) else: return out class DeconvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='prelu', norm=None): super(DeconvBlock, self).__init__() self.deconv = torch.nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.deconv(x)) else: out = self.deconv(x) if self.activation is not None: return self.act(out) else: return out class UpBlock(torch.nn.Module): def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, bias =True, activation='prelu', norm=None): super(UpBlock, self).__init__() self.up_conv1 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) self.up_conv2 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) self.up_conv3 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) def forward(self, x): h0 = self.up_conv1(x) l0 = self.up_conv2(h0) h1 = self.up_conv3(l0 - x) return h1 + h0 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_filter': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 4 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp8, None) @triton.jit def triton_poi_fused__prelu_kernel_convolution_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr2 + x3, xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tmp10 = tmp8 - tmp9 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused__prelu_kernel_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 256 % 4 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr2 + x3, None) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tmp10 = tmp8 + tmp9 tl.store(in_out_ptr0 + x3, tmp2, None) tl.store(out_ptr0 + x3, tmp10, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (4, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 16, 16), (1024, 256, 16, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch .float32) get_raw_stream(0) 
triton_poi_fused__prelu_kernel_convolution_0[grid(4096)](buf1, primals_2, primals_4, buf2, 4096, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_5, stride=(4, 4), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_sub_1[grid(256)](buf4, primals_6, primals_7, primals_3, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_6 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(4, 4), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 16, 16), (1024, 256, 16, 1)) buf7 = buf6 del buf6 buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch .float32) triton_poi_fused__prelu_kernel_add_convolution_2[grid(4096)](buf7, primals_9, primals_10, buf2, buf8, 4096, XBLOCK=256, num_warps= 4, num_stages=1) del primals_9 return (buf8, primals_1, primals_3, primals_4, primals_5, primals_7, primals_8, primals_10, buf1, buf2, buf4, buf5, buf7) class ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None): super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.conv(x)) else: out = self.conv(x) if self.activation is not None: return self.act(out) else: return out class DeconvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='prelu', norm=None): super(DeconvBlock, self).__init__() self.deconv = torch.nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.deconv(x)) else: out = self.deconv(x) if self.activation is not None: return self.act(out) else: return out class UpBlockNew(torch.nn.Module): def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, bias =True, activation='prelu', norm=None): super(UpBlockNew, self).__init__() self.up_conv1 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) self.up_conv2 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, 
activation, norm=None) self.up_conv3 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) def forward(self, input_0): primals_1 = self.up_conv1.deconv.weight primals_2 = self.up_conv1.deconv.bias primals_4 = self.up_conv1.act.weight primals_5 = self.up_conv2.conv.weight primals_6 = self.up_conv2.conv.bias primals_7 = self.up_conv2.act.weight primals_8 = self.up_conv3.deconv.weight primals_9 = self.up_conv3.deconv.bias primals_10 = self.up_conv3.act.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0]
HamsterBiz/iSeeBetter
UpBlock
false
11,674
[ "MIT" ]
0
a71cee61583bdedab1f3b368e2cb7dc5ad969aed
https://github.com/HamsterBiz/iSeeBetter/tree/a71cee61583bdedab1f3b368e2cb7dc5ad969aed
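Note on the UpBlock record above: this is the up-projection unit familiar from DBPN-style super-resolution, h0 = deconv(x), l0 = conv(h0), h1 = deconv(l0 - x), output h1 + h0; with kernel 8, stride 4, padding 2, a 4x4 input grows to (4 - 1) * 4 - 2 * 2 + 8 = 16 per side. The positional-argument quirk flagged after the DownBlock record (the activation string landing in the bias slot) applies here as well. A quick shape check of the eager module:

import torch

up = UpBlock(num_filter=4)
x = torch.rand(4, 4, 4, 4)
print(up(x).shape)  # expected: torch.Size([4, 4, 16, 16])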
mlp_2layer
import torch import torch.nn as nn import torch.nn.functional as F class mlp_2layer(nn.Module): def __init__(self, in_ch, in_dim, width=1): super(mlp_2layer, self).__init__() self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width) self.fc2 = nn.Linear(256 * width, 10) def forward(self, x): x = x.view(x.size(0), -1) x = F.relu(self.fc1(x)) x = self.fc2(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_ch': 4, 'in_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (256, 64), (64, 1)) assert_size_stride(primals_3, (256,), (1,)) assert_size_stride(primals_4, (10, 256), (256, 1)) assert_size_stride(primals_5, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), reinterpret_tensor(primals_2, (64, 256), (1, 64), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(1024)](buf1, primals_3, 1024, XBLOCK= 128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (256, 10), (1, 256), 0), alpha=1, beta=1, out=buf2) del primals_5 return buf2, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), buf1, primals_4 class mlp_2layerNew(nn.Module): def __init__(self, in_ch, in_dim, width=1): super(mlp_2layerNew, self).__init__() self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width) self.fc2 = nn.Linear(256 * width, 10) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Mahoumaru/auto_LiRPA
mlp_2layer
false
11,675
[ "BSD-3-Clause" ]
0
b03a6c36eb1b921726778359d6d2b94e0cd7e480
https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480
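Note on the mlp_2layer record above: the input is flattened to in_ch * in_dim * in_dim = 64 features, so the network is Linear(64, 256) -> ReLU -> Linear(256, 10); in the compiled version both matmuls go to extern kernels (cuBLAS) and the only custom Triton kernel fuses the first bias add with the ReLU. Shape walk-through on the eager module:

import torch

net = mlp_2layer(in_ch=4, in_dim=4)
x = torch.rand(4, 4, 4, 4)
print(net(x).shape)  # torch.Size([4, 10]): 64 -> 256 -> 10 per sample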
D_DownBlock
import torch import torch.utils.data from torchvision.transforms import * class ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None): super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.conv(x)) else: out = self.conv(x) if self.activation is not None: return self.act(out) else: return out class DeconvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='prelu', norm=None): super(DeconvBlock, self).__init__() self.deconv = torch.nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.deconv(x)) else: out = self.deconv(x) if self.activation is not None: return self.act(out) else: return out class D_DownBlock(torch.nn.Module): def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, num_stages=1, bias=True, activation='prelu', norm=None): super(D_DownBlock, self).__init__() self.conv = ConvBlock(num_filter * num_stages, num_filter, 1, 1, 0, activation, norm=None) self.down_conv1 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) self.down_conv2 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) self.down_conv3 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) def forward(self, x): x = self.conv(x) l0 = self.down_conv1(x) h0 = self.down_conv2(l0) l1 = self.down_conv3(h0 - x) return l1 + l0 def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'num_filter': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused__prelu_kernel_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp8, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__prelu_kernel_convolution_sub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr2 + x3, xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tmp10 = tmp8 - tmp9 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp10, xmask) @triton.jit def triton_poi_fused__prelu_kernel_add_convolution_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp9 = tl.load(in_ptr2 + x2, xmask) tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp7 = tmp6 * tmp2 tmp8 = tl.where(tmp4, tmp2, tmp7) tmp10 = tmp8 + tmp9 tl.store(in_out_ptr0 + x2, tmp2, xmask) tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1,), (1,)) assert_size_stride(primals_5, (4, 4, 8, 8), (256, 64, 8, 1)) 
assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (4, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_9, (4,), (1,)) assert_size_stride(primals_10, (1,), (1,)) assert_size_stride(primals_11, (4, 4, 8, 8), (256, 64, 8, 1)) assert_size_stride(primals_12, (4,), (1,)) assert_size_stride(primals_13, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__prelu_kernel_convolution_0[grid(256)](buf1, primals_2, primals_4, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_5, stride=(4, 4), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 1, 1), (4, 1, 1, 1)) buf4 = buf3 del buf3 buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_1[grid(16)](buf4, primals_6, primals_7, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_6 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(4, 4), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1)) buf7 = buf6 del buf6 buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__prelu_kernel_convolution_sub_2[grid(256)](buf7, primals_9, primals_10, buf2, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf9 = extern_kernels.convolution(buf8, primals_11, stride=(4, 4), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 4, 1, 1), (4, 1, 1, 1)) buf10 = buf9 del buf9 buf11 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) triton_poi_fused__prelu_kernel_add_convolution_3[grid(16)](buf10, primals_12, primals_13, buf5, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_12 return (buf11, primals_1, primals_3, primals_4, primals_5, primals_7, primals_8, primals_10, primals_11, primals_13, buf1, buf2, buf4, buf5, buf7, buf8, buf10) class ConvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None): super(ConvBlock, self).__init__() self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.conv(x)) else: out = self.conv(x) if self.activation is not None: return self.act(out) else: return out class DeconvBlock(torch.nn.Module): def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, 
activation='prelu', norm=None): super(DeconvBlock, self).__init__() self.deconv = torch.nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, bias=bias) self.norm = norm if self.norm == 'batch': self.bn = torch.nn.BatchNorm2d(output_size) elif self.norm == 'instance': self.bn = torch.nn.InstanceNorm2d(output_size) self.activation = activation if self.activation == 'relu': self.act = torch.nn.ReLU(True) elif self.activation == 'prelu': self.act = torch.nn.PReLU() elif self.activation == 'lrelu': self.act = torch.nn.LeakyReLU(0.2, True) elif self.activation == 'tanh': self.act = torch.nn.Tanh() elif self.activation == 'sigmoid': self.act = torch.nn.Sigmoid() def forward(self, x): if self.norm is not None: out = self.bn(self.deconv(x)) else: out = self.deconv(x) if self.activation is not None: return self.act(out) else: return out class D_DownBlockNew(torch.nn.Module): def __init__(self, num_filter, kernel_size=8, stride=4, padding=2, num_stages=1, bias=True, activation='prelu', norm=None): super(D_DownBlockNew, self).__init__() self.conv = ConvBlock(num_filter * num_stages, num_filter, 1, 1, 0, activation, norm=None) self.down_conv1 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) self.down_conv2 = DeconvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) self.down_conv3 = ConvBlock(num_filter, num_filter, kernel_size, stride, padding, activation, norm=None) def forward(self, input_0): primals_1 = self.conv.conv.weight primals_2 = self.conv.conv.bias primals_4 = self.conv.act.weight primals_5 = self.down_conv1.conv.weight primals_6 = self.down_conv1.conv.bias primals_7 = self.down_conv1.act.weight primals_8 = self.down_conv2.deconv.weight primals_9 = self.down_conv2.deconv.bias primals_10 = self.down_conv2.act.weight primals_11 = self.down_conv3.conv.weight primals_12 = self.down_conv3.conv.bias primals_13 = self.down_conv3.act.weight primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13]) return output[0]
HamsterBiz/iSeeBetter
D_DownBlock
false
11,676
[ "MIT" ]
0
a71cee61583bdedab1f3b368e2cb7dc5ad969aed
https://github.com/HamsterBiz/iSeeBetter/tree/a71cee61583bdedab1f3b368e2cb7dc5ad969aed
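Note on the D_DownBlock record above: the dense variant first merges num_filter * num_stages concatenated feature maps through a 1x1 ConvBlock and then applies the same conv/deconv/conv down-projection triple as DownBlock, which is why the compiled call() chains four convolutions with four fused PReLU kernels. With num_stages > 1 the input must carry the widened channel count:

import torch

# Hedged example: with num_stages=2 the 1x1 merge expects 4 * 2 = 8 input channels.
blk = D_DownBlock(num_filter=4, num_stages=2)
x = torch.rand(4, 8, 4, 4)
print(blk(x).shape)  # torch.Size([4, 4, 1, 1]) for 4x4 spatial inputs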
RAEClassifier
import torch import torch.nn as nn import torch.nn.functional as F from typing import Callable class ReactiveAutoencoder(nn.Module): """The RAE a.k.a. SRAE a.k.a. the autoencoder with the strict supervised sparsity matrix. This module provides a framework for training an encoder to maximize information throughput, while converging on an error_signal. Works currently only for single samples/online learning. Planned are batch mode as well as multiple layers.""" __constants__ = ['input_size', 'output_size'] def __init__(self, input_size, output_size, reconstruction_loss: 'nn.Module', hidden_activation: 'Callable[[torch.Tensor], torch.Tensor]'=None, reconstruction_activation: 'Callable[[torch.Tensor], torch.Tensor]'=None, bias=True, reconstruction_bias: 'str'='zeros', activation_scaling=True): super(ReactiveAutoencoder, self).__init__() self.input_size = input_size self.output_size = output_size self.hidden_activation = hidden_activation self.activation_scaling = activation_scaling if activation_scaling: self.scaling = None self.encoder = nn.Linear(input_size, output_size, bias=bias) self.h = torch.zeros(output_size, requires_grad=True) self.predict = torch.zeros(output_size) self.reconstruction_activation = reconstruction_activation self.reconstruction_loss = reconstruction_loss self.reconstructed_input = torch.zeros(input_size, requires_grad=True) self.reconstruction_bias_type = reconstruction_bias self.reconstruction_bias = self.fresh_reconstruction_bias(self.reconstruction_bias_type) def fresh_reconstruction_bias(self, type: 'str'): if type == 'none': return None elif type == 'zeros': return torch.zeros(self.input_size, requires_grad=True) elif type == 'ones': return torch.ones(self.input_size, requires_grad=True) elif type == 'rand': return torch.rand(self.input_size, requires_grad=True) elif type == 'randn': return torch.randn(self.input_size, requires_grad=True) def forward(self, x: 'torch.Tensor', error_signal: 'torch.Tensor'=None): """The forward pass calculates only the h if no error_signal is provided. If an error_signal is provided, then assume same x and use the last h for sparsity and reconstruction calculation. """ if error_signal is None: with torch.no_grad(): self.h = self.encoder(x) if self.hidden_activation is not None: self.h = self.hidden_activation(self.h) return self.h, None self.h.requires_grad_() self.reconstructed_input = F.linear(self.h, self.encoder.weight.t(), self.reconstruction_bias) if self.reconstruction_activation is not None: self.reconstructed_input = self.reconstruction_activation(self.reconstructed_input) rec_loss = self.reconstruction_loss(self.reconstructed_input, x) rec_loss.backward() self.predict = F.linear(x, self.encoder.weight + self.encoder.weight.grad, self.encoder.bias) delta = self.h - self.predict if self.activation_scaling: self.scaling = torch.max(torch.abs(error_signal)).item() / torch.max(delta).item() adjusted_delta = delta * self.scaling mask = torch.where((error_signal - adjusted_delta).abs() < error_signal.abs(), 1, 0) else: mask = torch.where((error_signal - delta).abs() < error_signal.abs(), 1, 0) self.encoder.zero_grad() masked_encoding = self.h * mask self.reconstructed_input = F.linear(masked_encoding, self.encoder.
weight.t(), self.reconstruction_bias) return self.h, self.reconstructed_input def backward(self): super(ReactiveAutoencoder, self).backward() if self.activation_scaling: self.encoder.weight.grad *= self.scaling self.encoder.bias.grad *= self.scaling self.reconstruction_bias.grad += self.scaling def reset_parameters(self) ->None: super(ReactiveAutoencoder, self).reset_parameters() self.reconstruction_bias = self.fresh_reconstruction_bias(self. reconstruction_bias_type) class RAEClassifier(nn.Module): __constants__ = ['input_size', 'hidden_size', 'output_size'] def __init__(self, input_size, hidden_size, output_size, reconstruction_activation: 'Callable[[torch.Tensor], torch.Tensor]' =nn.ReLU(), hidden_activation: 'Callable[[torch.Tensor], torch.Tensor]'=nn.ReLU(), output_activation: 'Callable[[torch.Tensor], torch.Tensor]'=nn. Softmax(), reconstruction_loss: 'nn.Module'=nn.MSELoss()): super(RAEClassifier, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.output_size = output_size self.input = torch.zeros(input_size) self.output_activation = output_activation self.reconstruction_loss = reconstruction_loss self.autoencoder = ReactiveAutoencoder(input_size, hidden_size, self.reconstruction_loss, hidden_activation, reconstruction_activation) self.classifier = nn.Linear(hidden_size, output_size) self.classifier.weight.register_hook(self.backward_classifier_hook) def forward(self, input): """The forward pass calculates only the h if no error_signal is provided.""" self.input = input encoding, _reconstruction = self.autoencoder(input) output = self.classifier(encoding) return self.output_activation(output) def backward_classifier_hook(self, grad): """Triggers autoencoder sparsification with classifier, after backward on this classifier.""" with torch.enable_grad(): _encoding, reconstruction = self.autoencoder(self.input, torch. sum(grad, 0)) rec_loss = self.reconstruction_loss(reconstruction, self.input) rec_loss.backward() def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'hidden_size': 4, 'output_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.functional as F from typing import Callable assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x3, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_1 del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](buf1, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) 
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_4 del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 return buf4, buf1, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf4 class ReactiveAutoencoder(nn.Module): """The RAE a.k.a. SRAE a.k.a. the autoencoder with the strict supervised sparsity matrix. This module provides a framework for training an encoder to maximize information throughput, while converging on an error_signal. Works currently only for single samples/online learning. Planned are batch mode as well as multiple layers.""" __constants__ = ['input_size', 'output_size'] def __init__(self, input_size, output_size, reconstruction_loss: 'nn.Module', hidden_activation: 'Callable[[torch.Tensor], torch.Tensor]'=None, reconstruction_activation: 'Callable[[torch.Tensor], torch.Tensor]'=None, bias=True, reconstruction_bias: 'str'='zeros', activation_scaling=True): super(ReactiveAutoencoder, self).__init__() self.input_size = input_size self.output_size = output_size self.hidden_activation = hidden_activation self.activation_scaling = activation_scaling if activation_scaling: self.scaling = None self.encoder = nn.Linear(input_size, output_size, bias=bias) self.h = torch.zeros(output_size, requires_grad=True) self.predict = torch.zeros(output_size) self.reconstruction_activation = reconstruction_activation self.reconstruction_loss = reconstruction_loss self.reconstructed_input = torch.zeros(input_size, requires_grad=True) self.reconstruction_bias_type = reconstruction_bias self.reconstruction_bias = self.fresh_reconstruction_bias(self.reconstruction_bias_type) def fresh_reconstruction_bias(self, type: 'str'): if type == 'none': return None elif type == 'zeros': return torch.zeros(self.input_size, requires_grad=True) elif type == 'ones': return torch.ones(self.input_size, requires_grad=True) elif type == 'rand': return torch.rand(self.input_size, requires_grad=True) elif type == 'randn': return torch.randn(self.input_size, requires_grad=True) def forward(self, x: 'torch.Tensor', error_signal: 'torch.Tensor'=None): """The forward pass calculates only the h if no error_signal is provided. If an error_signal is provided, then assume same x and use the last h for sparsity and reconstruction calculation. """ if error_signal is None: with torch.no_grad(): self.h = self.encoder(x) if self.hidden_activation is not None: self.h = self.hidden_activation(self.h) return self.h, None self.h.requires_grad_() self.reconstructed_input = F.linear(self.h, self.encoder.weight.t(), self.reconstruction_bias) if self.reconstruction_activation is not None: self.reconstructed_input = self.reconstruction_activation(self.reconstructed_input) rec_loss = self.reconstruction_loss(self.reconstructed_input, x) rec_loss.backward() self.predict = F.linear(x, self.encoder.weight + self.encoder.weight.grad, self.encoder.bias) delta = self.h - self.predict if self.activation_scaling: self.scaling = torch.max(torch.abs(error_signal)).item() / torch.max(delta).item() adjusted_delta = delta * self.scaling mask = torch.where((error_signal - adjusted_delta).abs() < error_signal.abs(), 1, 0) else: mask = torch.where((error_signal - delta).abs() < error_signal.abs(), 1, 0) self.encoder.zero_grad() masked_encoding = self.h * mask self.reconstructed_input = F.linear(masked_encoding, self.encoder.weight.t(), self.reconstruction_bias) return self.h, self.reconstructed_input def backward(self): super(ReactiveAutoencoder, self).backward() if self.activation_scaling: self.encoder.weight.grad *= self.scaling self.encoder.bias.grad *= self.scaling self.reconstruction_bias.grad += self.scaling def reset_parameters(self) ->None: super(ReactiveAutoencoder, self).reset_parameters() self.reconstruction_bias = self.fresh_reconstruction_bias(self.reconstruction_bias_type) class RAEClassifierNew(nn.Module): __constants__ = ['input_size', 'hidden_size', 'output_size'] def __init__(self, input_size, hidden_size, output_size, reconstruction_activation: 'Callable[[torch.Tensor], torch.Tensor]'=nn.ReLU(), hidden_activation: 'Callable[[torch.Tensor], torch.Tensor]'=nn.ReLU(), output_activation: 'Callable[[torch.Tensor], torch.Tensor]'=nn.Softmax(), reconstruction_loss: 'nn.Module'=nn.MSELoss()): super(RAEClassifierNew, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.output_size = output_size self.input = torch.zeros(input_size) self.output_activation = output_activation self.reconstruction_loss = reconstruction_loss self.autoencoder = ReactiveAutoencoder(input_size, hidden_size, self.reconstruction_loss, hidden_activation, reconstruction_activation) self.classifier = nn.Linear(hidden_size, output_size) self.classifier.weight.register_hook(self.backward_classifier_hook) def backward_classifier_hook(self, grad): """Triggers autoencoder sparsification with classifier, after backward on this classifier.""" with torch.enable_grad(): _encoding, reconstruction = self.autoencoder(self.input, torch.sum(grad, 0)) rec_loss = self.reconstruction_loss(reconstruction, self.input) rec_loss.backward() def forward(self, input_0): primals_2 = self.autoencoder.encoder.weight primals_3 = self.autoencoder.encoder.bias primals_4 = self.classifier.weight primals_5 = self.classifier.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
MHHukiewitz/SRAE_pytorch
RAEClassifier
false
11677
[ "MIT" ]
0
91f961f740c96cdb49739c9738ed330af59750d0
https://github.com/MHHukiewitz/SRAE_pytorch/tree/91f961f740c96cdb49739c9738ed330af59750d0
SuperPointNet
import torch
import torch.optim
import torch.utils.data


class SuperPointNet(torch.nn.Module):
    """ Pytorch definition of SuperPoint Network. """

    def __init__(self):
        super(SuperPointNet, self).__init__()
        self.relu = torch.nn.ReLU(inplace=True)
        self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        c1, c2, c3, c4, c5, d1 = 64, 64, 128, 128, 256, 256
        self.conv1a = torch.nn.Conv2d(1, c1, kernel_size=3, stride=1,
            padding=1)
        self.conv1b = torch.nn.Conv2d(c1, c1, kernel_size=3, stride=1,
            padding=1)
        self.conv2a = torch.nn.Conv2d(c1, c2, kernel_size=3, stride=1,
            padding=1)
        self.conv2b = torch.nn.Conv2d(c2, c2, kernel_size=3, stride=1,
            padding=1)
        self.conv3a = torch.nn.Conv2d(c2, c3, kernel_size=3, stride=1,
            padding=1)
        self.conv3b = torch.nn.Conv2d(c3, c3, kernel_size=3, stride=1,
            padding=1)
        self.conv4a = torch.nn.Conv2d(c3, c4, kernel_size=3, stride=1,
            padding=1)
        self.conv4b = torch.nn.Conv2d(c4, c4, kernel_size=3, stride=1,
            padding=1)
        self.convPa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1,
            padding=1)
        self.convPb = torch.nn.Conv2d(c5, 65, kernel_size=1, stride=1,
            padding=0)
        self.convDa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1,
            padding=1)
        self.convDb = torch.nn.Conv2d(c5, d1, kernel_size=1, stride=1,
            padding=0)

    def forward(self, x):
        """ Forward pass that jointly computes unprocessed point and
        descriptor tensors.
        Input
          x: Image pytorch tensor shaped N x 1 x H x W.
        Output
          semi: Output point pytorch tensor shaped N x 65 x H/8 x W/8.
          desc: Output descriptor pytorch tensor shaped N x 256 x H/8 x W/8.
        """
        x = self.relu(self.conv1a(x))
        x = self.relu(self.conv1b(x))
        x = self.pool(x)
        x = self.relu(self.conv2a(x))
        x = self.relu(self.conv2b(x))
        x = self.pool(x)
        x = self.relu(self.conv3a(x))
        x = self.relu(self.conv3b(x))
        x = self.pool(x)
        x = self.relu(self.conv4a(x))
        x = self.relu(self.conv4b(x))
        cPa = self.relu(self.convPa(x))
        semi = self.convPb(cPa)
        cDa = self.relu(self.convDa(x))
        desc = self.convDb(cDa)
        dn = torch.norm(desc, p=2, dim=1)
        desc = desc.div(torch.unsqueeze(dn, 1))
        return semi, desc


def get_inputs():
    return [torch.rand([4, 1, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.optim import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = yindex // 128 tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last' ) tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask) @triton.jit def triton_poi_fused_convolution_relu_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 256 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 64 y1 = yindex // 64 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (y0 + 64 * x2 + 262144 * y1), tmp4, ymask) @triton.jit def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 % 32 x2 = xindex // 2048 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None) tmp3 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None) tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 64 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 64 x1 = xindex // 64 % 16 x2 = xindex // 1024 
x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 4096 * x2), None) tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 4096 * x2), None) tmp3 = tl.load(in_ptr0 + (2048 + x0 + 128 * x1 + 4096 * x2), None) tmp5 = tl.load(in_ptr0 + (2112 + x0 + 128 * x1 + 4096 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 128 x1 = xindex // 128 % 8 x2 = xindex // 1024 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 4096 * x2), None) tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 4096 * x2), None) tmp3 = tl.load(in_ptr0 + (2048 + x0 + 256 * x1 + 4096 * x2), None) tmp5 = tl.load(in_ptr0 + (2176 + x0 + 256 * x1 + 4096 * x2), None) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x3, tmp6, None) tl.store(out_ptr1 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_relu_12(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_13(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 260 xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < 
ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 65 y1 = yindex // 65 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 65 * x2 + 4160 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 64 * y3), tmp2, xmask & ymask) @triton.jit def triton_per_fused_convolution_linalg_vector_norm_14(in_out_ptr0, in_out_ptr1, in_ptr0, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_out_ptr0 + (r1 + 256 * x0), None) tmp1 = tl.load(in_ptr0 + r1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp7 = libdevice.sqrt(tmp6) tl.store(in_out_ptr0 + (r1 + 256 * x0), tmp2, None) tl.debug_barrier() tl.store(in_out_ptr1 + x0, tmp7, None) @triton.jit def triton_poi_fused_div_15(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): xnumel = 64 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 256 y1 = yindex // 256 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 16384 * y1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2 + 64 * y1), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 / tmp1 tl.store(out_ptr0 + (x2 + 64 * y3), tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25) = args args.clear() assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64,), (1,)) assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_7, (64,), (1,)) assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_9, (64,), (1,)) assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_15, (128,), (1,)) assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_17, (128,), (1,)) assert_size_stride(primals_18, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_19, (256,), (1,)) assert_size_stride(primals_20, (65, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_21, (65,), (1,)) assert_size_stride(primals_22, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_23, (256,), (1,)) assert_size_stride(primals_24, (256, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_25, (256,), (1,)) with 
torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch. float32) get_raw_stream(0) triton_poi_fused_0[grid(4096, 9)](primals_4, buf0, 4096, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf1 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch. float32) triton_poi_fused_0[grid(4096, 9)](primals_6, buf1, 4096, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf2 = empty_strided_cuda((64, 64, 3, 3), (576, 1, 192, 64), torch. float32) triton_poi_fused_0[grid(4096, 9)](primals_8, buf2, 4096, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_1[grid(8192, 9)](primals_10, buf3, 8192, 9, XBLOCK =16, YBLOCK=64, num_warps=4, num_stages=1) del primals_10 buf4 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_12, buf4, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_12 buf5 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_14, buf5, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_14 buf6 = empty_strided_cuda((128, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_2[grid(16384, 9)](primals_16, buf6, 16384, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_16 buf7 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_3[grid(32768, 9)](primals_18, buf7, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_18 buf8 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_3[grid(32768, 9)](primals_22, buf8, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_22 buf9 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf9, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf10 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.float32) triton_poi_fused_convolution_relu_4[grid(256, 4096)](buf9, primals_2, buf10, 256, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del buf9 del primals_2 buf11 = extern_kernels.convolution(buf10, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf11, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf12 = buf11 del buf11 triton_poi_fused_convolution_relu_5[grid(1048576)](buf12, primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf13 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) buf14 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_6[grid(262144)](buf12, buf13, buf14, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf15 = extern_kernels.convolution(buf13, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 64, 32, 32), (65536, 1, 2048, 64)) buf16 = buf15 del buf15 triton_poi_fused_convolution_relu_7[grid(262144)](buf16, primals_7, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf17 = extern_kernels.convolution(buf16, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, 
output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf17, (4, 64, 32, 32), (65536, 1, 2048, 64)) buf18 = buf17 del buf17 triton_poi_fused_convolution_relu_7[grid(262144)](buf18, primals_9, 262144, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf19 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.float32) buf20 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_8[grid(65536)](buf18, buf19, buf20, 65536, XBLOCK=256, num_warps=4, num_stages=1) buf21 = extern_kernels.convolution(buf19, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf21, (4, 128, 16, 16), (32768, 1, 2048, 128)) buf22 = buf21 del buf21 triton_poi_fused_convolution_relu_9[grid(131072)](buf22, primals_11, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_11 buf23 = extern_kernels.convolution(buf22, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 128, 16, 16), (32768, 1, 2048, 128)) buf24 = buf23 del buf23 triton_poi_fused_convolution_relu_9[grid(131072)](buf24, primals_13, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_13 buf25 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128), torch.float32) buf26 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128), torch.int8) triton_poi_fused_max_pool2d_with_indices_10[grid(32768)](buf24, buf25, buf26, 32768, XBLOCK=256, num_warps=4, num_stages=1) buf27 = extern_kernels.convolution(buf25, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf27, (4, 128, 8, 8), (8192, 1, 1024, 128)) buf28 = buf27 del buf27 triton_poi_fused_convolution_relu_11[grid(32768)](buf28, primals_15, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_15 buf29 = extern_kernels.convolution(buf28, buf6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf29, (4, 128, 8, 8), (8192, 1, 1024, 128)) buf30 = buf29 del buf29 triton_poi_fused_convolution_relu_11[grid(32768)](buf30, primals_17, 32768, XBLOCK=256, num_warps=4, num_stages=1) del primals_17 buf31 = extern_kernels.convolution(buf30, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf31, (4, 256, 8, 8), (16384, 1, 2048, 256)) buf32 = buf31 del buf31 triton_poi_fused_convolution_relu_12[grid(65536)](buf32, primals_19, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_19 buf33 = extern_kernels.convolution(buf32, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf33, (4, 65, 8, 8), (4160, 1, 520, 65)) buf34 = empty_strided_cuda((4, 65, 8, 8), (4160, 64, 8, 1), torch. 
float32) triton_poi_fused_convolution_13[grid(260, 64)](buf33, primals_21, buf34, 260, 64, XBLOCK=64, YBLOCK=4, num_warps=4, num_stages=1) del buf33 del primals_21 buf35 = extern_kernels.convolution(buf30, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf35, (4, 256, 8, 8), (16384, 1, 2048, 256)) buf36 = buf35 del buf35 triton_poi_fused_convolution_relu_12[grid(65536)](buf36, primals_23, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_23 buf37 = extern_kernels.convolution(buf36, primals_24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf37, (4, 256, 8, 8), (16384, 1, 2048, 256)) buf38 = buf37 del buf37 buf39 = empty_strided_cuda((4, 8, 8), (64, 8, 1), torch.float32) buf40 = buf39 del buf39 triton_per_fused_convolution_linalg_vector_norm_14[grid(256)](buf38, buf40, primals_25, 256, 256, num_warps=2, num_stages=1) del primals_25 buf41 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch .float32) triton_poi_fused_div_15[grid(1024, 64)](buf38, buf40, buf41, 1024, 64, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) return (buf34, buf41, primals_1, primals_3, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, primals_20, buf8, primals_24, buf10, buf12, buf13, buf14, buf16, buf18, buf19, buf20, buf22, buf24, buf25, buf26, buf28, buf30, buf32, buf36, buf38, reinterpret_tensor(buf40, (4, 1, 8, 8), (64, 64, 8, 1), 0)) class SuperPointNetNew(torch.nn.Module): """ Pytorch definition of SuperPoint Network. """ def __init__(self): super(SuperPointNetNew, self).__init__() self.relu = torch.nn.ReLU(inplace=True) self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2) c1, c2, c3, c4, c5, d1 = 64, 64, 128, 128, 256, 256 self.conv1a = torch.nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1 ) self.conv1b = torch.nn.Conv2d(c1, c1, kernel_size=3, stride=1, padding=1) self.conv2a = torch.nn.Conv2d(c1, c2, kernel_size=3, stride=1, padding=1) self.conv2b = torch.nn.Conv2d(c2, c2, kernel_size=3, stride=1, padding=1) self.conv3a = torch.nn.Conv2d(c2, c3, kernel_size=3, stride=1, padding=1) self.conv3b = torch.nn.Conv2d(c3, c3, kernel_size=3, stride=1, padding=1) self.conv4a = torch.nn.Conv2d(c3, c4, kernel_size=3, stride=1, padding=1) self.conv4b = torch.nn.Conv2d(c4, c4, kernel_size=3, stride=1, padding=1) self.convPa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1) self.convPb = torch.nn.Conv2d(c5, 65, kernel_size=1, stride=1, padding=0) self.convDa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1) self.convDb = torch.nn.Conv2d(c5, d1, kernel_size=1, stride=1, padding=0) def forward(self, input_0): primals_1 = self.conv1a.weight primals_2 = self.conv1a.bias primals_4 = self.conv1b.weight primals_5 = self.conv1b.bias primals_6 = self.conv2a.weight primals_7 = self.conv2a.bias primals_8 = self.conv2b.weight primals_9 = self.conv2b.bias primals_10 = self.conv3a.weight primals_11 = self.conv3a.bias primals_12 = self.conv3b.weight primals_13 = self.conv3b.bias primals_14 = self.conv4a.weight primals_15 = self.conv4a.bias primals_16 = self.conv4b.weight primals_17 = self.conv4b.bias primals_18 = self.convPa.weight primals_19 = self.convPa.bias primals_20 = self.convPb.weight primals_21 = self.convPb.bias primals_22 = self.convDa.weight primals_23 = self.convDa.bias primals_24 = self.convDb.weight primals_25 = self.convDb.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, 
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25]) return output[0], output[1]
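A hedged smoke test for this record, assuming a CUDA device and that the kernels above compile in the current environment: the Inductor wrapper is specialized to the (4, 1, 64, 64) input asserted in call(), and should agree with the eager module once weights are shared.

import torch

eager = SuperPointNet().cuda().eval()
fused = SuperPointNetNew().cuda().eval()
fused.load_state_dict(eager.state_dict())  # identical submodule names
x = torch.rand(4, 1, 64, 64, device='cuda')
with torch.no_grad():
    semi_ref, desc_ref = eager(x)
    semi, desc = fused(x)
# Both heads should match to floating-point tolerance.
print(torch.allclose(semi, semi_ref, atol=1e-5),
      torch.allclose(desc, desc_ref, atol=1e-5))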
LeikvollE/pytorch-superpoint
SuperPointNet
false
11678
[ "MIT" ]
0
52144a760e0cc46259e57397a5a55f0585fe6d0b
https://github.com/LeikvollE/pytorch-superpoint/tree/52144a760e0cc46259e57397a5a55f0585fe6d0b
cnn_4layer
import torch
import torch.nn as nn
import torch.nn.functional as F


class cnn_4layer(nn.Module):

    def __init__(self, in_ch, in_dim, width=2, linear_size=256):
        super(cnn_4layer, self).__init__()
        self.conv1 = nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1)
        self.conv2 = nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1)
        self.fc1 = nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4),
            linear_size)
        self.fc2 = nn.Linear(linear_size, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_ch': 4, 'in_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (8, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (16, 8, 4, 4), (128, 16, 4, 1)) assert_size_stride(primals_5, (16,), (1,)) assert_size_stride(primals_6, (256, 16), (16, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (10, 256), (256, 1)) assert_size_stride(primals_9, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 2, 2), (32, 4, 2, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(128)](buf1, primals_2, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 16, 1, 1), (16, 1, 1, 1)) buf3 = reinterpret_tensor(buf2, (4, 16, 1, 1), (16, 1, 64, 64), 0) del buf2 buf7 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_1[grid(64)](buf3, primals_5, buf7, 64, 
XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (4, 16), (16, 1), 0), reinterpret_tensor(primals_6, (16, 256), (1, 16), 0), out=buf4) buf5 = buf4 del buf4 triton_poi_fused_relu_2[grid(1024)](buf5, primals_7, 1024, XBLOCK= 128, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_9, buf5, reinterpret_tensor(primals_8, (256, 10), (1, 256), 0), alpha=1, beta=1, out=buf6) del primals_9 return buf6, primals_1, primals_3, primals_4, buf1, reinterpret_tensor(buf3 , (4, 16), (16, 1), 0), buf5, primals_8, primals_6, buf7 class cnn_4layerNew(nn.Module): def __init__(self, in_ch, in_dim, width=2, linear_size=256): super(cnn_4layerNew, self).__init__() self.conv1 = nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1) self.conv2 = nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1) self.fc1 = nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4), linear_size) self.fc2 = nn.Linear(linear_size, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.fc1.weight primals_7 = self.fc1.bias primals_8 = self.fc2.weight primals_9 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
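A small sanity sketch (illustrative, CUDA assumed): the fc1 input size 8 * width * (in_dim // 4) ** 2 works out to 16 for the recorded init inputs, since the two stride-2 convolutions shrink each spatial dimension by 4x; this matches the (256, 16) fc1 weight asserted in call().

import torch

model = cnn_4layerNew(in_ch=4, in_dim=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
print(model.fc1.in_features)  # 16 = 8 * 2 * (4 // 4) * (4 // 4)
print(model(x).shape)         # torch.Size([4, 10])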
Mahoumaru/auto_LiRPA
cnn_4layer
false
11679
[ "BSD-3-Clause" ]
0
b03a6c36eb1b921726778359d6d2b94e0cd7e480
https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480
mlp_3layer
import torch
import torch.nn as nn
import torch.nn.functional as F


class mlp_3layer(nn.Module):

    def __init__(self, in_ch, in_dim, width=1):
        super(mlp_3layer, self).__init__()
        self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width)
        self.fc2 = nn.Linear(256 * width, 128 * width)
        self.fc3 = nn.Linear(128 * width, 10)

    def forward(self, x):
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_ch': 4, 'in_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (256, 64), (64, 1)) assert_size_stride(primals_3, (256,), (1,)) assert_size_stride(primals_4, (128, 256), (256, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (10, 128), (128, 1)) assert_size_stride(primals_7, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), reinterpret_tensor(primals_2, (64, 256), (1, 64), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(1024)](buf1, primals_3, 1024, XBLOCK= 128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (256, 128), ( 1, 256), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(512)](buf3, primals_5, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf4) del primals_7 return buf4, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), buf1, buf3, primals_6, primals_4 class mlp_3layerNew(nn.Module): def __init__(self, in_ch, in_dim, width=1): super(mlp_3layerNew, self).__init__() self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width) self.fc2 = nn.Linear(256 * width, 128 * width) self.fc3 = nn.Linear(128 * width, 10) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
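A hedged equivalence check (CUDA assumed, weights shared): the eager model flattens with x.view(x.size(0), -1), while the generated call() reinterprets the (4, 4, 4, 4) input as (4, 64) via reinterpret_tensor without a copy, so the two paths should produce matching outputs.

import torch

eager = mlp_3layer(in_ch=4, in_dim=4).cuda()
fused = mlp_3layerNew(in_ch=4, in_dim=4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    print(torch.allclose(eager(x), fused(x), atol=1e-5))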
Mahoumaru/auto_LiRPA
mlp_3layer
false
11680
[ "BSD-3-Clause" ]
0
b03a6c36eb1b921726778359d6d2b94e0cd7e480
https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480
mlp_5layer
import torch
import torch.nn as nn
import torch.nn.functional as F


class mlp_5layer(nn.Module):

    def __init__(self, in_ch, in_dim, width=1):
        super(mlp_5layer, self).__init__()
        self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width)
        self.fc2 = nn.Linear(256 * width, 256 * width)
        self.fc3 = nn.Linear(256 * width, 256 * width)
        self.fc4 = nn.Linear(256 * width, 128 * width)
        self.fc5 = nn.Linear(128 * width, 10)

    def forward(self, x):
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = F.relu(self.fc4(x))
        x = self.fc5(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_ch': 4, 'in_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (256, 64), (64, 1)) assert_size_stride(primals_3, (256,), (1,)) assert_size_stride(primals_4, (256, 256), (256, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (256, 256), (256, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (128, 256), (256, 1)) assert_size_stride(primals_9, (128,), (1,)) assert_size_stride(primals_10, (10, 128), (128, 1)) assert_size_stride(primals_11, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), reinterpret_tensor(primals_2, (64, 256), (1, 64), 0), out=buf0) del primals_2 buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(1024)](buf1, primals_3, 1024, XBLOCK= 128, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (256, 256), ( 1, 256), 0), out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_0[grid(1024)](buf3, primals_5, 1024, XBLOCK= 128, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 256), (256, 1), torch.float32) extern_kernels.mm(buf3, reinterpret_tensor(primals_6, (256, 256), ( 1, 256), 0), out=buf4) buf5 = buf4 del buf4 triton_poi_fused_relu_0[grid(1024)](buf5, primals_7, 1024, XBLOCK= 128, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(buf5, reinterpret_tensor(primals_8, (256, 128), ( 1, 256), 0), out=buf6) buf7 = buf6 del buf6 triton_poi_fused_relu_1[grid(512)](buf7, primals_9, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_9 buf8 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_11, buf7, 
reinterpret_tensor( primals_10, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf8) del primals_11 return buf8, reinterpret_tensor(primals_1, (4, 64), (64, 1), 0 ), buf1, buf3, buf5, buf7, primals_10, primals_8, primals_6, primals_4 class mlp_5layerNew(nn.Module): def __init__(self, in_ch, in_dim, width=1): super(mlp_5layerNew, self).__init__() self.fc1 = nn.Linear(in_ch * in_dim * in_dim, 256 * width) self.fc2 = nn.Linear(256 * width, 256 * width) self.fc3 = nn.Linear(256 * width, 256 * width) self.fc4 = nn.Linear(256 * width, 128 * width) self.fc5 = nn.Linear(128 * width, 10) def forward(self, input_0): primals_2 = self.fc1.weight primals_3 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_8 = self.fc4.weight primals_9 = self.fc4.bias primals_10 = self.fc5.weight primals_11 = self.fc5.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11]) return output[0]
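A forward smoke test (illustrative, CUDA assumed). Note that the generated code reuses one bias-plus-ReLU kernel, triton_poi_fused_relu_0, for all three 256-wide hidden layers; only the matmuls are dispatched through extern_kernels.

import torch

model = mlp_5layerNew(in_ch=4, in_dim=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
print(model(x).shape)                              # torch.Size([4, 10])
print(sum(p.numel() for p in model.parameters()))  # 182410 for these sizes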
Mahoumaru/auto_LiRPA
mlp_5layer
false
11681
[ "BSD-3-Clause" ]
0
b03a6c36eb1b921726778359d6d2b94e0cd7e480
https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480
cnn_7layer_alt
import torch
import torch.nn as nn
import torch.nn.functional as F


class cnn_7layer_alt(nn.Module):

    def __init__(self, in_ch, in_dim, width=2, linear_size=128):
        super(cnn_7layer_alt, self).__init__()
        self.conv1 = nn.Conv2d(in_ch, 4 * width, 3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(4 * width, 4 * width, 4, stride=2, padding=1)
        self.conv3 = nn.Conv2d(4 * width, 8 * width, 3, stride=1, padding=1)
        self.conv4 = nn.Conv2d(8 * width, 8 * width, 4, stride=2, padding=1)
        self.fc1 = nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4),
            linear_size)
        self.fc2 = nn.Linear(linear_size, linear_size)
        self.fc3 = nn.Linear(linear_size, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_ch': 4, 'in_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 8 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 16 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15) = args args.clear() assert_size_stride(primals_1, (8, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (8, 8, 4, 4), (128, 16, 4, 1)) assert_size_stride(primals_5, (8,), (1,)) 
assert_size_stride(primals_6, (16, 8, 3, 3), (72, 9, 3, 1)) assert_size_stride(primals_7, (16,), (1,)) assert_size_stride(primals_8, (16, 16, 4, 4), (256, 16, 4, 1)) assert_size_stride(primals_9, (16,), (1,)) assert_size_stride(primals_10, (128, 16), (16, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (128, 128), (128, 1)) assert_size_stride(primals_13, (128,), (1,)) assert_size_stride(primals_14, (10, 128), (128, 1)) assert_size_stride(primals_15, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 4, 4), (128, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(512)](buf1, primals_2, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 8, 2, 2), (32, 4, 2, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(128)](buf3, primals_5, 128, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 16, 2, 2), (64, 4, 2, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_relu_2[grid(256)](buf5, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 16, 1, 1), (16, 1, 1, 1)) buf7 = reinterpret_tensor(buf6, (4, 16, 1, 1), (16, 1, 64, 64), 0) del buf6 buf13 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_3[grid(64)](buf7, primals_9, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_9 buf8 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf7, (4, 16), (16, 1), 0), reinterpret_tensor(primals_10, (16, 128), (1, 16), 0), out=buf8) buf9 = buf8 del buf8 triton_poi_fused_relu_4[grid(512)](buf9, primals_11, 512, XBLOCK= 256, num_warps=4, num_stages=1) del primals_11 buf10 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(buf9, reinterpret_tensor(primals_12, (128, 128), (1, 128), 0), out=buf10) buf11 = buf10 del buf10 triton_poi_fused_relu_4[grid(512)](buf11, primals_13, 512, XBLOCK= 256, num_warps=4, num_stages=1) del primals_13 buf12 = empty_strided_cuda((4, 10), (10, 1), torch.float32) extern_kernels.addmm(primals_15, buf11, reinterpret_tensor( primals_14, (128, 10), (1, 128), 0), alpha=1, beta=1, out=buf12) del primals_15 return (buf12, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, reinterpret_tensor(buf7, (4, 16), (16, 1), 0), buf9, buf11, primals_14, primals_12, primals_10, buf13) class cnn_7layer_altNew(nn.Module): def __init__(self, in_ch, in_dim, width=2, linear_size=128): super(cnn_7layer_altNew, self).__init__() self.conv1 = nn.Conv2d(in_ch, 4 * width, 3, stride=1, padding=1) self.conv2 = nn.Conv2d(4 * width, 4 * width, 4, stride=2, padding=1) self.conv3 = nn.Conv2d(4 * width, 8 * width, 3, stride=1, padding=1) self.conv4 = nn.Conv2d(8 * width, 8 * 
width, 4, stride=2, padding=1) self.fc1 = nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4), linear_size) self.fc2 = nn.Linear(linear_size, linear_size) self.fc3 = nn.Linear(linear_size, 10) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_6 = self.conv3.weight primals_7 = self.conv3.bias primals_8 = self.conv4.weight primals_9 = self.conv4.bias primals_10 = self.fc1.weight primals_11 = self.fc1.bias primals_12 = self.fc2.weight primals_13 = self.fc2.bias primals_14 = self.fc3.weight primals_15 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15]) return output[0]
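A hedged check for this record (CUDA assumed). Besides the output, the generated call() also materializes a bool ReLU mask (buf13, from triton_poi_fused_convolution_relu_threshold_backward_3) for the last convolution, which the threshold backward consumes during training.

import torch

eager = cnn_7layer_alt(in_ch=4, in_dim=4).cuda()
fused = cnn_7layer_altNew(in_ch=4, in_dim=4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    print(torch.allclose(eager(x), fused(x), atol=1e-5))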
Mahoumaru/auto_LiRPA
cnn_7layer_alt
false
11682
[ "BSD-3-Clause" ]
0
b03a6c36eb1b921726778359d6d2b94e0cd7e480
https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480
ASPP
import torch
from torch import nn
import torch.nn.functional as F
import torch.cuda


class ASPP(nn.Module):
    """
    Atrous spatial pyramid pooling used in object detection and segmentation.
    """

    def __init__(self, in_channel=512, depth=256):
        super().__init__()
        self.mean = nn.AdaptiveAvgPool2d((1, 1))
        self.conv = nn.Conv2d(in_channel, depth, 1, 1)
        self.atrous_block1 = nn.Conv2d(in_channel, depth, 1, 1)
        self.atrous_block6 = nn.Conv2d(in_channel, depth, 3, 1, padding=6,
            dilation=6)
        self.atrous_block12 = nn.Conv2d(in_channel, depth, 3, 1, padding=12,
            dilation=12)
        self.atrous_block18 = nn.Conv2d(in_channel, depth, 3, 1, padding=18,
            dilation=18)
        self.conv_1x1_output = nn.Conv2d(depth * 5, depth, 1, 1)

    def forward(self, x):
        size = x.shape[2:]
        image_features = self.mean(x)
        image_features = self.conv(image_features)
        image_features = F.upsample(image_features, size=size, mode='bilinear')
        atrous_block1 = self.atrous_block1(x)
        atrous_block6 = self.atrous_block6(x)
        atrous_block12 = self.atrous_block12(x)
        atrous_block18 = self.atrous_block18(x)
        net = self.conv_1x1_output(torch.cat([image_features,
            atrous_block1, atrous_block6, atrous_block12, atrous_block18],
            dim=1))
        return net


def get_inputs():
    return [torch.rand([4, 512, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.cuda
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), None)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.sum(tmp1, 1)[:, None]
    tmp4 = 16.0
    tmp5 = tmp3 / tmp4
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp5, None)


@triton.jit
def triton_poi_fused__to_copy_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = 0.25
    tmp5 = tmp3 * tmp4
    tmp6 = tmp5 - tmp2
    tmp7 = 0.0
    tmp8 = triton_helpers.maximum(tmp6, tmp7)
    tmp9 = tmp8.to(tl.int32)
    tl.store(out_ptr0 + x0, tmp9, xmask)


@triton.jit
def triton_poi_fused_add_clamp_2(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = 0.25
    tmp5 = tmp3 * tmp4
    tmp6 = tmp5 - tmp2
    tmp7 = 0.0
    tmp8 = triton_helpers.maximum(tmp6, tmp7)
    tmp9 = tmp8.to(tl.int32)
    tmp10 = tl.full([1], 1, tl.int64)
    tmp11 = tmp9 + tmp10
    tmp12 = tl.full([1], 0, tl.int64)
    tmp13 = triton_helpers.minimum(tmp11, tmp12)
    tl.store(out_ptr0 + x0, tmp13, xmask)


@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3(out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 + tmp2
    tmp4 = 0.25
    tmp5 = tmp3 * tmp4
    tmp6 = tmp5 - tmp2
    tmp7 = 0.0
    tmp8 = triton_helpers.maximum(tmp6, tmp7)
    tmp9 = tmp8.to(tl.int32)
    tmp10 = tmp9.to(tl.float32)
    tmp11 = tmp8 - tmp10
    tmp12 = triton_helpers.maximum(tmp11, tmp7)
    tmp13 = 1.0
    tmp14 = triton_helpers.minimum(tmp12, tmp13)
    tl.store(out_ptr0 + x0, tmp14, xmask)


@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_mul_sub_4(in_ptr0,
    in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 4 % 4
    x0 = xindex % 4
    x5 = xindex // 16
    x2 = xindex // 16 % 256
    x6 = xindex
    tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + x5, None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 1, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tl.where(tmp7, tmp6, tmp5)
    tmp11 = tmp9 + tmp10
    tmp13 = tmp12 + tmp1
    tmp14 = tmp12 < 0
    tl.where(tmp14, tmp13, tmp12)
    tmp16 = tmp11 - tmp11
    tmp18 = tmp16 * tmp17
    tmp19 = tmp11 + tmp18
    tl.store(out_ptr0 + x6, tmp19, None)


@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
    in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11,
    in_ptr12, in_ptr13, in_ptr14, in_ptr15, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex // 16 % 1280
    x3 = xindex // 20480
    x4 = xindex % 16
    x1 = xindex // 4 % 4
    x0 = xindex % 4
    x5 = xindex
    tmp0 = x2
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 256, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x4 + 16 * x2 + 4096 * x3), tmp4, other=0.0)
    tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
    tmp7 = tl.full([XBLOCK], 1, tl.int32)
    tmp8 = tmp6 + tmp7
    tmp9 = tmp6 < 0
    tl.where(tmp9, tmp8, tmp6)
    tmp11 = tl.load(in_ptr2 + x0, tmp4, eviction_policy='evict_last', other=0.0)
    tmp12 = tmp11 + tmp7
    tmp13 = tmp11 < 0
    tl.where(tmp13, tmp12, tmp11)
    tmp15 = tl.load(in_ptr3 + (256 * x3 + x2), tmp4, eviction_policy=
        'evict_last', other=0.0)
    tmp16 = tl.load(in_ptr4 + x2, tmp4, eviction_policy='evict_last', other=0.0)
    tmp17 = tmp15 + tmp16
    tmp18 = tl.load(in_ptr5 + x0, tmp4, eviction_policy='evict_last', other=0.0)
    tmp19 = tmp18 + tmp7
    tmp20 = tmp18 < 0
    tl.where(tmp20, tmp19, tmp18)
    tmp22 = tmp17 - tmp17
    tmp23 = tl.load(in_ptr6 + x0, tmp4, eviction_policy='evict_last', other=0.0)
    tmp24 = tmp22 * tmp23
    tmp25 = tmp17 + tmp24
    tmp26 = tmp25 - tmp5
    tmp27 = tl.load(in_ptr7 + x1, tmp4, eviction_policy='evict_last', other=0.0)
    tmp28 = tmp26 * tmp27
    tmp29 = tmp5 + tmp28
    tmp30 = tl.full(tmp29.shape, 0.0, tmp29.dtype)
    tmp31 = tl.where(tmp4, tmp29, tmp30)
    tmp32 = tmp0 >= tmp3
    tmp33 = tl.full([1], 512, tl.int64)
    tmp34 = tmp0 < tmp33
    tmp35 = tmp32 & tmp34
    tmp36 = tl.load(in_ptr8 + (x4 + 16 * (-256 + x2) + 4096 * x3), tmp35,
        other=0.0)
    tmp37 = tl.load(in_ptr9 + (-256 + x2), tmp35, eviction_policy=
        'evict_last', other=0.0)
    tmp38 = tmp36 + tmp37
    tmp39 = tl.full(tmp38.shape, 0.0, tmp38.dtype)
    tmp40 = tl.where(tmp35, tmp38, tmp39)
    tmp41 = tmp0 >= tmp33
    tmp42 = tl.full([1], 768, tl.int64)
    tmp43 = tmp0 < tmp42
    tmp44 = tmp41 & tmp43
    tmp45 = tl.load(in_ptr10 + (x4 + 16 * (-512 + x2) + 4096 * x3), tmp44,
        other=0.0)
    tmp46 = tl.load(in_ptr11 + (-512 + x2), tmp44, eviction_policy=
        'evict_last', other=0.0)
    tmp47 = tmp45 + tmp46
    tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype)
    tmp49 = tl.where(tmp44, tmp47, tmp48)
    tmp50 = tmp0 >= tmp42
    tmp51 = tl.full([1], 1024, tl.int64)
    tmp52 = tmp0 < tmp51
    tmp53 = tmp50 & tmp52
    tmp54 = tl.load(in_ptr12 + (x4 + 16 * (-768 + x2) + 4096 * x3), tmp53,
        other=0.0)
    tmp55 = tl.load(in_ptr13 + (-768 + x2), tmp53, eviction_policy=
        'evict_last', other=0.0)
    tmp56 = tmp54 + tmp55
    tmp57 = tl.full(tmp56.shape, 0.0, tmp56.dtype)
    tmp58 = tl.where(tmp53, tmp56, tmp57)
    tmp59 = tmp0 >= tmp51
    tl.full([1], 1280, tl.int64)
    tmp62 = tl.load(in_ptr14 + (x4 + 16 * (-1024 + x2) + 4096 * x3), tmp59,
        other=0.0)
    tmp63 = tl.load(in_ptr15 + (-1024 + x2), tmp59, eviction_policy=
        'evict_last', other=0.0)
    tmp64 = tmp62 + tmp63
    tmp65 = tl.full(tmp64.shape, 0.0, tmp64.dtype)
    tmp66 = tl.where(tmp59, tmp64, tmp65)
    tmp67 = tl.where(tmp53, tmp58, tmp66)
    tmp68 = tl.where(tmp44, tmp49, tmp67)
    tmp69 = tl.where(tmp35, tmp40, tmp68)
    tmp70 = tl.where(tmp4, tmp31, tmp69)
    tl.store(out_ptr0 + x5, tmp70, None)


@triton.jit
def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 16 % 256
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
     primals_13) = args
    args.clear()
    assert_size_stride(primals_1, (4, 512, 4, 4), (8192, 16, 4, 1))
    assert_size_stride(primals_2, (256, 512, 1, 1), (512, 1, 1, 1))
    assert_size_stride(primals_3, (256,), (1,))
    assert_size_stride(primals_4, (256, 512, 1, 1), (512, 1, 1, 1))
    assert_size_stride(primals_5, (256,), (1,))
    assert_size_stride(primals_6, (256, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_7, (256,), (1,))
    assert_size_stride(primals_8, (256, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_9, (256,), (1,))
    assert_size_stride(primals_10, (256, 512, 3, 3), (4608, 9, 3, 1))
    assert_size_stride(primals_11, (256,), (1,))
    assert_size_stride(primals_12, (256, 1280, 1, 1), (1280, 1, 1, 1))
    assert_size_stride(primals_13, (256,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 512, 1, 1), (512, 1, 2048, 2048),
            torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 512, 1, 1), (512, 1, 1, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_0[grid(2048)](buf1, primals_1, 2048, 16,
            XBLOCK=32, num_warps=4, num_stages=1)
        buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 256, 1, 1), (256, 1, 1, 1))
        buf3 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
        triton_poi_fused__to_copy_1[grid(4)](buf3, 4, XBLOCK=4, num_warps=1,
            num_stages=1)
        buf4 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
        triton_poi_fused_add_clamp_2[grid(4)](buf4, 4, XBLOCK=4, num_warps=1,
            num_stages=1)
        buf5 = empty_strided_cuda((4,), (1,), torch.int64)
        triton_poi_fused__to_copy_1[grid(4)](buf5, 4, XBLOCK=4, num_warps=1,
            num_stages=1)
        buf6 = empty_strided_cuda((4,), (1,), torch.int64)
        triton_poi_fused_add_clamp_2[grid(4)](buf6, 4, XBLOCK=4, num_warps=1,
            num_stages=1)
        buf7 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3[grid(4)](buf7,
            4, XBLOCK=4, num_warps=1, num_stages=1)
        buf8 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1),
            torch.float32)
        triton_poi_fused__unsafe_index_add_convolution_mul_sub_4[grid(16384)](
            buf3, buf5, buf2, primals_3, buf6, buf7, buf8, 16384, XBLOCK=
            256, num_warps=4, num_stages=1)
        buf9 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3[grid(4)](buf9,
            4, XBLOCK=4, num_warps=1, num_stages=1)
        buf10 = extern_kernels.convolution(primals_1, primals_4, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf10, (4, 256, 4, 4), (4096, 16, 4, 1))
        buf11 = extern_kernels.convolution(primals_1, primals_6, stride=(1,
            1), padding=(6, 6), dilation=(6, 6), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf11, (4, 256, 4, 4), (4096, 16, 4, 1))
        buf12 = extern_kernels.convolution(primals_1, primals_8, stride=(1,
            1), padding=(12, 12), dilation=(12, 12), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf12, (4, 256, 4, 4), (4096, 16, 4, 1))
        buf13 = extern_kernels.convolution(primals_1, primals_10, stride=(
            1, 1), padding=(18, 18), dilation=(18, 18), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf13, (4, 256, 4, 4), (4096, 16, 4, 1))
        buf14 = empty_strided_cuda((4, 1280, 4, 4), (20480, 16, 4, 1),
            torch.float32)
        triton_poi_fused_cat_5[grid(81920)](buf8, buf4, buf5, buf2,
            primals_3, buf6, buf7, buf9, buf10, primals_5, buf11, primals_7,
            buf12, primals_9, buf13, primals_11, buf14, 81920, XBLOCK=1024,
            num_warps=4, num_stages=1)
        del buf10
        del buf11
        del buf12
        del buf13
        del buf2
        del buf8
        del primals_11
        del primals_3
        del primals_5
        del primals_7
        del primals_9
        buf15 = extern_kernels.convolution(buf14, primals_12, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf15, (4, 256, 4, 4), (4096, 16, 4, 1))
        buf16 = buf15
        del buf15
        triton_poi_fused_convolution_6[grid(16384)](buf16, primals_13,
            16384, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_13
    return (buf16, primals_1, primals_2, primals_4, primals_6, primals_8,
        primals_10, primals_12, buf1, buf3, buf4, buf5, buf6, buf7, buf9,
        buf14)


class ASPPNew(nn.Module):
    """
    Atrous spatial pyramid pooling used in object detection and segmentation.
    """

    def __init__(self, in_channel=512, depth=256):
        super().__init__()
        self.mean = nn.AdaptiveAvgPool2d((1, 1))
        self.conv = nn.Conv2d(in_channel, depth, 1, 1)
        self.atrous_block1 = nn.Conv2d(in_channel, depth, 1, 1)
        self.atrous_block6 = nn.Conv2d(in_channel, depth, 3, 1, padding=6,
            dilation=6)
        self.atrous_block12 = nn.Conv2d(in_channel, depth, 3, 1, padding=12,
            dilation=12)
        self.atrous_block18 = nn.Conv2d(in_channel, depth, 3, 1, padding=18,
            dilation=18)
        self.conv_1x1_output = nn.Conv2d(depth * 5, depth, 1, 1)

    def forward(self, input_0):
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
        primals_4 = self.atrous_block1.weight
        primals_5 = self.atrous_block1.bias
        primals_6 = self.atrous_block6.weight
        primals_7 = self.atrous_block6.bias
        primals_8 = self.atrous_block12.weight
        primals_9 = self.atrous_block12.bias
        primals_10 = self.atrous_block18.weight
        primals_11 = self.atrous_block18.bias
        primals_12 = self.conv_1x1_output.weight
        primals_13 = self.conv_1x1_output.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13])
        return output[0]
LoveEachDay/towhee
ASPP
false
11,683
[ "Apache-2.0" ]
0
513c9c2626676cadaaf0a16ac3c828d96bec91a1
https://github.com/LoveEachDay/towhee/tree/513c9c2626676cadaaf0a16ac3c828d96bec91a1
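A quick parity sketch for this record: run ASPPNew (defined above) and recompute the same pyramid eagerly from its own layers. Illustrative only, and it assumes a CUDA device plus the usual ASPP forward (bilinear-upsampled global pooling concatenated with the atrous branches), which is what the generated kernels implement.

import torch
import torch.nn.functional as F

m = ASPPNew().cuda().eval()
x = torch.rand(4, 512, 4, 4, device='cuda')
with torch.no_grad():
    fused = m(x)
    # Eager recomputation from the module's own submodules.
    pooled = F.interpolate(m.conv(m.mean(x)), size=x.shape[2:], mode='bilinear')
    eager = m.conv_1x1_output(torch.cat([pooled, m.atrous_block1(x),
        m.atrous_block6(x), m.atrous_block12(x), m.atrous_block18(x)], dim=1))
print(torch.allclose(fused, eager, atol=1e-4))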
FCNetwork
import torch
import torch.nn as nn
import torch.nn.functional as F


class FCNetwork(nn.Module):

    def __init__(self, state_size, action_size, output_gate=None):
        super(FCNetwork, self).__init__()
        self.fc1 = nn.Linear(state_size, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, action_size)
        self.output_gate = output_gate

    def forward(self, input):
        x = F.relu(self.fc1(input))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        if self.output_gate is not None:
            x = self.output_gate(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'state_size': 4, 'action_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, None)
    tl.store(out_ptr0 + x2, tmp6, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (256, 4), (4, 1))
    assert_size_stride(primals_2, (256,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (256, 256), (256, 1))
    assert_size_stride(primals_5, (256,), (1,))
    assert_size_stride(primals_6, (4, 256), (256, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0)
        del buf0
        buf6 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
            torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
            primals_2, buf6, 16384, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
            reinterpret_tensor(primals_4, (256, 256), (1, 256), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 256), (4096, 1024, 256, 1), 0)
        del buf2
        buf5 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
            torch.bool)
        triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf3,
            primals_5, buf5, 16384, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 256),
            (256, 1), 0), reinterpret_tensor(primals_6, (256, 4), (1, 256),
            0), alpha=1, beta=1, out=buf4)
        del primals_7
    return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
        reinterpret_tensor(buf3, (64, 256), (256, 1), 0),
        primals_6, buf5, primals_4, buf6)


class FCNetworkNew(nn.Module):

    def __init__(self, state_size, action_size, output_gate=None):
        super(FCNetworkNew, self).__init__()
        self.fc1 = nn.Linear(state_size, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, action_size)
        self.output_gate = output_gate

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.fc3.weight
        primals_7 = self.fc3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
JoshVarty/Reacher
FCNetwork
false
11,684
[ "MIT" ]
0
cab41484aaaeeae177cc625c3697d7e7cd80c2ed
https://github.com/JoshVarty/Reacher/tree/cab41484aaaeeae177cc625c3697d7e7cd80c2ed
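Note on the kernel name above: triton_poi_fused_relu_threshold_backward_0 fuses the bias add and ReLU and additionally stores a boolean mask that the ReLU backward reuses. A rough eager sketch of what it computes per element (illustrative, not the generated code itself):

import torch

def fused_bias_relu_with_mask(x, bias):
    y = torch.clamp_min(x + bias, 0.0)  # bias add + ReLU (written in place by the kernel)
    mask = y <= 0.0                     # gradient gate saved for the backward pass
    return y, mask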
Upsample_interpolate
import torch
import torch.nn as nn
import torch.nn.functional as F


class Upsample_interpolate(nn.Module):

    def __init__(self, stride):
        super(Upsample_interpolate, self).__init__()
        self.stride = stride

    def forward(self, x):
        x_numpy = x.cpu().detach().numpy()
        H = x_numpy.shape[2]
        W = x_numpy.shape[3]
        H = H * self.stride
        W = W * self.stride
        out = F.interpolate(x, size=(H, W), mode='nearest')
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'stride': 1}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 4
    x0 = xindex % 4
    x2 = xindex // 16
    x4 = xindex
    tmp0 = x1
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 1.0
    tmp3 = tmp1 * tmp2
    tmp4 = tmp3.to(tl.int32)
    tmp5 = x0
    tmp6 = tmp5.to(tl.float32)
    tmp7 = tmp6 * tmp2
    tmp8 = tmp7.to(tl.int32)
    tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + x4, tmp9, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__unsafe_index_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class Upsample_interpolateNew(nn.Module):

    def __init__(self, stride):
        super(Upsample_interpolateNew, self).__init__()
        self.stride = stride

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
Mathiebhan/darknet_ros
Upsample_interpolate
false
11,685
[ "BSD-3-Clause" ]
0
04a97b61b6b3b086da1a46331a747accd37d05f9
https://github.com/Mathiebhan/darknet_ros/tree/04a97b61b6b3b086da1a46331a747accd37d05f9
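With the stride=1 used in get_init_inputs, the nearest-neighbour resize maps every output pixel to itself, so the single gather kernel above is an elementwise copy. A tiny check (a sketch, assuming a CUDA device):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
up = Upsample_interpolateNew(stride=1).cuda()
assert torch.equal(up(x), x)  # identity for stride 1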
cnn_4layer_LeakyRelu
import torch
import torch.nn as nn
import torch.nn.functional as F


class cnn_4layer_LeakyRelu(nn.Module):

    def __init__(self, in_ch, in_dim, width=2, linear_size=256, alpha=0.1):
        super(cnn_4layer_LeakyRelu, self).__init__()
        self.conv1 = nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1)
        self.conv2 = nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1)
        self.fc1 = nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4),
            linear_size)
        self.fc2 = nn.Linear(linear_size, 10)
        self.alpha = alpha

    def forward(self, x):
        x = F.leaky_relu(self.conv1(x), self.alpha)
        x = F.leaky_relu(self.conv2(x), self.alpha)
        x = x.view(x.size(0), -1)
        x = F.leaky_relu(self.fc1(x), self.alpha)
        x = self.fc2(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_ch': 4, 'in_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 8
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x3, tmp4, xmask)
    tl.store(out_ptr1 + x3, tmp7, xmask)


@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 16
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr1 + x2, tmp7, xmask)


@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.1
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr1 + x2, tmp7, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (8, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (8,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (16, 8, 4, 4), (128, 16, 4, 1))
    assert_size_stride(primals_5, (16,), (1,))
    assert_size_stride(primals_6, (256, 16), (16, 1))
    assert_size_stride(primals_7, (256,), (1,))
    assert_size_stride(primals_8, (10, 256), (256, 1))
    assert_size_stride(primals_9, (10,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
            2), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 8, 2, 2), (32, 4, 2, 1))
        buf1 = empty_strided_cuda((4, 8, 2, 2), (32, 4, 2, 1), torch.bool)
        buf2 = empty_strided_cuda((4, 8, 2, 2), (32, 4, 2, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_leaky_relu_0[grid(128)](buf0,
            primals_2, buf1, buf2, 128, XBLOCK=128, num_warps=4, num_stages=1)
        del buf0
        del primals_2
        buf3 = extern_kernels.convolution(buf2, primals_4, stride=(2, 2),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 16, 1, 1), (16, 1, 1, 1))
        buf4 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 1, 1), torch.bool)
        buf5 = empty_strided_cuda((4, 16, 1, 1), (16, 1, 64, 64),
            torch.float32)
        triton_poi_fused_convolution_leaky_relu_1[grid(64)](buf3, primals_5,
            buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del buf3
        del primals_5
        buf6 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf5, (4, 16), (16, 1), 0),
            reinterpret_tensor(primals_6, (16, 256), (1, 16), 0), out=buf6)
        buf7 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
        buf8 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
        triton_poi_fused_leaky_relu_2[grid(1024)](buf6, primals_7, buf7,
            buf8, 1024, XBLOCK=256, num_warps=4, num_stages=1)
        del buf6
        del primals_7
        buf9 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
        extern_kernels.addmm(primals_9, buf8, reinterpret_tensor(primals_8,
            (256, 10), (1, 256), 0), alpha=1, beta=1, out=buf9)
        del primals_9
    return (buf9, primals_1, primals_3, primals_4, buf1, buf2, buf4,
        reinterpret_tensor(buf5, (4, 16), (16, 1), 0), buf7, buf8,
        primals_8, primals_6)


class cnn_4layer_LeakyReluNew(nn.Module):

    def __init__(self, in_ch, in_dim, width=2, linear_size=256, alpha=0.1):
        super(cnn_4layer_LeakyReluNew, self).__init__()
        self.conv1 = nn.Conv2d(in_ch, 4 * width, 4, stride=2, padding=1)
        self.conv2 = nn.Conv2d(4 * width, 8 * width, 4, stride=2, padding=1)
        self.fc1 = nn.Linear(8 * width * (in_dim // 4) * (in_dim // 4),
            linear_size)
        self.fc2 = nn.Linear(linear_size, 10)
        self.alpha = alpha

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.fc1.weight
        primals_7 = self.fc1.bias
        primals_8 = self.fc2.weight
        primals_9 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
Mahoumaru/auto_LiRPA
cnn_4layer_LeakyRelu
false
11,686
[ "BSD-3-Clause" ]
0
b03a6c36eb1b921726778359d6d2b94e0cd7e480
https://github.com/Mahoumaru/auto_LiRPA/tree/b03a6c36eb1b921726778359d6d2b94e0cd7e480
ReOrgLayer
import torch
from torch import nn


class ReOrgLayer(nn.Module):

    def __init__(self, stride=2):
        super(ReOrgLayer, self).__init__()
        self.stride = stride

    def forward(self, x):
        assert x.data.dim() == 4
        B, C, H, W = x.data.shape
        hs = self.stride
        ws = self.stride
        assert H % hs == 0, 'The stride ' + str(self.stride
            ) + ' is not a proper divisor of height ' + str(H)
        assert W % ws == 0, 'The stride ' + str(self.stride
            ) + ' is not a proper divisor of width ' + str(W)
        x = x.view(B, C, H // hs, hs, W // ws, ws).transpose(-2, -3
            ).contiguous()
        x = x.view(B, C, H // hs * W // ws, hs, ws)
        x = x.view(B, C, H // hs * W // ws, hs * ws).transpose(-1, -2
            ).contiguous()
        x = x.view(B, C, ws * hs, H // ws, W // ws).transpose(1, 2).contiguous()
        x = x.view(B, C * ws * hs, H // ws, W // ws)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
    tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex % 2
    x3 = xindex // 2
    y0 = yindex % 4
    y1 = yindex // 4
    x5 = xindex
    y4 = yindex
    tmp0 = tl.load(in_ptr0 + (2 * x2 + 4 * (y0 // 2) + 8 * x3 + 64 * y1 +
        y0 % 2), xmask & ymask)
    tl.store(out_ptr0 + (x5 + 16 * y4), tmp0, xmask & ymask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 2, 2), (64, 16, 4, 2, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(16, 16)](arg0_1, buf0, 16, 16,
            XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
        del arg0_1
    return reinterpret_tensor(buf0, (4, 16, 2, 2), (64, 4, 2, 1), 0),


class ReOrgLayerNew(nn.Module):

    def __init__(self, stride=2):
        super(ReOrgLayerNew, self).__init__()
        self.stride = stride

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
MaoXianXin/pytorchx
ReOrgLayer
false
11,687
[ "MIT" ]
0
f46cc9692c3bd11ea9d5d54c20de3ac2f67dabcc
https://github.com/MaoXianXin/pytorchx/tree/f46cc9692c3bd11ea9d5d54c20de3ac2f67dabcc
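The reshuffle here is a space-to-depth transform, which inductor reduces to one gather (the clone kernel) plus a stride reinterpretation. A shape sketch (assumes a CUDA device):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
y = ReOrgLayerNew(stride=2).cuda()(x)
# Channels grow by stride**2, spatial dims shrink by stride.
assert y.shape == (4, 16, 2, 2)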
Net
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):

    def __init__(self, inputLayer):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(inputLayer, 100)
        self.fc2 = nn.Linear(100, 2)

    def forward(self, x):
        x = self.fc1(x)
        x = F.tanh(x)
        x = self.fc2(x)
        return x

    def predict(self, x):
        pred = F.softmax(self.forward(x))
        ans = []
        for t in pred:
            if t[0] > t[1]:
                ans.append(0)
            else:
                ans.append(1)
        return torch.tensor(ans)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'inputLayer': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 6400
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 100
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (100, 4), (4, 1))
    assert_size_stride(primals_2, (100,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (2, 100), (100, 1))
    assert_size_stride(primals_5, (2,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_tanh_0[grid(6400)](buf1, primals_2, 6400, XBLOCK=
            128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 100),
            (100, 1), 0), reinterpret_tensor(primals_4, (100, 2), (1, 100),
            0), alpha=1, beta=1, out=buf2)
        del primals_5
    return (reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, primals_4)


class NetNew(nn.Module):

    def __init__(self, inputLayer):
        super(NetNew, self).__init__()
        self.fc1 = nn.Linear(inputLayer, 100)
        self.fc2 = nn.Linear(100, 2)

    def predict(self, x):
        pred = F.softmax(self.forward(x))
        ans = []
        for t in pred:
            if t[0] > t[1]:
                ans.append(0)
            else:
                ans.append(1)
        return torch.tensor(ans)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
Marissa4/RPyCA
Net
false
11,688
[ "MIT" ]
0
e3c229361a4cd9ddd53accc5541b7c8b5f8939e0
https://github.com/Marissa4/RPyCA/tree/e3c229361a4cd9ddd53accc5541b7c8b5f8939e0
MaxPoolStride1
import torch
from torch import nn
from torch.nn import functional as F


class MaxPoolStride1(nn.Module):

    def __init__(self, kernel_size):
        super(MaxPoolStride1, self).__init__()
        self.kernel_size = kernel_size
        self.pad = kernel_size - 1

    def forward(self, x):
        padded_x = F.pad(x, (0, self.pad, 0, self.pad), mode='replicate')
        pooled_x = nn.MaxPool2d(self.kernel_size, self.pad)(padded_x)
        return pooled_x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'kernel_size': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 2
    x1 = xindex // 2 % 2
    x2 = xindex // 4
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= 3 * x1) + 3 * x1 * (3 * x1 <
        3)) + 16 * x2 + (3 * (3 <= 3 * x0) + 3 * x0 * (3 * x0 < 3))), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (4 * (3 * (3 <= 3 * x1) + 3 * x1 * (3 * x1 <
        3)) + 16 * x2 + (3 * (3 <= 1 + 3 * x0) + (1 + 3 * x0) * (1 + 3 *
        x0 < 3))), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (4 * (3 * (3 <= 3 * x1) + 3 * x1 * (3 * x1 <
        3)) + 16 * x2 + (3 * (3 <= 2 + 3 * x0) + (2 + 3 * x0) * (2 + 3 *
        x0 < 3))), xmask)
    tmp5 = tl.load(in_ptr0 + (3 + 4 * (3 * (3 <= 3 * x1) + 3 * x1 * (3 *
        x1 < 3)) + 16 * x2), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + 3 * x1) + (1 + 3 * x1) *
        (1 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 3 * x0) + 3 * x0 * (3 *
        x0 < 3))), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + 3 * x1) + (1 + 3 * x1) *
        (1 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 1 + 3 * x0) + (1 + 3 *
        x0) * (1 + 3 * x0 < 3))), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + 3 * x1) + (1 + 3 * x1) *
        (1 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 2 + 3 * x0) + (2 + 3 *
        x0) * (2 + 3 * x0 < 3))), xmask)
    tmp13 = tl.load(in_ptr0 + (3 + 4 * (3 * (3 <= 1 + 3 * x1) + (1 + 3 *
        x1) * (1 + 3 * x1 < 3)) + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp15 = tl.load(in_ptr0 + (4 * (3 * (3 <= 2 + 3 * x1) + (2 + 3 * x1) *
        (2 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 3 * x0) + 3 * x0 * (3 *
        x0 < 3))), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr0 + (4 * (3 * (3 <= 2 + 3 * x1) + (2 + 3 * x1) *
        (2 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 1 + 3 * x0) + (1 + 3 *
        x0) * (1 + 3 * x0 < 3))), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (4 * (3 * (3 <= 2 + 3 * x1) + (2 + 3 * x1) *
        (2 + 3 * x1 < 3)) + 16 * x2 + (3 * (3 <= 2 + 3 * x0) + (2 + 3 *
        x0) * (2 + 3 * x0 < 3))), xmask)
    tmp21 = tl.load(in_ptr0 + (3 + 4 * (3 * (3 <= 2 + 3 * x1) + (2 + 3 *
        x1) * (2 + 3 * x1 < 3)) + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp23 = tl.load(in_ptr0 + (12 + 16 * x2 + (3 * (3 <= 3 * x0) + 3 * x0 *
        (3 * x0 < 3))), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr0 + (12 + 16 * x2 + (3 * (3 <= 1 + 3 * x0) + (1 +
        3 * x0) * (1 + 3 * x0 < 3))), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr0 + (12 + 16 * x2 + (3 * (3 <= 2 + 3 * x0) + (2 +
        3 * x0) * (2 + 3 * x0 < 3))), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr0 + (15 + 16 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp8 = triton_helpers.maximum(tmp7, tmp6)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)
    tmp12 = triton_helpers.maximum(tmp11, tmp10)
    tmp14 = triton_helpers.maximum(tmp13, tmp12)
    tmp16 = triton_helpers.maximum(tmp15, tmp14)
    tmp18 = triton_helpers.maximum(tmp17, tmp16)
    tmp20 = triton_helpers.maximum(tmp19, tmp18)
    tmp22 = triton_helpers.maximum(tmp21, tmp20)
    tmp24 = triton_helpers.maximum(tmp23, tmp22)
    tmp26 = triton_helpers.maximum(tmp25, tmp24)
    tmp28 = triton_helpers.maximum(tmp27, tmp26)
    tmp30 = triton_helpers.maximum(tmp29, tmp28)
    tl.store(out_ptr0 + x4, tmp30, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0,
            64, XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
    return buf0,


class MaxPoolStride1New(nn.Module):

    def __init__(self, kernel_size):
        super(MaxPoolStride1New, self).__init__()
        self.kernel_size = kernel_size
        self.pad = kernel_size - 1

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
MaoXianXin/pytorchx
MaxPoolStride1
false
11,689
[ "MIT" ]
0
f46cc9692c3bd11ea9d5d54c20de3ac2f67dabcc
https://github.com/MaoXianXin/pytorchx/tree/f46cc9692c3bd11ea9d5d54c20de3ac2f67dabcc
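Despite the module's name, with kernel_size=4 the pooling stride is kernel_size - 1 = 3, because the module passes self.pad as the nn.MaxPool2d stride; that is why the fused kernel reads 4x4 windows at stride 3 from a replicate-padded 7x7 map and emits a 2x2 output. An eager cross-check (a sketch, assuming a CUDA device):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4, device='cuda')
y = MaxPoolStride1New(kernel_size=4).cuda()(x)
ref = F.max_pool2d(F.pad(x, (0, 3, 0, 3), mode='replicate'), 4, 3)
assert y.shape == (4, 4, 2, 2) and torch.allclose(y, ref)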
Network
import torch
import torch.nn as nn
import torch.nn.functional as F


class Network(nn.Module):

    def __init__(self, input_size, nb_action):
        super(Network, self).__init__()
        self.input_size = input_size
        self.nb_action = nb_action
        self.fc1 = nn.Linear(input_size, 30)
        self.fc2 = nn.Linear(30, nb_action)

    def forward(self, state):
        x = F.relu(self.fc1(state))
        q_values = self.fc2(x)
        return q_values


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'nb_action': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1920
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 30
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (30, 4), (4, 1))
    assert_size_stride(primals_2, (30,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 30), (30, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 30), (30, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 30), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 30), (480, 120, 30, 1), 0)
        del buf0
        buf3 = empty_strided_cuda((4, 4, 4, 30), (480, 120, 30, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(1920)](buf1,
            primals_2, buf3, 1920, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 30),
            (30, 1), 0), reinterpret_tensor(primals_4, (30, 4), (1, 30), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
    return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf1, (64, 30), (30, 1), 0), primals_4, buf3)


class NetworkNew(nn.Module):

    def __init__(self, input_size, nb_action):
        super(NetworkNew, self).__init__()
        self.input_size = input_size
        self.nb_action = nb_action
        self.fc1 = nn.Linear(input_size, 30)
        self.fc2 = nn.Linear(30, nb_action)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
MarcoPerdomo/Self-Automated-Driving_Car
Network
false
11,690
[ "MIT" ]
0
943bf53a8b0dd26f8370b943d879e7dbaadb2201
https://github.com/MarcoPerdomo/Self-Automated-Driving_Car/tree/943bf53a8b0dd26f8370b943d879e7dbaadb2201
QNetwork
import torch
import torch.nn.functional as F
import torch.nn as nn


class QNetwork(nn.Module):
    """Actor (Policy) Model."""

    def __init__(self, state_size, action_size, seed, fc1_units=20,
        fc2_units=80):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fc1_units (int): Number of nodes in first hidden layer
            fc2_units (int): Number of nodes in second hidden layer
        """
        super(QNetwork, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, fc1_units)
        self.fc2 = nn.Linear(fc1_units, fc2_units)
        self.fc3 = nn.Linear(fc2_units, action_size)

    def forward(self, state):
        """Build a network that maps state -> action values."""
        x = torch.tanh(self.fc1(state))
        x = F.relu(self.fc2(x))
        return self.fc3(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1280
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 20
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.tanh(tmp2)
    tl.store(in_out_ptr0 + x2, tmp3, xmask)


@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 5120
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 80
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (20, 4), (4, 1))
    assert_size_stride(primals_2, (20,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (80, 20), (20, 1))
    assert_size_stride(primals_5, (80,), (1,))
    assert_size_stride(primals_6, (4, 80), (80, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 20), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 20), (320, 80, 20, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_tanh_0[grid(1280)](buf1, primals_2, 1280, XBLOCK=
            256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 80), (80, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 20), (20, 1), 0),
            reinterpret_tensor(primals_4, (20, 80), (1, 20), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 80), (1280, 320, 80, 1), 0)
        del buf2
        buf5 = empty_strided_cuda((4, 4, 4, 80), (1280, 320, 80, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_1[grid(5120)](buf3,
            primals_5, buf5, 5120, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 80),
            (80, 1), 0), reinterpret_tensor(primals_6, (80, 4), (1, 80), 0),
            alpha=1, beta=1, out=buf4)
        del primals_7
    return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1,
        reinterpret_tensor(buf3, (64, 80), (80, 1), 0), primals_6, buf5,
        primals_4)


class QNetworkNew(nn.Module):
    """Actor (Policy) Model."""

    def __init__(self, state_size, action_size, seed, fc1_units=20,
        fc2_units=80):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fc1_units (int): Number of nodes in first hidden layer
            fc2_units (int): Number of nodes in second hidden layer
        """
        super(QNetworkNew, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(state_size, fc1_units)
        self.fc2 = nn.Linear(fc1_units, fc2_units)
        self.fc3 = nn.Linear(fc2_units, action_size)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.fc3.weight
        primals_7 = self.fc3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
Mavrepis/DeepLearning_FoodSafety
QNetwork
false
11,691
[ "MIT" ]
0
4f70b575036b06cd0edd4fdf9fc9303728872fc1
https://github.com/Mavrepis/DeepLearning_FoodSafety/tree/4f70b575036b06cd0edd4fdf9fc9303728872fc1
DotProduct
import torch
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn


class DotProduct(nn.Module):

    def forward(self, x: 'torch.Tensor', y: 'torch.Tensor') ->torch.Tensor:
        """
        Inputs:
            x - (N, F)
            y - (N, F)
        Output:
            output - (N, 1) dot-product output
        """
        assert len(x.shape) == 2
        assert len(y.shape) == 2
        assert x.shape == y.shape
        x = nn.functional.normalize(x, dim=1)
        y = nn.functional.normalize(y, dim=1)
        output = torch.matmul(x.unsqueeze(1), y.unsqueeze(2)).squeeze(2)
        return output


def get_inputs():
    return [torch.rand([4, 4]), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-12
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x2, tmp15, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4), (4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_div_0[grid(16)](arg1_1, buf1, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del arg1_1
        buf2 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 1, 4), (4, 0, 1), 0),
            reinterpret_tensor(buf1, (4, 4, 1), (4, 1, 0), 0), out=buf2)
        del buf0
        del buf1
    return reinterpret_tensor(buf2, (4, 1), (1, 1), 0),


class DotProductNew(nn.Module):

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
MicroTensor-ai/episodic-memory
DotProduct
false
11,692
[ "MIT" ]
0
295a3752ab94c7a6f45355aa2c54bffbf84b574f
https://github.com/MicroTensor-ai/episodic-memory/tree/295a3752ab94c7a6f45355aa2c54bffbf84b574f
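The normalize-then-bmm pattern in this record is row-wise cosine similarity, so the generated pair can be cross-checked against torch's built-in (a sketch, assuming a CUDA device; atol hedges tiny numeric drift):

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, device='cuda')
y = torch.rand(4, 4, device='cuda')
ref = F.cosine_similarity(x, y, dim=1, eps=1e-12).unsqueeze(1)
assert torch.allclose(DotProductNew().cuda()(x, y), ref, atol=1e-6)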
SeparableBlock
from torch.nn import Module
import torch
from torch.nn import Linear


class SeparableBlock(Module):

    def __init__(self, input_size, kernel_channels_in, kernel_channels_out,
        kernel_size):
        super(SeparableBlock, self).__init__()
        self.input_size = input_size
        self.kernel_size = kernel_size
        self.kernel_channels_in = kernel_channels_in
        self.kernel_channels_out = kernel_channels_out
        self.make_kernel_in = Linear(input_size, kernel_size * kernel_size *
            kernel_channels_in)
        self.make_kernel_out = Linear(input_size, kernel_size *
            kernel_size * kernel_channels_out)
        self.kernel_linear_in = Linear(kernel_channels_in, kernel_channels_in)
        self.kernel_linear_out = Linear(kernel_channels_out,
            kernel_channels_out)

    def forward(self, features):
        features = features.view(-1, self.input_size)
        kernel_in = self.make_kernel_in(features).view(-1, self.kernel_size,
            self.kernel_size, 1, self.kernel_channels_in)
        kernel_out = self.make_kernel_out(features).view(-1, self.
            kernel_size, self.kernel_size, self.kernel_channels_out, 1)
        kernel = torch.matmul(kernel_out, kernel_in)
        kernel = self.kernel_linear_in(kernel).permute(0, 1, 2, 4, 3)
        kernel = self.kernel_linear_out(kernel)
        kernel = kernel.permute(0, 4, 3, 1, 2)
        return kernel


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'kernel_channels_in': 4,
        'kernel_channels_out': 4, 'kernel_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch.nn import Linear
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask)


@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (64, 4), (4, 1))
    assert_size_stride(primals_3, (64,), (1,))
    assert_size_stride(primals_4, (64, 4), (4, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
        extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 64), (1, 4),
            0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((1024, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf1, (1024, 4, 1), (4, 1, 1),
            0), reinterpret_tensor(buf0, (1024, 1, 4), (4, 4, 1), 0), out=buf2)
        buf3 = empty_strided_cuda((4096, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf2, (4096, 4), (4, 1), 0),
            reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3)
        buf4 = empty_strided_cuda((64, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(4096, 4)](buf3, primals_7, buf4,
            4096, 4, XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
        del primals_7
        buf5 = buf3
        del buf3
        extern_kernels.mm(reinterpret_tensor(buf4, (4096, 4), (4, 1), 0),
            reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5)
        buf6 = reinterpret_tensor(buf5, (64, 4, 4, 4, 4), (256, 64, 16, 4,
            1), 0)
        del buf5
        triton_poi_fused_add_1[grid(16384)](buf6, primals_9, 16384,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_9
    return (reinterpret_tensor(buf6, (64, 4, 4, 4, 4), (256, 1, 4, 64, 16),
        0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
        reinterpret_tensor(buf2, (4096, 4), (4, 1), 0),
        reinterpret_tensor(buf4, (4096, 4), (4, 1), 0), primals_8,
        primals_6, reinterpret_tensor(buf1, (1024, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf0, (1024, 4, 1), (4, 1, 4), 0))


class SeparableBlockNew(Module):

    def __init__(self, input_size, kernel_channels_in, kernel_channels_out,
        kernel_size):
        super(SeparableBlockNew, self).__init__()
        self.input_size = input_size
        self.kernel_size = kernel_size
        self.kernel_channels_in = kernel_channels_in
        self.kernel_channels_out = kernel_channels_out
        self.make_kernel_in = Linear(input_size, kernel_size * kernel_size *
            kernel_channels_in)
        self.make_kernel_out = Linear(input_size, kernel_size *
            kernel_size * kernel_channels_out)
        self.kernel_linear_in = Linear(kernel_channels_in, kernel_channels_in)
        self.kernel_linear_out = Linear(kernel_channels_out,
            kernel_channels_out)

    def forward(self, input_0):
        primals_2 = self.make_kernel_in.weight
        primals_3 = self.make_kernel_in.bias
        primals_4 = self.make_kernel_out.weight
        primals_5 = self.make_kernel_out.bias
        primals_6 = self.kernel_linear_in.weight
        primals_7 = self.kernel_linear_in.bias
        primals_8 = self.kernel_linear_out.weight
        primals_9 = self.kernel_linear_out.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
Kiberchaika/hyperstyle
SeparableBlock
false
11,693
[ "MIT" ]
0
b67e5ca9c67dfdfa18f1d6cda6e8eff5da07db7b
https://github.com/Kiberchaika/hyperstyle/tree/b67e5ca9c67dfdfa18f1d6cda6e8eff5da07db7b
CmapPafHeadAttention
import torch
import torch.utils.data
import torch.nn
import torch.optim


class UpsampleCBR(torch.nn.Sequential):

    def __init__(self, input_channels, output_channels, count=1, num_flat=0):
        layers = []
        for i in range(count):
            if i == 0:
                inch = input_channels
            else:
                inch = output_channels
            layers += [torch.nn.ConvTranspose2d(inch, output_channels,
                kernel_size=4, stride=2, padding=1), torch.nn.BatchNorm2d(
                output_channels), torch.nn.ReLU()]
            for i in range(num_flat):
                layers += [torch.nn.Conv2d(output_channels, output_channels,
                    kernel_size=3, stride=1, padding=1), torch.nn.
                    BatchNorm2d(output_channels), torch.nn.ReLU()]
        super(UpsampleCBR, self).__init__(*layers)


class CmapPafHeadAttention(torch.nn.Module):

    def __init__(self, input_channels, cmap_channels, paf_channels,
        upsample_channels=256, num_upsample=0, num_flat=0):
        super(CmapPafHeadAttention, self).__init__()
        self.cmap_up = UpsampleCBR(input_channels, upsample_channels,
            num_upsample, num_flat)
        self.paf_up = UpsampleCBR(input_channels, upsample_channels,
            num_upsample, num_flat)
        self.cmap_att = torch.nn.Conv2d(upsample_channels,
            upsample_channels, kernel_size=3, stride=1, padding=1)
        self.paf_att = torch.nn.Conv2d(upsample_channels,
            upsample_channels, kernel_size=3, stride=1, padding=1)
        self.cmap_conv = torch.nn.Conv2d(upsample_channels, cmap_channels,
            kernel_size=1, stride=1, padding=0)
        self.paf_conv = torch.nn.Conv2d(upsample_channels, paf_channels,
            kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        xc = self.cmap_up(x)
        ac = torch.sigmoid(self.cmap_att(xc))
        xp = self.paf_up(x)
        ap = torch.tanh(self.paf_att(xp))
        return self.cmap_conv(xc * ac), self.paf_conv(xp * ap)


def get_inputs():
    return [torch.rand([4, 256, 64, 64])]


def get_init_inputs():
    return [[], {'input_channels': 4, 'cmap_channels': 4, 'paf_channels': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
    tl.constexpr, XBLOCK: tl.constexpr):
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 256 * x2 + 1048576 * y1), tmp0, None)


@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK:
    tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
        ) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy=
        'evict_last')
    tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_tanh_2(in_out_ptr0,
    in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp3 = tl.load(in_out_ptr1 + x2, None)
    tmp4 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr2 + x2, None)
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp7 = tl.sigmoid(tmp5)
    tmp8 = tmp6 * tmp7
    tmp9 = libdevice.tanh(tmp2)
    tmp10 = tmp6 * tmp9
    tl.store(in_out_ptr0 + x2, tmp2, None)
    tl.store(in_out_ptr1 + x2, tmp5, None)
    tl.store(out_ptr0 + x2, tmp8, None)
    tl.store(out_ptr1 + x2, tmp10, None)


@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel,
    xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16384 * y1), ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, ymask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 256, 64, 64), (1048576, 4096, 64, 1))
    assert_size_stride(primals_2, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_3, (256,), (1,))
    assert_size_stride(primals_4, (256, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_5, (256,), (1,))
    assert_size_stride(primals_6, (4, 256, 1, 1), (256, 1, 1, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 256, 1, 1), (256, 1, 1, 1))
    assert_size_stride(primals_9, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384,
            256), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_0[grid(1024, 4096)](primals_1, buf0, 1024, 4096,
            XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
            torch.float32)
        triton_poi_fused_1[grid(65536, 9)](primals_2, buf1, 65536, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
            torch.float32)
        triton_poi_fused_1[grid(65536, 9)](primals_4, buf2, 65536, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
        del primals_4
        buf3 = extern_kernels.convolution(buf0, buf1, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf3, (4, 256, 64, 64), (1048576, 1, 16384, 256))
        buf5 = extern_kernels.convolution(buf0, buf2, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 256, 64, 64), (1048576, 1, 16384, 256))
        buf6 = buf5
        del buf5
        buf4 = buf3
        del buf3
        buf7 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384,
            256), torch.float32)
        buf10 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384,
            256), torch.float32)
        triton_poi_fused_convolution_mul_sigmoid_tanh_2[grid(4194304)](buf6,
            buf4, primals_5, primals_3, buf0, buf7, buf10, 4194304, XBLOCK=
            512, num_warps=8, num_stages=1)
        del primals_3
        del primals_5
        buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf8, (4, 4, 64, 64), (16384, 1, 256, 4))
        buf9 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
            torch.float32)
        triton_poi_fused_convolution_3[grid(16, 4096)](buf8, primals_7,
            buf9, 16, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
        del primals_7
        buf11 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf11, (4, 4, 64, 64), (16384, 1, 256, 4))
        buf12 = reinterpret_tensor(buf8, (4, 4, 64, 64), (16384, 4096, 64,
            1), 0)
        del buf8
        triton_poi_fused_convolution_3[grid(16, 4096)](buf11, primals_9,
            buf12, 16, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
        del buf11
        del primals_9
    return (buf9, buf12, buf0, buf1, buf2, primals_6, primals_8, buf4,
        buf6, buf7, buf10)


class UpsampleCBR(torch.nn.Sequential):

    def __init__(self, input_channels, output_channels, count=1, num_flat=0):
        layers = []
        for i in range(count):
            if i == 0:
                inch = input_channels
            else:
                inch = output_channels
            layers += [torch.nn.ConvTranspose2d(inch, output_channels,
                kernel_size=4, stride=2, padding=1), torch.nn.BatchNorm2d(
                output_channels), torch.nn.ReLU()]
            for i in range(num_flat):
                layers += [torch.nn.Conv2d(output_channels, output_channels,
                    kernel_size=3, stride=1, padding=1), torch.nn.
                    BatchNorm2d(output_channels), torch.nn.ReLU()]
        super(UpsampleCBR, self).__init__(*layers)


class CmapPafHeadAttentionNew(torch.nn.Module):

    def __init__(self, input_channels, cmap_channels, paf_channels,
        upsample_channels=256, num_upsample=0, num_flat=0):
        super(CmapPafHeadAttentionNew, self).__init__()
        self.cmap_up = UpsampleCBR(input_channels, upsample_channels,
            num_upsample, num_flat)
        self.paf_up = UpsampleCBR(input_channels, upsample_channels,
            num_upsample, num_flat)
        self.cmap_att = torch.nn.Conv2d(upsample_channels,
            upsample_channels, kernel_size=3, stride=1, padding=1)
        self.paf_att = torch.nn.Conv2d(upsample_channels,
            upsample_channels, kernel_size=3, stride=1, padding=1)
        self.cmap_conv = torch.nn.Conv2d(upsample_channels, cmap_channels,
            kernel_size=1, stride=1, padding=0)
        self.paf_conv = torch.nn.Conv2d(upsample_channels, paf_channels,
            kernel_size=1, stride=1, padding=0)

    def forward(self, input_0):
        primals_2 = self.cmap_att.weight
        primals_3 = self.cmap_att.bias
        primals_4 = self.paf_att.weight
        primals_5 = self.paf_att.bias
        primals_6 = self.cmap_conv.weight
        primals_7 = self.cmap_conv.bias
        primals_8 = self.paf_conv.weight
        primals_9 = self.paf_conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0], output[1]
J-C-Chang/human-pose-detect
CmapPafHeadAttention
false
11694
[ "MIT" ]
0
092e6ec53aa5058d644a30269abff606b74e3bf3
https://github.com/J-C-Chang/human-pose-detect/tree/092e6ec53aa5058d644a30269abff606b74e3bf3
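A minimal smoke test for the compiled head, assuming a CUDA device and the UpsampleCBR / CmapPafHeadAttentionNew classes defined above; the channel counts and expected output shapes mirror the assert_size_stride calls in call():

import torch

model = CmapPafHeadAttentionNew(input_channels=256, cmap_channels=4,
                                paf_channels=4).cuda()
x = torch.rand(4, 256, 64, 64, device='cuda')  # matches primals_1
cmap, paf = model(x)
assert cmap.shape == (4, 4, 64, 64)  # buf9 in call()
assert paf.shape == (4, 4, 64, 64)   # buf12 in call()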
HighLightLayer
import torch import torch.nn.parallel import torch.nn as nn import torch.utils.data import torch.backends.cudnn def mask_logits(inputs, mask, mask_value=-1e+30): mask = mask.type(torch.float32) return inputs + (1.0 - mask) * mask_value class Conv1D(nn.Module): def __init__(self, in_dim, out_dim, kernel_size=1, stride=1, padding=0, bias=True): super(Conv1D, self).__init__() self.conv1d = nn.Conv1d(in_channels=in_dim, out_channels=out_dim, kernel_size=kernel_size, padding=padding, stride=stride, bias=bias) def forward(self, x): x = x.transpose(1, 2) x = self.conv1d(x) return x.transpose(1, 2) class HighLightLayer(nn.Module): def __init__(self, dim): super(HighLightLayer, self).__init__() self.conv1d = Conv1D(in_dim=dim, out_dim=1, kernel_size=1, stride=1, padding=0, bias=True) def forward(self, x, mask): logits = self.conv1d(x) logits = logits.squeeze(2) logits = mask_logits(logits, mask) scores = nn.Sigmoid()(logits) return scores @staticmethod def compute_loss(scores, labels, mask, epsilon=1e-12): labels = labels.type(torch.float32) weights = torch.where(labels == 0.0, labels + 1.0, 2.0 * labels) loss_per_location = nn.BCELoss(reduction='none')(scores, labels) loss_per_location = loss_per_location * weights mask = mask.type(torch.float32) loss = torch.sum(loss_per_location * mask) / (torch.sum(mask) + epsilon ) return loss def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.parallel import torch.nn as nn import torch.utils.data import torch.backends.cudnn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr1 + x0, xmask) tmp3 = tmp0 + tmp2 tmp5 = 1.0 tmp6 = tmp5 - tmp4 tmp7 = -1e+30 tmp8 = tmp6 * tmp7 tmp9 = tmp3 + tmp8 tmp10 = tl.sigmoid(tmp9) tl.store(in_out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 4), (4, 4, 1)) del buf0 buf2 = reinterpret_tensor(buf1, (4, 4), (4, 1), 0) del buf1 triton_poi_fused_add_mul_rsub_sigmoid_1[grid(16)](buf2, primals_3, primals_4, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 del primals_4 return buf2, primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), buf2 def mask_logits(inputs, mask, mask_value=-1e+30): mask = mask.type(torch.float32) return inputs + (1.0 - mask) * mask_value class Conv1D(nn.Module): def __init__(self, in_dim, out_dim, kernel_size=1, stride=1, padding=0, bias=True): super(Conv1D, self).__init__() self.conv1d = nn.Conv1d(in_channels=in_dim, out_channels=out_dim, kernel_size=kernel_size, padding=padding, stride=stride, bias=bias) def forward(self, x): x = x.transpose(1, 2) x = self.conv1d(x) return x.transpose(1, 2) class HighLightLayerNew(nn.Module): def __init__(self, dim): super(HighLightLayerNew, self).__init__() self.conv1d = Conv1D(in_dim=dim, out_dim=1, kernel_size=1, stride=1, padding=0, bias=True) @staticmethod def compute_loss(scores, labels, mask, epsilon=1e-12): labels = labels.type(torch.float32) weights = torch.where(labels == 0.0, labels + 1.0, 2.0 * labels) 
loss_per_location = nn.BCELoss(reduction='none')(scores, labels) loss_per_location = loss_per_location * weights mask = mask.type(torch.float32) loss = torch.sum(loss_per_location * mask) / (torch.sum(mask) + epsilon ) return loss def forward(self, input_0, input_1): primals_2 = self.conv1d.conv1d.weight primals_3 = self.conv1d.conv1d.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
MicroTensor-ai/episodic-memory
HighLightLayer
false
11695
[ "MIT" ]
0
295a3752ab94c7a6f45355aa2c54bffbf84b574f
https://github.com/MicroTensor-ai/episodic-memory/tree/295a3752ab94c7a6f45355aa2c54bffbf84b574f
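Since HighLightLayer and HighLightLayerNew build the same Conv1D submodule, the compiled path can be checked against the eager reference by sharing weights. A minimal sketch, assuming a CUDA device and this record's definitions; the tolerance is an assumption:

import torch

torch.manual_seed(0)
ref = HighLightLayer(dim=4).cuda()      # eager reference
opt = HighLightLayerNew(dim=4).cuda()   # inductor-compiled version
opt.load_state_dict(ref.state_dict())   # identical conv1d weights
x, mask = [t.cuda() for t in get_inputs()]
# both paths apply mask_logits, then sigmoid, on the squeezed logits
assert torch.allclose(ref(x, mask), opt(x, mask), atol=1e-6)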
enhance_net_nopool
import torch import torch.nn as nn import torch.nn.functional as F class CSDN_Tem(nn.Module): def __init__(self, in_ch, out_ch): super(CSDN_Tem, self).__init__() self.depth_conv = nn.Conv2d(in_channels=in_ch, out_channels=in_ch, kernel_size=3, padding=1, groups=in_ch) self.point_conv = nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=1) def forward(self, input): out = self.depth_conv(input) out = self.point_conv(out) return out class enhance_net_nopool(nn.Module): def __init__(self, scale_factor): super(enhance_net_nopool, self).__init__() self.relu = nn.ReLU(inplace=True) self.scale_factor = scale_factor self.upsample = nn.UpsamplingBilinear2d(scale_factor=self.scale_factor) number_f = 32 self.e_conv1 = CSDN_Tem(3, number_f) self.e_conv2 = CSDN_Tem(number_f, number_f) self.e_conv3 = CSDN_Tem(number_f, number_f) self.e_conv4 = CSDN_Tem(number_f, number_f) self.e_conv5 = CSDN_Tem(number_f * 2, number_f) self.e_conv6 = CSDN_Tem(number_f * 2, number_f) self.e_conv7 = CSDN_Tem(number_f * 2, 3) def enhance(self, x, x_r): for _ in range(8): x = x + x_r * (torch.pow(x, 2) - x) return x def forward(self, x): x_down = x if self.scale_factor == 1 else F.interpolate(x, scale_factor=1 / self.scale_factor, mode='bilinear') x1 = self.relu(self.e_conv1(x_down)) x2 = self.relu(self.e_conv2(x1)) x3 = self.relu(self.e_conv3(x2)) x4 = self.relu(self.e_conv4(x3)) x5 = self.relu(self.e_conv5(torch.cat([x3, x4], 1))) x6 = self.relu(self.e_conv6(torch.cat([x2, x5], 1))) x_r = torch.tanh(self.e_conv7(torch.cat([x1, x6], 1))) x_r = x_r if self.scale_factor == 1 else self.upsample(x_r) enhance_image = self.enhance(x, x_r) return enhance_image, x_r def get_inputs(): return [torch.rand([4, 3, 64, 64])] def get_init_inputs(): return [[], {'scale_factor': 1.0}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 64 x0 = xindex % 4096 x2 = xindex // 262144 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 32, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 64, tl.int64) tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-32 + x1) + 131072 * x2), tmp6, other=0.0) tmp10 = tl.load(in_ptr2 + (-32 + x1), tmp6, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype) tmp15 = tl.where(tmp6, tmp13, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x3, tmp16, None) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 64 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_add_convolution_mul_pow_sub_tanh_5(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 3 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, 
eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + x3, None) tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tmp5 = tmp4 * tmp4 tmp6 = tmp5 - tmp4 tmp7 = tmp3 * tmp6 tmp8 = tmp4 + tmp7 tmp9 = tmp8 * tmp8 tmp10 = tmp9 - tmp8 tmp11 = tmp3 * tmp10 tmp12 = tmp8 + tmp11 tmp13 = tmp12 * tmp12 tmp14 = tmp13 - tmp12 tmp15 = tmp3 * tmp14 tmp16 = tmp12 + tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp17 - tmp16 tmp19 = tmp3 * tmp18 tmp20 = tmp16 + tmp19 tmp21 = tmp20 * tmp20 tmp22 = tmp21 - tmp20 tmp23 = tmp3 * tmp22 tmp24 = tmp20 + tmp23 tmp25 = tmp24 * tmp24 tmp26 = tmp25 - tmp24 tmp27 = tmp3 * tmp26 tmp28 = tmp24 + tmp27 tmp29 = tmp28 * tmp28 tmp30 = tmp29 - tmp28 tmp31 = tmp3 * tmp30 tmp32 = tmp28 + tmp31 tmp33 = tmp32 * tmp32 tmp34 = tmp33 - tmp32 tmp35 = tmp3 * tmp34 tmp36 = tmp32 + tmp35 tl.store(in_out_ptr0 + x3, tmp3, None) tl.store(in_out_ptr1 + x3, tmp36, None) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 32 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x3, tmp6, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29) = args args.clear() assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_2, (3, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_3, (3,), (1,)) assert_size_stride(primals_4, (32, 3, 1, 1), (3, 1, 1, 1)) assert_size_stride(primals_5, (32,), (1,)) assert_size_stride(primals_6, (32, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_7, (32,), (1,)) assert_size_stride(primals_8, (32, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_9, (32,), (1,)) assert_size_stride(primals_10, (32, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_11, (32,), (1,)) assert_size_stride(primals_12, (32, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_13, (32,), (1,)) assert_size_stride(primals_14, (32, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_15, (32,), (1,)) assert_size_stride(primals_16, (32, 32, 1, 1), (32, 1, 1, 1)) assert_size_stride(primals_17, (32,), (1,)) assert_size_stride(primals_18, (64, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_19, (64,), (1,)) assert_size_stride(primals_20, (32, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_21, (32,), (1,)) assert_size_stride(primals_22, (64, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_23, (64,), (1,)) assert_size_stride(primals_24, (32, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_25, (32,), (1,)) assert_size_stride(primals_26, (64, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_27, (64,), (1,)) assert_size_stride(primals_28, (3, 64, 1, 1), (64, 1, 1, 1)) assert_size_stride(primals_29, (3,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=3, bias=None) 
assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(49152)](buf1, primals_3, 49152, XBLOCK=512, num_warps=4, num_stages=1) del primals_3 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_relu_1[grid(524288)](buf3, primals_5, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=32, bias=None) assert_size_stride(buf4, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf5 = buf4 del buf4 triton_poi_fused_convolution_2[grid(524288)](buf5, primals_7, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_7 buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf6, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf7 = buf6 del buf6 triton_poi_fused_convolution_relu_1[grid(524288)](buf7, primals_9, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_9 buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=32, bias=None) assert_size_stride(buf8, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf9 = buf8 del buf8 triton_poi_fused_convolution_2[grid(524288)](buf9, primals_11, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_11 buf10 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf11 = buf10 del buf10 triton_poi_fused_convolution_relu_1[grid(524288)](buf11, primals_13, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_13 buf12 = extern_kernels.convolution(buf11, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=32, bias=None) assert_size_stride(buf12, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf13 = buf12 del buf12 triton_poi_fused_convolution_2[grid(524288)](buf13, primals_15, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del primals_15 buf14 = extern_kernels.convolution(buf13, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf14, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf15 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32) triton_poi_fused_cat_3[grid(1048576)](buf11, buf14, primals_17, buf15, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) buf16 = extern_kernels.convolution(buf15, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=64, bias=None) assert_size_stride(buf16, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf17 = buf16 del buf16 triton_poi_fused_convolution_4[grid(1048576)](buf17, primals_19, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_19 buf18 = extern_kernels.convolution(buf17, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf18, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf19 = 
empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32) triton_poi_fused_cat_3[grid(1048576)](buf7, buf18, primals_21, buf19, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) buf20 = extern_kernels.convolution(buf19, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=64, bias=None) assert_size_stride(buf20, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf21 = buf20 del buf20 triton_poi_fused_convolution_4[grid(1048576)](buf21, primals_23, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_23 buf22 = extern_kernels.convolution(buf21, primals_24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf22, (4, 32, 64, 64), (131072, 4096, 64, 1)) buf23 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32) triton_poi_fused_cat_3[grid(1048576)](buf3, buf22, primals_25, buf23, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) buf24 = extern_kernels.convolution(buf23, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=64, bias=None) assert_size_stride(buf24, (4, 64, 64, 64), (262144, 4096, 64, 1)) buf25 = buf24 del buf24 triton_poi_fused_convolution_4[grid(1048576)](buf25, primals_27, 1048576, XBLOCK=1024, num_warps=4, num_stages=1) del primals_27 buf26 = extern_kernels.convolution(buf25, primals_28, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf26, (4, 3, 64, 64), (12288, 4096, 64, 1)) buf27 = buf26 del buf26 buf28 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32) buf29 = buf28 del buf28 buf30 = buf29 del buf29 triton_poi_fused_add_convolution_mul_pow_sub_tanh_5[grid(49152)](buf27, buf30, primals_29, primals_1, 49152, XBLOCK=256, num_warps=4, num_stages=1) del primals_29 buf31 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_6[grid(524288)]( buf22, primals_25, buf31, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del buf22 del primals_25 buf32 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_6[grid(524288)]( buf18, primals_21, buf32, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del buf18 del primals_21 buf33 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_6[grid(524288)]( buf14, primals_17, buf33, 524288, XBLOCK=1024, num_warps=4, num_stages=1) del buf14 del primals_17 return (buf30, buf27, primals_1, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, buf1, buf3, buf5, buf7, buf9, buf11, buf13, buf15, buf17, buf19, buf21, buf23, buf25, buf27, buf31, buf32, buf33) class CSDN_Tem(nn.Module): def __init__(self, in_ch, out_ch): super(CSDN_Tem, self).__init__() self.depth_conv = nn.Conv2d(in_channels=in_ch, out_channels=in_ch, kernel_size=3, padding=1, groups=in_ch) self.point_conv = nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=1) def forward(self, input): out = self.depth_conv(input) out = self.point_conv(out) return out class enhance_net_nopoolNew(nn.Module): def __init__(self, scale_factor): super(enhance_net_nopoolNew, self).__init__() self.relu = nn.ReLU(inplace=True) self.scale_factor = scale_factor 
self.upsample = nn.UpsamplingBilinear2d(scale_factor=self.scale_factor) number_f = 32 self.e_conv1 = CSDN_Tem(3, number_f) self.e_conv2 = CSDN_Tem(number_f, number_f) self.e_conv3 = CSDN_Tem(number_f, number_f) self.e_conv4 = CSDN_Tem(number_f, number_f) self.e_conv5 = CSDN_Tem(number_f * 2, number_f) self.e_conv6 = CSDN_Tem(number_f * 2, number_f) self.e_conv7 = CSDN_Tem(number_f * 2, 3) def enhance(self, x, x_r): for _ in range(8): x = x + x_r * (torch.pow(x, 2) - x) return x def forward(self, input_0): primals_2 = self.e_conv1.depth_conv.weight primals_3 = self.e_conv1.depth_conv.bias primals_4 = self.e_conv1.point_conv.weight primals_5 = self.e_conv1.point_conv.bias primals_6 = self.e_conv2.depth_conv.weight primals_7 = self.e_conv2.depth_conv.bias primals_8 = self.e_conv2.point_conv.weight primals_9 = self.e_conv2.point_conv.bias primals_10 = self.e_conv3.depth_conv.weight primals_11 = self.e_conv3.depth_conv.bias primals_12 = self.e_conv3.point_conv.weight primals_13 = self.e_conv3.point_conv.bias primals_14 = self.e_conv4.depth_conv.weight primals_15 = self.e_conv4.depth_conv.bias primals_16 = self.e_conv4.point_conv.weight primals_17 = self.e_conv4.point_conv.bias primals_18 = self.e_conv5.depth_conv.weight primals_19 = self.e_conv5.depth_conv.bias primals_20 = self.e_conv5.point_conv.weight primals_21 = self.e_conv5.point_conv.bias primals_22 = self.e_conv6.depth_conv.weight primals_23 = self.e_conv6.depth_conv.bias primals_24 = self.e_conv6.point_conv.weight primals_25 = self.e_conv6.point_conv.bias primals_26 = self.e_conv7.depth_conv.weight primals_27 = self.e_conv7.depth_conv.bias primals_28 = self.e_conv7.point_conv.weight primals_29 = self.e_conv7.point_conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29]) return output[0], output[1]
Lundez/londogard-backend
enhance_net_nopool
false
11696
[ "MIT" ]
0
90d9e405b832c2157e6fde00f58b9312cfc4ddbc
https://github.com/Lundez/londogard-backend/tree/90d9e405b832c2157e6fde00f58b9312cfc4ddbc
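The fused kernel triton_poi_fused_add_convolution_mul_pow_sub_tanh_5 unrolls the eight-step curve x <- x + x_r * (x^2 - x) that enhance() expresses as a loop. With scale_factor=1 both forward paths skip resampling, so the compiled module can be checked end-to-end against the eager one. A minimal sketch, assuming a CUDA device; tolerances are assumptions, since the unrolled polynomial amplifies float error:

import torch

torch.manual_seed(0)
ref = enhance_net_nopool(scale_factor=1).cuda()
opt = enhance_net_nopoolNew(scale_factor=1).cuda()
opt.load_state_dict(ref.state_dict())
x = torch.rand(4, 3, 64, 64, device='cuda')
out_ref, r_ref = ref(x)
out_opt, r_opt = opt(x)
assert torch.allclose(r_ref, r_opt, atol=1e-5)      # x_r, the tanh map
assert torch.allclose(out_ref, out_opt, atol=1e-4)  # enhanced image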
CQConcatenate
import torch import torch.nn.parallel import torch.nn as nn import torch.utils.data import torch.backends.cudnn def mask_logits(inputs, mask, mask_value=-1e+30): mask = mask.type(torch.float32) return inputs + (1.0 - mask) * mask_value class Conv1D(nn.Module): def __init__(self, in_dim, out_dim, kernel_size=1, stride=1, padding=0, bias=True): super(Conv1D, self).__init__() self.conv1d = nn.Conv1d(in_channels=in_dim, out_channels=out_dim, kernel_size=kernel_size, padding=padding, stride=stride, bias=bias) def forward(self, x): x = x.transpose(1, 2) x = self.conv1d(x) return x.transpose(1, 2) class WeightedPool(nn.Module): def __init__(self, dim): super(WeightedPool, self).__init__() weight = torch.empty(dim, 1) nn.init.xavier_uniform_(weight) self.weight = nn.Parameter(weight, requires_grad=True) def forward(self, x, mask): alpha = torch.tensordot(x, self.weight, dims=1) alpha = mask_logits(alpha, mask=mask.unsqueeze(2)) alphas = nn.Softmax(dim=1)(alpha) pooled_x = torch.matmul(x.transpose(1, 2), alphas) pooled_x = pooled_x.squeeze(2) return pooled_x class CQConcatenate(nn.Module): def __init__(self, dim): super(CQConcatenate, self).__init__() self.weighted_pool = WeightedPool(dim=dim) self.conv1d = Conv1D(in_dim=2 * dim, out_dim=dim, kernel_size=1, stride=1, padding=0, bias=True) def forward(self, context, query, q_mask): pooled_query = self.weighted_pool(query, q_mask) _, c_seq_len, _ = context.shape pooled_query = pooled_query.unsqueeze(1).repeat(1, c_seq_len, 1) output = torch.cat([context, pooled_query], dim=2) output = self.conv1d(output) return output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn.parallel import torch.nn as nn import torch.utils.data import torch.backends.cudnn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp20 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp4 = -1e+30 tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tmp9 = tmp2 - tmp8 tmp10 = tmp9 * tmp4 tmp11 = tmp7 + tmp10 tmp12 = triton_helpers.maximum(tmp6, tmp11) tmp15 = tmp2 - tmp14 tmp16 = tmp15 * tmp4 tmp17 = tmp13 + tmp16 tmp18 = triton_helpers.maximum(tmp12, tmp17) tmp21 = tmp2 - tmp20 tmp22 = tmp21 * tmp4 tmp23 = tmp19 + tmp22 tmp24 = triton_helpers.maximum(tmp18, tmp23) tmp25 = tmp6 - tmp24 tmp26 = tl_math.exp(tmp25) tmp27 = tmp11 - tmp24 tmp28 = tl_math.exp(tmp27) tmp29 = tmp26 + tmp28 tmp30 = tmp17 - tmp24 tmp31 = tl_math.exp(tmp30) tmp32 = tmp29 + tmp31 tmp33 = tmp23 - tmp24 tmp34 = tl_math.exp(tmp33) tmp35 = tmp32 + tmp34 tl.store(out_ptr0 + x0, tmp24, xmask) tl.store(out_ptr1 + x0, tmp35, xmask) @triton.jit def triton_poi_fused__softmax_add_mul_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2, xmask) tmp7 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp4 = -1e+30 tmp5 = tmp3 * tmp4 tmp6 = tmp0 + tmp5 tmp8 = tmp6 - tmp7 tmp9 = tl_math.exp(tmp8) tmp11 = tmp9 / tmp10 tl.store(in_out_ptr0 + x2, tmp11, xmask) @triton.jit def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x3 = xindex // 8 x2 = xindex // 32 x4 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x3 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x2 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x4, tmp10, xmask) @triton.jit def triton_poi_fused_convolution_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 32 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 8 y1 = yindex // 8 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 8 * x2 + 32 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 1), (1, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_5, (4, 8, 1), (8, 1, 1)) assert_size_stride(primals_6, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), primals_1, out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) buf2 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_add_mul_rsub_0[grid(4)](buf0, primals_3, buf1, buf2, 4, XBLOCK=4, num_warps=1, num_stages=1) buf3 = reinterpret_tensor(buf0, (4, 4, 1), (4, 1, 1), 0) del buf0 triton_poi_fused__softmax_add_mul_rsub_1[grid(16)](buf3, primals_3, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf1 del buf2 del primals_3 buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), buf3, out=buf4) buf5 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_2[grid(128)](primals_4, buf4, buf5, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf4 del primals_4 buf6 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32) triton_poi_fused_convolution_3[grid(32, 4)](buf5, buf6, 32, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf7 = extern_kernels.convolution(buf6, primals_5, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf7, (4, 4, 4), (16, 4, 1)) del buf6 buf8 = buf7 del buf7 triton_poi_fused_convolution_4[grid(64)](buf8, primals_6, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_6 return 
reinterpret_tensor(buf8, (4, 4, 4), (16, 1, 4), 0 ), primals_2, primals_5, buf3, reinterpret_tensor(buf5, (4, 8, 4), (32, 1, 8), 0) def mask_logits(inputs, mask, mask_value=-1e+30): mask = mask.type(torch.float32) return inputs + (1.0 - mask) * mask_value class Conv1D(nn.Module): def __init__(self, in_dim, out_dim, kernel_size=1, stride=1, padding=0, bias=True): super(Conv1D, self).__init__() self.conv1d = nn.Conv1d(in_channels=in_dim, out_channels=out_dim, kernel_size=kernel_size, padding=padding, stride=stride, bias=bias) def forward(self, x): x = x.transpose(1, 2) x = self.conv1d(x) return x.transpose(1, 2) class WeightedPool(nn.Module): def __init__(self, dim): super(WeightedPool, self).__init__() weight = torch.empty(dim, 1) nn.init.xavier_uniform_(weight) self.weight = nn.Parameter(weight, requires_grad=True) def forward(self, x, mask): alpha = torch.tensordot(x, self.weight, dims=1) alpha = mask_logits(alpha, mask=mask.unsqueeze(2)) alphas = nn.Softmax(dim=1)(alpha) pooled_x = torch.matmul(x.transpose(1, 2), alphas) pooled_x = pooled_x.squeeze(2) return pooled_x class CQConcatenateNew(nn.Module): def __init__(self, dim): super(CQConcatenateNew, self).__init__() self.weighted_pool = WeightedPool(dim=dim) self.conv1d = Conv1D(in_dim=2 * dim, out_dim=dim, kernel_size=1, stride=1, padding=0, bias=True) def forward(self, input_0, input_1, input_2): primals_1 = self.weighted_pool.weight primals_5 = self.conv1d.conv1d.weight primals_6 = self.conv1d.conv1d.bias primals_2 = input_0 primals_4 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
MicroTensor-ai/episodic-memory
CQConcatenate
false
11697
[ "MIT" ]
0
295a3752ab94c7a6f45355aa2c54bffbf84b574f
https://github.com/MicroTensor-ai/episodic-memory/tree/295a3752ab94c7a6f45355aa2c54bffbf84b574f
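CQConcatenate pools the query with WeightedPool, tiles the pooled vector along the context length, concatenates, and projects back with a kernel-size-1 Conv1D; the Triton version fuses the masked softmax and the concatenation. A minimal equivalence sketch, assuming a CUDA device and this record's get_inputs; the hard mask and tolerance are illustrative assumptions:

import torch

torch.manual_seed(0)
ref = CQConcatenate(dim=4).cuda()
opt = CQConcatenateNew(dim=4).cuda()
opt.load_state_dict(ref.state_dict())
context, query, q_mask = [t.cuda() for t in get_inputs()]
q_mask = (q_mask > 0.5).float()  # 0/1 mask is the intended use; rand works too
assert torch.allclose(ref(context, query, q_mask),
                      opt(context, query, q_mask), atol=1e-5)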
GEGLU
import torch import torch.nn.functional as F from torch import nn class GEGLU(nn.Module): def forward(self, x): x, gates = x.chunk(2, dim=-1) return F.gelu(gates) * x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_gelu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask) tmp9 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tmp10 = tmp8 * tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_mul_0[grid(128)](arg0_1, buf0, 128, XBLOCK= 128, num_warps=4, num_stages=1) del arg0_1 return buf0, class GEGLUNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Mohan-Zhang-u/vit-pytorch
GEGLU
false
11698
[ "MIT" ]
0
76050c812474d7c10d67db4e811f537e26c3996a
https://github.com/Mohan-Zhang-u/vit-pytorch/tree/76050c812474d7c10d67db4e811f537e26c3996a
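triton_poi_fused_gelu_mul_0 inlines the exact erf-based GELU, 0.5 * g * (1 + erf(g / sqrt(2))), reading the gates from the second half of the last dimension (offsets 2 and 3 of 4) and multiplying by the first half. The identity itself is easy to confirm in eager PyTorch; the tolerance is an assumption:

import math
import torch
import torch.nn.functional as F

g = torch.randn(1024)
manual = 0.5 * g * (1.0 + torch.erf(g / math.sqrt(2.0)))  # kernel's formula
assert torch.allclose(manual, F.gelu(g), atol=1e-6)       # exact (erf) GELU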
Actor
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Actor(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, fc1_units=128, fc2_units=128): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fc1_units (int): Number of nodes in first hidden layer fc2_units (int): Number of nodes in second hidden layer """ super(Actor, self).__init__() self.seed = torch.manual_seed(seed) self.fc1 = nn.Linear(state_size, fc1_units) self.fc2 = nn.Linear(fc1_units, fc2_units) self.fc3 = nn.Linear(fc2_units, action_size) self.reset_parameters() def reset_parameters(self): self.fc1.weight.data.uniform_(*hidden_init(self.fc1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, state): """Build an actor (policy) network that maps states -> actions.""" x = F.relu(self.fc1(state)) x = F.relu(self.fc2(x)) return F.tanh(self.fc3(x)) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (128, 128), (128, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (4, 128), (128, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 buf7 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1, primals_2, buf7, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf2 buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf3, primals_5, buf6, 8192, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 4), (1, 128), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_tanh_1[grid(256)](buf5, primals_7, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_7 return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), 
reinterpret_tensor(buf1, (64, 128), (128, 1), 0 ), reinterpret_tensor(buf3, (64, 128), (128, 1), 0 ), buf5, primals_6, buf6, primals_4, buf7 def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class ActorNew(nn.Module): """Actor (Policy) Model.""" def __init__(self, state_size, action_size, seed, fc1_units=128, fc2_units=128): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fc1_units (int): Number of nodes in first hidden layer fc2_units (int): Number of nodes in second hidden layer """ super(ActorNew, self).__init__() self.seed = torch.manual_seed(seed) self.fc1 = nn.Linear(state_size, fc1_units) self.fc2 = nn.Linear(fc1_units, fc2_units) self.fc3 = nn.Linear(fc2_units, action_size) self.reset_parameters() def reset_parameters(self): self.fc1.weight.data.uniform_(*hidden_init(self.fc1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_6 = self.fc3.weight primals_7 = self.fc3.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
Mika412/deep-reinforcement-learning
Actor
false
11699
[ "MIT" ]
0
9b5ba901f760e50cd64d272939eff75881af5a9c
https://github.com/Mika412/deep-reinforcement-learning/tree/9b5ba901f760e50cd64d272939eff75881af5a9c
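Because both classes call torch.manual_seed(seed) first thing in __init__ and then run the same layer constructions and reset_parameters, building Actor and ActorNew with the same seed yields identical weights, so no state_dict copy is needed. (F.tanh in the reference is deprecated but still equivalent to torch.tanh.) A minimal sketch, assuming a CUDA device; the tolerance is an assumption:

import torch

ref = Actor(state_size=4, action_size=4, seed=4).cuda()
opt = ActorNew(state_size=4, action_size=4, seed=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
# both paths use the same extern cuBLAS matmuls, so outputs should agree closely
assert torch.allclose(ref(x), opt(x), atol=1e-5)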
Critic
import torch import numpy as np import torch.nn.functional as F import torch.nn as nn def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class Critic(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, fcs1_units=128, fc2_units=128): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fcs1_units (int): Number of nodes in the first hidden layer fc2_units (int): Number of nodes in the second hidden layer """ super(Critic, self).__init__() self.seed = torch.manual_seed(seed) self.fcs1 = nn.Linear(state_size, fcs1_units) self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units) self.fc3 = nn.Linear(fc2_units, 1) self.reset_parameters() def reset_parameters(self): self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, state, action): """Build a critic (value) network that maps (state, action) pairs -> Q-values.""" xs = F.relu(self.fcs1(state)) x = torch.cat((xs, action), dim=1) x = F.relu(self.fc2(x)) return self.fc3(x) def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 528 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 132 x1 = xindex // 132 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 128, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (128 * x1 + x0), tmp4 & xmask, eviction_policy ='evict_last', other=0.0) tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.full([1], 0, tl.int32) tmp9 = triton_helpers.maximum(tmp8, tmp7) tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype) tmp11 = tl.where(tmp4, tmp9, tmp10) tmp12 = tmp0 >= tmp3 tl.full([1], 132, tl.int64) tmp15 = tl.load(in_ptr2 + (4 * x1 + (-128 + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tl.where(tmp4, tmp11, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (128, 132), (132, 1)) assert_size_stride(primals_6, (128,), (1,)) assert_size_stride(primals_7, (1, 128), (128, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 132), (132, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(528)](buf0, primals_2, primals_4, buf1, 528, XBLOCK=256, num_warps=4, num_stages=1) del primals_4 buf2 = empty_strided_cuda((4, 128), (128, 1), torch.float32) extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (132, 128), ( 1, 132), 0), 
out=buf2) buf3 = buf2 del buf2 triton_poi_fused_relu_1[grid(512)](buf3, primals_6, 512, XBLOCK=256, num_warps=4, num_stages=1) del primals_6 buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf5) del primals_8 buf6 = empty_strided_cuda((4, 128), (128, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(512)](buf0, primals_2, buf6, 512, XBLOCK=128, num_warps=4, num_stages=1) del buf0 del primals_2 return buf5, primals_3, buf1, buf3, primals_7, primals_5, buf6 def hidden_init(layer): fan_in = layer.weight.data.size()[0] lim = 1.0 / np.sqrt(fan_in) return -lim, lim class CriticNew(nn.Module): """Critic (Value) Model.""" def __init__(self, state_size, action_size, seed, fcs1_units=128, fc2_units=128): """Initialize parameters and build model. Params ====== state_size (int): Dimension of each state action_size (int): Dimension of each action seed (int): Random seed fcs1_units (int): Number of nodes in the first hidden layer fc2_units (int): Number of nodes in the second hidden layer """ super(CriticNew, self).__init__() self.seed = torch.manual_seed(seed) self.fcs1 = nn.Linear(state_size, fcs1_units) self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units) self.fc3 = nn.Linear(fc2_units, 1) self.reset_parameters() def reset_parameters(self): self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1)) self.fc2.weight.data.uniform_(*hidden_init(self.fc2)) self.fc3.weight.data.uniform_(-0.003, 0.003) def forward(self, input_0, input_1): primals_1 = self.fcs1.weight primals_2 = self.fcs1.bias primals_5 = self.fc2.weight primals_6 = self.fc2.bias primals_7 = self.fc3.weight primals_8 = self.fc3.bias primals_3 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
Mika412/deep-reinforcement-learning
Critic
false
11700
[ "MIT" ]
0
9b5ba901f760e50cd64d272939eff75881af5a9c
https://github.com/Mika412/deep-reinforcement-learning/tree/9b5ba901f760e50cd64d272939eff75881af5a9c
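In the compiled Critic, triton_poi_fused_cat_0 performs the bias add, the ReLU of fcs1(state), and the concatenation with the action in a single pass, writing the (4, 132) buffer that feeds fc2. The same seeding argument as for Actor applies, so the check needs no weight copy. A minimal sketch, assuming a CUDA device; the tolerance is an assumption:

import torch

ref = Critic(state_size=4, action_size=4, seed=4).cuda()
opt = CriticNew(state_size=4, action_size=4, seed=4).cuda()
state = torch.rand(4, 4, device='cuda')
action = torch.rand(4, 4, device='cuda')
assert torch.allclose(ref(state, action), opt(state, action), atol=1e-5)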
Conv1D
import torch import torch.nn.parallel import torch.nn as nn import torch.utils.data import torch.backends.cudnn class Conv1D(nn.Module): def __init__(self, in_dim, out_dim, kernel_size=1, stride=1, padding=0, bias=True): super(Conv1D, self).__init__() self.conv1d = nn.Conv1d(in_channels=in_dim, out_channels=out_dim, kernel_size=kernel_size, padding=padding, stride=stride, bias=bias) def forward(self, x): x = x.transpose(1, 2) x = self.conv1d(x) return x.transpose(1, 2) def get_inputs(): return [torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'in_dim': 4, 'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn.parallel import torch.nn as nn import torch.utils.data import torch.backends.cudnn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4), (16, 4, 1)) del buf0 buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_3 return reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0 ), primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0) class Conv1DNew(nn.Module): def __init__(self, in_dim, out_dim, kernel_size=1, stride=1, padding=0, bias=True): super(Conv1DNew, self).__init__() self.conv1d = nn.Conv1d(in_channels=in_dim, out_channels=out_dim, kernel_size=kernel_size, padding=padding, stride=stride, bias=bias) def forward(self, input_0): primals_2 = self.conv1d.weight primals_3 = self.conv1d.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
MicroTensor-ai/episodic-memory
Conv1D
false
11701
[ "MIT" ]
0
295a3752ab94c7a6f45355aa2c54bffbf84b574f
https://github.com/MicroTensor-ai/episodic-memory/tree/295a3752ab94c7a6f45355aa2c54bffbf84b574f
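With kernel_size=1, the transpose / Conv1d / transpose pattern in Conv1D is a position-wise linear map over the last dimension, which is why the compiled version reduces to a single extern convolution plus a bias add. A minimal sketch (CPU suffices; the tolerance is an assumption):

import torch
import torch.nn as nn

torch.manual_seed(0)
m = Conv1D(in_dim=4, out_dim=4)  # kernel_size=1 by default
lin = nn.Linear(4, 4)
lin.weight.data.copy_(m.conv1d.weight.squeeze(-1))  # (out, in, 1) -> (out, in)
lin.bias.data.copy_(m.conv1d.bias)
x = torch.rand(4, 4, 4)
assert torch.allclose(m(x), lin(x), atol=1e-6)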
WeightedPool
import torch import torch.nn.parallel import torch.nn as nn import torch.utils.data import torch.backends.cudnn def mask_logits(inputs, mask, mask_value=-1e+30): mask = mask.type(torch.float32) return inputs + (1.0 - mask) * mask_value class WeightedPool(nn.Module): def __init__(self, dim): super(WeightedPool, self).__init__() weight = torch.empty(dim, 1) nn.init.xavier_uniform_(weight) self.weight = nn.Parameter(weight, requires_grad=True) def forward(self, x, mask): alpha = torch.tensordot(x, self.weight, dims=1) alpha = mask_logits(alpha, mask=mask.unsqueeze(2)) alphas = nn.Softmax(dim=1)(alpha) pooled_x = torch.matmul(x.transpose(1, 2), alphas) pooled_x = pooled_x.squeeze(2) return pooled_x def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex // 4 % 16
    x3 = xindex // 64
    x5 = xindex % 16
    x6 = xindex
    tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (16 + x4), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (16 + x5 + 64 * x3), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (32 + x4), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + (32 + x5 + 64 * x3), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (48 + x4), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr1 + (48 + x5 + 64 * x3), xmask, eviction_policy='evict_last')
    tmp2 = 1.0
    tmp3 = tmp2 - tmp1
    tmp4 = -1e+30
    tmp5 = tmp3 * tmp4
    tmp6 = tmp0 + tmp5
    tmp9 = tmp2 - tmp8
    tmp10 = tmp9 * tmp4
    tmp11 = tmp7 + tmp10
    tmp12 = triton_helpers.maximum(tmp6, tmp11)
    tmp15 = tmp2 - tmp14
    tmp16 = tmp15 * tmp4
    tmp17 = tmp13 + tmp16
    tmp18 = triton_helpers.maximum(tmp12, tmp17)
    tmp21 = tmp2 - tmp20
    tmp22 = tmp21 * tmp4
    tmp23 = tmp19 + tmp22
    tmp24 = triton_helpers.maximum(tmp18, tmp23)
    tmp25 = tmp6 - tmp24
    tmp26 = tl_math.exp(tmp25)
    tmp27 = tmp11 - tmp24
    tmp28 = tl_math.exp(tmp27)
    tmp29 = tmp26 + tmp28
    tmp30 = tmp17 - tmp24
    tmp31 = tl_math.exp(tmp30)
    tmp32 = tmp29 + tmp31
    tmp33 = tmp23 - tmp24
    tmp34 = tl_math.exp(tmp33)
    tmp35 = tmp32 + tmp34
    tl.store(out_ptr0 + x6, tmp24, xmask)
    tl.store(out_ptr1 + x6, tmp35, xmask)


@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16 % 4
    x3 = xindex // 64 % 4
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + x5, tmp0, xmask)


@triton.jit
def triton_poi_fused__softmax_add_mul_rsub_2(in_ptr0, in_ptr1, in_ptr2,
        in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x5 = xindex // 4 % 64
    x6 = xindex % 16
    x7 = xindex // 64
    x4 = xindex // 256
    x8 = xindex % 64
    x9 = xindex
    tmp0 = tl.load(in_ptr0 + x5, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x6 + 16 * x7), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr2 + (x8 + 64 * x4), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr3 + (x8 + 64 * x4), xmask, eviction_policy='evict_last')
    tmp2 = 1.0
    tmp3 = tmp2 - tmp1
    tmp4 = -1e+30
    tmp5 = tmp3 * tmp4
    tmp6 = tmp0 + tmp5
    tmp8 = tmp6 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tmp11 = tmp9 / tmp10
    tl.store(out_ptr0 + x9, tmp11, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 1), (1, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
            primals_1, out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 256, 16, 4, 1), torch.float32)
        buf2 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 256, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_add_mul_rsub_0[grid(256)](buf0, primals_3,
            buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
        buf3 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
        triton_poi_fused_clone_1[grid(1024)](primals_2, buf3, 1024, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf4 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
        triton_poi_fused__softmax_add_mul_rsub_2[grid(1024)](buf0, primals_3,
            buf1, buf2, buf4, 1024, XBLOCK=256, num_warps=4, num_stages=1)
        del buf1
        del buf2
        buf5 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf3, (64, 4, 4), (16, 4, 1), 0),
            reinterpret_tensor(buf4, (64, 4, 4), (16, 4, 1), 0), out=buf5)
        del buf4
    return reinterpret_tensor(buf5, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0
        ), primals_3, buf0, reinterpret_tensor(buf3, (64, 4, 4), (16, 1, 4), 0
        ), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0)


def mask_logits(inputs, mask, mask_value=-1e+30):
    mask = mask.type(torch.float32)
    return inputs + (1.0 - mask) * mask_value


class WeightedPoolNew(nn.Module):

    def __init__(self, dim):
        super(WeightedPoolNew, self).__init__()
        weight = torch.empty(dim, 1)
        nn.init.xavier_uniform_(weight)
        self.weight = nn.Parameter(weight, requires_grad=True)

    def forward(self, input_0, input_1):
        primals_1 = self.weight
        primals_2 = input_0
        primals_3 = input_1
        output = call([primals_1, primals_2, primals_3])
        return output[0]
MicroTensor-ai/episodic-memory
WeightedPool
false
11702
[ "MIT" ]
0
295a3752ab94c7a6f45355aa2c54bffbf84b574f
https://github.com/MicroTensor-ai/episodic-memory/tree/295a3752ab94c7a6f45355aa2c54bffbf84b574f
ELUPlus
import torch
from torch import nn
import torch.nn


class ELUPlus(nn.Module):

    def __init__(self):
        super().__init__()
        self.elu = nn.ELU()

    def forward(self, x):
        return self.elu(x) + 1.0


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_elu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 1.0
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.expm1(tmp4)
    tmp6 = tmp5 * tmp3
    tmp7 = tl.where(tmp2, tmp4, tmp6)
    tmp8 = tmp7 + tmp3
    tl.store(out_ptr0 + x0, tmp8, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_elu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class ELUPlusNew(nn.Module):

    def __init__(self):
        super().__init__()
        self.elu = nn.ELU()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
MilesCranmer/nflows
ELUPlus
false
11703
[ "MIT" ]
0
6e2a267ad0f4ddc84e1db5592ce3c3e4551a7555
https://github.com/MilesCranmer/nflows/tree/6e2a267ad0f4ddc84e1db5592ce3c3e4551a7555
FrameMaxPool
import torch
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn


class FrameMaxPool(nn.Module):

    def __init__(self, input_size, hidden_size, stride):
        super(FrameMaxPool, self).__init__()
        self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1)
        self.max_pool = nn.MaxPool1d(stride)

    def forward(self, visual_input):
        vis_h = torch.relu(self.vis_conv(visual_input))
        vis_h = self.max_pool(vis_h)
        return vis_h


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4, 'stride': 1}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_0(
        in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = tl.full([1], 0, tl.int8)
    tmp6 = 0.0
    tmp7 = tmp4 <= tmp6
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp5, xmask)
    tl.store(out_ptr1 + x2, tmp4, xmask)
    tl.store(out_ptr2 + x2, tmp7, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
            4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,),
            dilation=(1,), transposed=False, output_padding=(0,), groups=1,
            bias=None)
        assert_size_stride(buf0, (1, 4, 4), (16, 4, 1))
        buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
        del buf0
        buf2 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.int8)
        buf3 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32)
        buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_0[grid
            (16)](buf1, primals_2, buf2, buf3, buf4, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_2
    return reinterpret_tensor(buf3, (4, 4), (4, 1), 0
        ), primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0
        ), reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0), buf2, buf4


class FrameMaxPoolNew(nn.Module):

    def __init__(self, input_size, hidden_size, stride):
        super(FrameMaxPoolNew, self).__init__()
        self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1)
        self.max_pool = nn.MaxPool1d(stride)

    def forward(self, input_0):
        primals_1 = self.vis_conv.weight
        primals_2 = self.vis_conv.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
MicroTensor-ai/episodic-memory
FrameMaxPool
false
11704
[ "MIT" ]
0
295a3752ab94c7a6f45355aa2c54bffbf84b574f
https://github.com/MicroTensor-ai/episodic-memory/tree/295a3752ab94c7a6f45355aa2c54bffbf84b574f
NN
import torch
import torch.nn.functional as F
import torch.nn as nn


class NN(nn.Module):

    def __init__(self, input_size):
        super().__init__()
        self.fc1 = nn.Linear(input_size, 50)
        self.fc2 = nn.Linear(50, 40)
        self.fc3 = nn.Linear(40, 20)
        self.fc4 = nn.Linear(20, 9)
        self.dropout = nn.Dropout(0.15)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = self.fc4(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 3200
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 50
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 2560
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 40
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 1280
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 20
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (50, 4), (4, 1))
    assert_size_stride(primals_2, (50,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (40, 50), (50, 1))
    assert_size_stride(primals_5, (40,), (1,))
    assert_size_stride(primals_6, (20, 40), (40, 1))
    assert_size_stride(primals_7, (20,), (1,))
    assert_size_stride(primals_8, (9, 20), (20, 1))
    assert_size_stride(primals_9, (9,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 50), (50, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 50), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 50), (800, 200, 50, 1), 0)
        del buf0
        buf9 = empty_strided_cuda((4, 4, 4, 50), (800, 200, 50, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(3200)](buf1,
            primals_2, buf9, 3200, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 40), (40, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf1, (64, 50), (50, 1), 0),
            reinterpret_tensor(primals_4, (50, 40), (1, 50), 0), out=buf2)
        buf3 = reinterpret_tensor(buf2, (4, 4, 4, 40), (640, 160, 40, 1), 0)
        del buf2
        buf8 = empty_strided_cuda((4, 4, 4, 40), (640, 160, 40, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_1[grid(2560)](buf3,
            primals_5, buf8, 2560, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((64, 20), (20, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf3, (64, 40), (40, 1), 0),
            reinterpret_tensor(primals_6, (40, 20), (1, 40), 0), out=buf4)
        buf5 = reinterpret_tensor(buf4, (4, 4, 4, 20), (320, 80, 20, 1), 0)
        del buf4
        buf7 = empty_strided_cuda((4, 4, 4, 20), (320, 80, 20, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_2[grid(1280)](buf5,
            primals_7, buf7, 1280, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_7
        buf6 = empty_strided_cuda((64, 9), (9, 1), torch.float32)
        extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 20),
            (20, 1), 0), reinterpret_tensor(primals_8, (20, 9), (1, 20), 0),
            alpha=1, beta=1, out=buf6)
        del primals_9
    return reinterpret_tensor(buf6, (4, 4, 4, 9), (144, 36, 9, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 50), (50, 1), 0), reinterpret_tensor(
        buf3, (64, 40), (40, 1), 0), reinterpret_tensor(buf5, (64, 20), (20,
        1), 0), primals_8, buf7, primals_6, buf8, primals_4, buf9


class NNNew(nn.Module):

    def __init__(self, input_size):
        super().__init__()
        self.fc1 = nn.Linear(input_size, 50)
        self.fc2 = nn.Linear(50, 40)
        self.fc3 = nn.Linear(40, 20)
        self.fc4 = nn.Linear(20, 9)
        self.dropout = nn.Dropout(0.15)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.fc3.weight
        primals_7 = self.fc3.bias
        primals_8 = self.fc4.weight
        primals_9 = self.fc4.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
Meydand2001/Machine-Learning-project
NN
false
11705
[ "MIT" ]
0
dc73bc3820024939ba66a1a3e2ae130d6bf35f9a
https://github.com/Meydand2001/Machine-Learning-project/tree/dc73bc3820024939ba66a1a3e2ae130d6bf35f9a
L2Norm
import torch
from torch import nn


class L2Norm(nn.Module):

    def forward(self, x, eps=1e-06):
        norm = x.norm(dim=1, keepdim=True).clamp(min=eps)
        return x / norm


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_clamp_div_linalg_vector_norm_0(in_ptr0, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = 1e-06
    tmp14 = triton_helpers.maximum(tmp12, tmp13)
    tmp15 = tmp0 / tmp14
    tl.store(out_ptr0 + x3, tmp15, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_div_linalg_vector_norm_0[grid(256)](arg0_1,
            buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class L2NormNew(nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
Mohan-Zhang-u/vit-pytorch
L2Norm
false
11706
[ "MIT" ]
0
76050c812474d7c10d67db4e811f537e26c3996a
https://github.com/Mohan-Zhang-u/vit-pytorch/tree/76050c812474d7c10d67db4e811f537e26c3996a
Downsample
import torch
from torch import nn


class Downsample(nn.Module):

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.conv = nn.Conv2d(dim_in, dim_out, 3, stride=2, padding=1)

    def forward(self, x):
        return self.conv(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim_in': 4, 'dim_out': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
            2), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_2
    return buf1, primals_1, primals_3


class DownsampleNew(nn.Module):

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.conv = nn.Conv2d(dim_in, dim_out, 3, stride=2, padding=1)

    def forward(self, input_0):
        primals_1 = self.conv.weight
        primals_2 = self.conv.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
Mohan-Zhang-u/vit-pytorch
Downsample
false
11707
[ "MIT" ]
0
76050c812474d7c10d67db4e811f537e26c3996a
https://github.com/Mohan-Zhang-u/vit-pytorch/tree/76050c812474d7c10d67db4e811f537e26c3996a
CQAttention
import torch
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn


def mask_logits(inputs, mask, mask_value=-1e+30):
    mask = mask.type(torch.float32)
    return inputs + (1.0 - mask) * mask_value


class Conv1D(nn.Module):

    def __init__(self, in_dim, out_dim, kernel_size=1, stride=1, padding=0,
            bias=True):
        super(Conv1D, self).__init__()
        self.conv1d = nn.Conv1d(in_channels=in_dim, out_channels=out_dim,
            kernel_size=kernel_size, padding=padding, stride=stride, bias=bias)

    def forward(self, x):
        x = x.transpose(1, 2)
        x = self.conv1d(x)
        return x.transpose(1, 2)


class CQAttention(nn.Module):

    def __init__(self, dim, drop_rate=0.0):
        super(CQAttention, self).__init__()
        w4C = torch.empty(dim, 1)
        w4Q = torch.empty(dim, 1)
        w4mlu = torch.empty(1, 1, dim)
        nn.init.xavier_uniform_(w4C)
        nn.init.xavier_uniform_(w4Q)
        nn.init.xavier_uniform_(w4mlu)
        self.w4C = nn.Parameter(w4C, requires_grad=True)
        self.w4Q = nn.Parameter(w4Q, requires_grad=True)
        self.w4mlu = nn.Parameter(w4mlu, requires_grad=True)
        self.dropout = nn.Dropout(p=drop_rate)
        self.cqa_linear = Conv1D(in_dim=4 * dim, out_dim=dim, kernel_size=1,
            stride=1, padding=0, bias=True)

    def forward(self, context, query, c_mask, q_mask):
        score = self.trilinear_attention(context, query)
        score_ = nn.Softmax(dim=2)(mask_logits(score, q_mask.unsqueeze(1)))
        score_t = nn.Softmax(dim=1)(mask_logits(score, c_mask.unsqueeze(2)))
        score_t = score_t.transpose(1, 2)
        c2q = torch.matmul(score_, query)
        q2c = torch.matmul(torch.matmul(score_, score_t), context)
        output = torch.cat([context, c2q, torch.mul(context, c2q), torch.
            mul(context, q2c)], dim=2)
        output = self.cqa_linear(output)
        return output

    def trilinear_attention(self, context, query):
        _batch_size, c_seq_len, _dim = context.shape
        _batch_size, q_seq_len, _dim = query.shape
        context = self.dropout(context)
        query = self.dropout(query)
        subres0 = torch.matmul(context, self.w4C).expand([-1, -1, q_seq_len])
        subres1 = torch.matmul(query, self.w4Q).transpose(1, 2).expand([-1,
            c_seq_len, -1])
        subres2 = torch.matmul(context * self.w4mlu, query.transpose(1, 2))
        res = subres0 + subres1 + subres2
        return res


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4]
        ), torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x2, tmp2, xmask)


@triton.jit
def triton_poi_fused__softmax_add_mul_rsub_1(in_ptr0, in_ptr1, in_ptr2,
        in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr2 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr3 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr2 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr3 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr2 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp33 = tl.load(in_ptr3 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = 1.0
    tmp7 = tmp6 - tmp5
    tmp8 = -1e+30
    tmp9 = tmp7 * tmp8
    tmp10 = tmp4 + tmp9
    tmp12 = tmp0 + tmp11
    tmp14 = tmp12 + tmp13
    tmp16 = tmp6 - tmp15
    tmp17 = tmp16 * tmp8
    tmp18 = tmp14 + tmp17
    tmp19 = triton_helpers.maximum(tmp10, tmp18)
    tmp21 = tmp0 + tmp20
    tmp23 = tmp21 + tmp22
    tmp25 = tmp6 - tmp24
    tmp26 = tmp25 * tmp8
    tmp27 = tmp23 + tmp26
    tmp28 = triton_helpers.maximum(tmp19, tmp27)
    tmp30 = tmp0 + tmp29
    tmp32 = tmp30 + tmp31
    tmp34 = tmp6 - tmp33
    tmp35 = tmp34 * tmp8
    tmp36 = tmp32 + tmp35
    tmp37 = triton_helpers.maximum(tmp28, tmp36)
    tl.store(out_ptr0 + x2, tmp37, xmask)


@triton.jit
def triton_poi_fused__softmax_add_mul_rsub_2(in_ptr0, in_ptr1, in_ptr2,
        in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
    tmp5 = tl.load(in_ptr3 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
    tmp15 = tl.load(in_ptr3 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
    tmp24 = tl.load(in_ptr3 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
    tmp33 = tl.load(in_ptr3 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = 1.0
    tmp7 = tmp6 - tmp5
    tmp8 = -1e+30
    tmp9 = tmp7 * tmp8
    tmp10 = tmp4 + tmp9
    tmp12 = tmp11 + tmp1
    tmp14 = tmp12 + tmp13
    tmp16 = tmp6 - tmp15
    tmp17 = tmp16 * tmp8
    tmp18 = tmp14 + tmp17
    tmp19 = triton_helpers.maximum(tmp10, tmp18)
    tmp21 = tmp20 + tmp1
    tmp23 = tmp21 + tmp22
    tmp25 = tmp6 - tmp24
    tmp26 = tmp25 * tmp8
    tmp27 = tmp23 + tmp26
    tmp28 = triton_helpers.maximum(tmp19, tmp27)
    tmp30 = tmp29 + tmp1
    tmp32 = tmp30 + tmp31
    tmp34 = tmp6 - tmp33
    tmp35 = tmp34 * tmp8
    tmp36 = tmp32 + tmp35
    tmp37 = triton_helpers.maximum(tmp28, tmp36)
    tl.store(out_ptr0 + x2, tmp37, xmask)


@triton.jit
def triton_poi_fused__softmax_add_mul_rsub_3(in_ptr0, in_ptr1, in_ptr2,
        in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex // 4
    x0 = xindex % 4
    x2 = xindex // 16
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x4, xmask)
    tmp5 = tl.load(in_ptr3 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr4 + x3, xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr5 + x3, xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr6 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = 1.0
    tmp7 = tmp6 - tmp5
    tmp8 = -1e+30
    tmp9 = tmp7 * tmp8
    tmp10 = tmp4 + tmp9
    tmp12 = tmp10 - tmp11
    tmp13 = tl_math.exp(tmp12)
    tmp15 = tmp6 - tmp14
    tmp16 = tmp15 * tmp8
    tmp17 = tmp4 + tmp16
    tmp19 = tmp17 - tmp18
    tmp20 = tl_math.exp(tmp19)
    tl.store(out_ptr0 + x4, tmp13, xmask)
    tl.store(out_ptr1 + x4, tmp20, xmask)


@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused__softmax_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 4
    x2 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x3, tmp8, xmask)


@triton.jit
def triton_poi_fused_cat_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tmp7 = tl.full([1], 8, tl.int64)
    tmp8 = tmp0 < tmp7
    tmp9 = tmp6 & tmp8
    tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp11 = tmp0 >= tmp7
    tmp12 = tl.full([1], 12, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tmp11 & tmp13
    tmp15 = tl.load(in_ptr0 + (4 * x1 + (-8 + x0)), tmp14 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp16 = tl.load(in_ptr1 + (4 * x1 + (-8 + x0)), tmp14 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp17 = tmp15 * tmp16
    tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
    tmp19 = tl.where(tmp14, tmp17, tmp18)
    tmp20 = tmp0 >= tmp12
    tl.full([1], 16, tl.int64)
    tmp23 = tl.load(in_ptr0 + (4 * x1 + (-12 + x0)), tmp20 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp24 = tl.load(in_ptr2 + (4 * x1 + (-12 + x0)), tmp20 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp25 = tmp23 * tmp24
    tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
    tmp27 = tl.where(tmp20, tmp25, tmp26)
    tmp28 = tl.where(tmp14, tmp19, tmp27)
    tmp29 = tl.where(tmp9, tmp10, tmp28)
    tmp30 = tl.where(tmp4, tmp5, tmp29)
    tl.store(out_ptr0 + x2, tmp30, xmask)


@triton.jit
def triton_poi_fused_convolution_7(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 64
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 16
    y1 = yindex // 16
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (4, 1), (1, 1))
    assert_size_stride(primals_4, (4, 1), (1, 1))
    assert_size_stride(primals_5, (1, 1, 4), (4, 4, 1))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4, 16, 1), (16, 1, 1))
    assert_size_stride(primals_9, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            primals_3, out=buf0)
        del primals_3
        buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
            primals_4, out=buf1)
        del primals_4
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(64)](primals_1, primals_5, buf2, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_5
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf2, reinterpret_tensor(primals_2, (4, 4, 4), (
            16, 1, 4), 0), out=buf3)
        buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused__softmax_add_mul_rsub_1[grid(16)](buf0, buf1, buf3,
            primals_6, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf7 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
        triton_poi_fused__softmax_add_mul_rsub_2[grid(16)](buf0, buf1, buf3,
            primals_7, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf5 = buf2
        del buf2
        buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_add_mul_rsub_3[grid(64)](buf0, buf1, buf3,
            primals_6, buf4, primals_7, buf7, buf5, buf8, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf0
        del buf1
        del buf4
        del buf7
        del primals_6
        del primals_7
        buf6 = buf3
        del buf3
        triton_poi_fused__softmax_4[grid(64)](buf5, buf6, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf9 = buf5
        del buf5
        triton_poi_fused__softmax_5[grid(64)](buf8, buf9, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf10 = buf8
        del buf8
        extern_kernels.bmm(buf6, primals_2, out=buf10)
        buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf6, reinterpret_tensor(buf9, (4, 4, 4), (16, 1,
            4), 0), out=buf11)
        buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf11, primals_1, out=buf12)
        del buf11
        buf13 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
        triton_poi_fused_cat_6[grid(256)](primals_1, buf10, buf12, buf13,
            256, XBLOCK=256, num_warps=4, num_stages=1)
        del buf10
        del buf12
        buf14 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
        triton_poi_fused_convolution_7[grid(64, 4)](buf13, buf14, 64, 4,
            XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
        buf15 = extern_kernels.convolution(buf14, primals_8, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf15, (4, 4, 4), (16, 4, 1))
        del buf14
        buf16 = buf15
        del buf15
        triton_poi_fused_convolution_8[grid(64)](buf16, primals_9, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_9
    return reinterpret_tensor(buf16, (4, 4, 4), (16, 1, 4), 0
        ), primals_1, primals_2, primals_8, buf6, buf9, reinterpret_tensor(
        buf13, (4, 16, 4), (64, 1, 16), 0)


def mask_logits(inputs, mask, mask_value=-1e+30):
    mask = mask.type(torch.float32)
    return inputs + (1.0 - mask) * mask_value


class Conv1D(nn.Module):

    def __init__(self, in_dim, out_dim, kernel_size=1, stride=1, padding=0,
            bias=True):
        super(Conv1D, self).__init__()
        self.conv1d = nn.Conv1d(in_channels=in_dim, out_channels=out_dim,
            kernel_size=kernel_size, padding=padding, stride=stride, bias=bias)

    def forward(self, x):
        x = x.transpose(1, 2)
        x = self.conv1d(x)
        return x.transpose(1, 2)


class CQAttentionNew(nn.Module):

    def __init__(self, dim, drop_rate=0.0):
        super(CQAttentionNew, self).__init__()
        w4C = torch.empty(dim, 1)
        w4Q = torch.empty(dim, 1)
        w4mlu = torch.empty(1, 1, dim)
        nn.init.xavier_uniform_(w4C)
        nn.init.xavier_uniform_(w4Q)
        nn.init.xavier_uniform_(w4mlu)
        self.w4C = nn.Parameter(w4C, requires_grad=True)
        self.w4Q = nn.Parameter(w4Q, requires_grad=True)
        self.w4mlu = nn.Parameter(w4mlu, requires_grad=True)
        self.dropout = nn.Dropout(p=drop_rate)
        self.cqa_linear = Conv1D(in_dim=4 * dim, out_dim=dim, kernel_size=1,
            stride=1, padding=0, bias=True)

    def trilinear_attention(self, context, query):
        _batch_size, c_seq_len, _dim = context.shape
        _batch_size, q_seq_len, _dim = query.shape
        context = self.dropout(context)
        query = self.dropout(query)
        subres0 = torch.matmul(context, self.w4C).expand([-1, -1, q_seq_len])
        subres1 = torch.matmul(query, self.w4Q).transpose(1, 2).expand([-1,
            c_seq_len, -1])
        subres2 = torch.matmul(context * self.w4mlu, query.transpose(1, 2))
        res = subres0 + subres1 + subres2
        return res

    def forward(self, input_0, input_1, input_2, input_3):
        primals_3 = self.w4C
        primals_4 = self.w4Q
        primals_5 = self.w4mlu
        primals_8 = self.cqa_linear.conv1d.weight
        primals_9 = self.cqa_linear.conv1d.bias
        primals_1 = input_0
        primals_2 = input_1
        primals_6 = input_2
        primals_7 = input_3
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
MicroTensor-ai/episodic-memory
CQAttention
false
11708
[ "MIT" ]
0
295a3752ab94c7a6f45355aa2c54bffbf84b574f
https://github.com/MicroTensor-ai/episodic-memory/tree/295a3752ab94c7a6f45355aa2c54bffbf84b574f
MultiHeadAttentionBlock
import math
import torch
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn


def mask_logits(inputs, mask, mask_value=-1e+30):
    mask = mask.type(torch.float32)
    return inputs + (1.0 - mask) * mask_value


class Conv1D(nn.Module):

    def __init__(self, in_dim, out_dim, kernel_size=1, stride=1, padding=0,
            bias=True):
        super(Conv1D, self).__init__()
        self.conv1d = nn.Conv1d(in_channels=in_dim, out_channels=out_dim,
            kernel_size=kernel_size, padding=padding, stride=stride, bias=bias)

    def forward(self, x):
        x = x.transpose(1, 2)
        x = self.conv1d(x)
        return x.transpose(1, 2)


class MultiHeadAttentionBlock(nn.Module):

    def __init__(self, dim, num_heads, drop_rate):
        super(MultiHeadAttentionBlock, self).__init__()
        assert dim % num_heads == 0, 'The channels (%d) is not a multiple of attention heads (%d)' % (
            dim, num_heads)
        self.head_size, self.num_heads, self.dim = int(dim / num_heads
            ), num_heads, dim
        self.dropout = nn.Dropout(p=drop_rate)
        self.query = Conv1D(in_dim=dim, out_dim=dim, kernel_size=1, stride=
            1, padding=0, bias=True)
        self.key = Conv1D(in_dim=dim, out_dim=dim, kernel_size=1, stride=1,
            padding=0, bias=True)
        self.value = Conv1D(in_dim=dim, out_dim=dim, kernel_size=1, stride=
            1, padding=0, bias=True)
        self.layer_norm1 = nn.LayerNorm(dim, eps=1e-06)
        self.layer_norm2 = nn.LayerNorm(dim, eps=1e-06)
        self.out_layer = Conv1D(in_dim=dim, out_dim=dim, kernel_size=1,
            stride=1, padding=0, bias=True)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_heads, self.head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    @staticmethod
    def combine_last_two_dim(x):
        old_shape = list(x.size())
        new_shape = old_shape[:-2] + [old_shape[-2] * old_shape[-1]]
        return x.reshape(shape=new_shape)

    def forward(self, x, mask=None):
        output = self.layer_norm1(x)
        output = self.dropout(output)
        query = self.transpose_for_scores(self.query(output))
        key = self.transpose_for_scores(self.key(output))
        value = self.transpose_for_scores(self.value(output))
        attention_scores = torch.matmul(query, key.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.head_size)
        if mask is not None:
            mask = mask.unsqueeze(1).unsqueeze(2)
            attention_scores = mask_logits(attention_scores, mask)
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        attention_probs = self.dropout(attention_probs)
        value = torch.matmul(attention_probs, value)
        value = self.combine_last_two_dim(value.permute(0, 2, 1, 3))
        output = self.dropout(value)
        residual = output + x
        output = self.layer_norm2(residual)
        output = self.dropout(output)
        output = self.out_layer(output)
        output = self.dropout(output) + residual
        return output


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4, 'num_heads': 4, 'drop_rate': 0.5}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn.parallel
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-06
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
        in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, out_ptr0, out_ptr1, out_ptr2,
        ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
    tl.store(out_ptr1 + (x2 + 4 * y3), tmp0, xmask & ymask)
    tl.store(out_ptr2 + (x2 + 4 * y3), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr1 + x2, xmask)
    tmp26 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp1 = float('-inf')
    tmp2 = tmp0 == tmp1
    tmp3 = tmp2 == 0
    tmp4 = tmp3.to(tl.int64)
    tmp5 = tmp4 != 0
    tmp7 = tmp6 == tmp1
    tmp8 = tmp7 == 0
    tmp9 = tmp8.to(tl.int64)
    tmp10 = tmp9 != 0
    tmp11 = tmp5 | tmp10
    tmp13 = tmp12 == tmp1
    tmp14 = tmp13 == 0
    tmp15 = tmp14.to(tl.int64)
    tmp16 = tmp15 != 0
    tmp17 = tmp11 | tmp16
    tmp19 = tmp18 == tmp1
    tmp20 = tmp19 == 0
    tmp21 = tmp20.to(tl.int64)
    tmp22 = tmp21 != 0
    tmp23 = tmp17 | tmp22
    tmp24 = tmp23 == 0
    tmp28 = tmp26 + tmp27
    tmp30 = tmp28 + tmp29
    tmp32 = tmp30 + tmp31
    tmp33 = tmp25 / tmp32
    tmp34 = 0.0
    tmp35 = tl.where(tmp24, tmp34, tmp33)
    tl.store(out_ptr0 + x2, tmp35, xmask)


@triton.jit
def triton_poi_fused_convolution_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x2, tmp16, xmask)
    tl.store(out_ptr1 + x2, tmp28, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
        in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl.
        constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + y3, ymask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-06
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp13, xmask & ymask)


@triton.jit
def triton_poi_fused_convolution_9(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, ynumel,
        xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 4
    y1 = yindex // 4
    tmp0 = tl.load(in_out_ptr0 + (x2 + 4 * y3), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + y0, ymask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
        'evict_last')
    tmp4 = tl.load(in_ptr2 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + (x2 + 4 * y3), tmp6, xmask & ymask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13) = args
    args.clear()
    assert_size_stride(primals_1, (4,), (1,))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (4, 4, 1), (4, 1, 1))
    assert_size_stride(primals_13, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
            buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
            buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del primals_1
        del primals_2
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_convolution_2[grid(16, 4)](buf2, buf3, buf5, buf7,
            16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf4, (4, 4, 4), (16, 4, 1))
        buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf6, (4, 4, 4), (16, 4, 1))
        buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf8, (4, 4, 4), (16, 4, 1))
        buf9 = reinterpret_tensor(buf4, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf4
        triton_poi_fused_3[grid(64)](buf9, primals_5, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_5
        buf10 = reinterpret_tensor(buf6, (4, 4, 1, 4), (16, 4, 4, 1), 0)
        del buf6
        triton_poi_fused_3[grid(64)](buf10, primals_7, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_7
        buf11 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0),
            0), reinterpret_tensor(buf10, (16, 1, 4), (4, 0, 1), 0), out=buf11)
        buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_4[grid(256)](buf11, buf12, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_5[grid(256)](buf11, buf12, buf13, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del buf11
        del buf12
        buf14 = buf8
        del buf8
        triton_poi_fused_convolution_6[grid(64)](buf14, primals_9, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_9
        buf15 = reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 1), 0)
        del buf7
        extern_kernels.bmm(reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf14, (16, 4, 1), (4, 1, 0), 0), out=buf15)
        buf16 = buf1
        del buf1
        buf17 = buf0
        del buf0
        triton_poi_fused_add_native_layer_norm_7[grid(16)](buf15,
            primals_3, buf16, buf17, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf18 = buf5
        del buf5
        triton_poi_fused_add_native_layer_norm_8[grid(16, 4)](buf15,
            primals_3, buf16, buf17, primals_10, primals_11, buf18, 16, 4,
            XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
        del buf16
        del buf17
        del primals_11
        buf19 = buf3
        del buf3
        triton_poi_fused_convolution_9[grid(16, 4)](buf18, buf19, 16, 4,
            XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        buf20 = extern_kernels.convolution(buf19, primals_12, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False, output_padding=(
            0,), groups=1, bias=None)
        assert_size_stride(buf20, (4, 4, 4), (16, 4, 1))
        del buf19
        buf21 = reinterpret_tensor(buf20, (4, 4, 4), (16, 1, 4), 0)
        del buf20
        triton_poi_fused_add_10[grid(16, 4)](buf21, primals_13, buf15,
            primals_3, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
        del primals_13
    return (buf21, primals_3, primals_4, primals_6, primals_8, primals_10,
        primals_12, reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0),
        buf13, buf15, reinterpret_tensor(buf14, (16, 1, 4), (4, 4, 1), 0),
        reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 4), 0),
        reinterpret_tensor(buf18, (4, 4, 4), (16, 1, 4), 0))


def mask_logits(inputs, mask, mask_value=-1e+30):
    mask = mask.type(torch.float32)
    return inputs + (1.0 - mask) * mask_value


class Conv1D(nn.Module):

    def __init__(self, in_dim, out_dim, kernel_size=1, stride=1, padding=0,
            bias=True):
        super(Conv1D, self).__init__()
        self.conv1d = nn.Conv1d(in_channels=in_dim, out_channels=out_dim,
            kernel_size=kernel_size, padding=padding, stride=stride, bias=bias)

    def forward(self, x):
        x = x.transpose(1, 2)
        x = self.conv1d(x)
        return x.transpose(1, 2)


class MultiHeadAttentionBlockNew(nn.Module):

    def __init__(self, dim, num_heads, drop_rate):
        super(MultiHeadAttentionBlockNew, self).__init__()
        assert dim % num_heads == 0, 'The channels (%d) is not a multiple of attention heads (%d)' % (
            dim, num_heads)
        self.head_size, self.num_heads, self.dim = int(dim / num_heads
            ), num_heads, dim
        self.dropout = nn.Dropout(p=drop_rate)
        self.query = Conv1D(in_dim=dim, out_dim=dim, kernel_size=1, stride=
            1, padding=0, bias=True)
        self.key = Conv1D(in_dim=dim, out_dim=dim, kernel_size=1, stride=1,
            padding=0, bias=True)
        self.value = Conv1D(in_dim=dim, out_dim=dim, kernel_size=1, stride=
            1, padding=0, bias=True)
        self.layer_norm1 = nn.LayerNorm(dim, eps=1e-06)
        self.layer_norm2 = nn.LayerNorm(dim, eps=1e-06)
        self.out_layer = Conv1D(in_dim=dim, out_dim=dim, kernel_size=1,
            stride=1, padding=0, bias=True)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_heads, self.head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    @staticmethod
    def combine_last_two_dim(x):
        old_shape = list(x.size())
        new_shape = old_shape[:-2] + [old_shape[-2] * old_shape[-1]]
        return x.reshape(shape=new_shape)

    def forward(self, input_0):
        primals_4 = self.query.conv1d.weight
        primals_1 = self.query.conv1d.bias
        primals_6 = self.key.conv1d.weight
        primals_2 = self.key.conv1d.bias
        primals_8 = self.value.conv1d.weight
        primals_5 = self.value.conv1d.bias
        primals_7 = self.layer_norm1.weight
        primals_9 = self.layer_norm1.bias
        primals_10 = self.layer_norm2.weight
        primals_11 = self.layer_norm2.bias
        primals_12 = self.out_layer.conv1d.weight
        primals_13 = self.out_layer.conv1d.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13])
        return output[0]
MicroTensor-ai/episodic-memory
MultiHeadAttentionBlock
false
11709
[ "MIT" ]
0
295a3752ab94c7a6f45355aa2c54bffbf84b574f
https://github.com/MicroTensor-ai/episodic-memory/tree/295a3752ab94c7a6f45355aa2c54bffbf84b574f
LayerNorm
import torch
from torch import nn


class LayerNorm(nn.Module):

    def __init__(self, dim, eps=1e-05):
        super().__init__()
        self.eps = eps
        self.g = nn.Parameter(torch.ones(1, dim, 1, 1))
        self.b = nn.Parameter(torch.zeros(1, dim, 1, 1))

    def forward(self, x):
        std = torch.var(x, dim=1, unbiased=False, keepdim=True).sqrt()
        mean = torch.mean(x, dim=1, keepdim=True)
        return (x - mean) / (std + self.eps) * self.g + self.b


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp21 / tmp8 tmp23 = libdevice.sqrt(tmp22) tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = tmp10 / tmp25 tmp28 = tmp26 * tmp27 tmp30 = tmp28 + tmp29 tl.store(out_ptr0 + x3, tmp30, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_mul_sqrt_sub_var_0[grid(256)](primals_1, primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class LayerNormNew(nn.Module): def __init__(self, dim, eps=1e-05): super().__init__() self.eps = eps self.g = nn.Parameter(torch.ones(1, dim, 1, 1)) self.b = nn.Parameter(torch.zeros(1, dim, 1, 1)) def forward(self, input_0): primals_2 = self.g primals_3 = self.b primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Mohan-Zhang-u/vit-pytorch
LayerNorm
false
11,710
[ "MIT" ]
0
76050c812474d7c10d67db4e811f537e26c3996a
https://github.com/Mohan-Zhang-u/vit-pytorch/tree/76050c812474d7c10d67db4e811f537e26c3996a
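A quick numerical sketch of what this LayerNorm variant computes: unlike nn.LayerNorm, it normalizes over the channel dimension (dim=1) of an NCHW tensor, and with the initial parameters g=1, b=0 the output is just the standardized input. The check below uses an arbitrary random tensor.

import torch

x = torch.rand(4, 4, 4, 4)
mean = x.mean(dim=1, keepdim=True)
std = x.var(dim=1, unbiased=False, keepdim=True).sqrt()
y = (x - mean) / (std + 1e-05)
print(y.mean(dim=1).abs().max())  # ~0: zero mean across channels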
PEG
import torch from torch import nn class Residual(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x, **kwargs): return self.fn(x, **kwargs) + x class PEG(nn.Module): def __init__(self, dim, kernel_size=3): super().__init__() self.proj = Residual(nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=kernel_size // 2, groups=dim, stride=1)) def forward(self, x): return self.proj(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x3, xmask) tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(in_out_ptr0 + x3, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_convolution_0[grid(256)](buf1, primals_2, primals_3, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3 class Residual(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x, **kwargs): return self.fn(x, **kwargs) + x class PEGNew(nn.Module): def __init__(self, dim, kernel_size=3): super().__init__() self.proj = Residual(nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=kernel_size // 2, groups=dim, stride=1)) def forward(self, input_0): primals_1 = self.proj.fn.weight primals_2 = self.proj.fn.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Mohan-Zhang-u/vit-pytorch
PEG
false
11,711
[ "MIT" ]
0
76050c812474d7c10d67db4e811f537e26c3996a
https://github.com/Mohan-Zhang-u/vit-pytorch/tree/76050c812474d7c10d67db4e811f537e26c3996a
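For orientation, triton_poi_fused_add_convolution_0 in the PEG record folds two pointwise adds into one pass over the convolution output: the per-channel bias (tmp1) and the residual input (tmp3). A rough eager equivalent, with freshly initialized stand-in weights rather than the record's:

import torch
import torch.nn as nn

dim = 4
conv = nn.Conv2d(dim, dim, kernel_size=3, padding=1, groups=dim)
x = torch.rand(4, dim, 4, 4)
out = conv(x) + x  # bias add happens inside conv; residual add follows
print(out.shape)   # torch.Size([4, 4, 4, 4])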
ClassHead
import torch from torch import nn import torch.cuda class ClassHead(nn.Module): """ ClassHead RetinaFace head for classification branch. Args: inchannels (`int`): number of input channels. num_anchors (`int`): number of anchors. """ def __init__(self, inchannels: 'int'=512, num_anchors: 'int'=3): super().__init__() self.num_anchors = num_anchors self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2, kernel_size=(1, 1), stride=1, padding=0) def forward(self, x: 'torch.FloatTensor'): out = self.conv1x1(x) out = out.permute(0, 2, 3, 1).contiguous() return out.view(out.shape[0], -1, 2) def get_inputs(): return [torch.rand([4, 512, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.cuda assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None) @triton.jit def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x4 = xindex x0 = xindex % 6 tmp0 = tl.load(in_out_ptr0 + x4, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x4, tmp2, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (6, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_2, (6,), (1,)) assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512 ), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 6, 64, 64), (24576, 1, 384, 6)) buf2 = reinterpret_tensor(buf1, (4, 64, 64, 6), (24576, 384, 6, 1), 0) del buf1 buf3 = reinterpret_tensor(buf2, (4, 12288, 2), (24576, 2, 1), 0) del buf2 triton_poi_fused_clone_view_1[grid(98304)](buf3, primals_2, 98304, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 return buf3, primals_1, buf0 class ClassHeadNew(nn.Module): """ ClassHead RetinaFace head for classification branch. Args: inchannels (`int`): number of input channels. num_anchors (`int`): number of anchors. """ def __init__(self, inchannels: 'int'=512, num_anchors: 'int'=3): super().__init__() self.num_anchors = num_anchors self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2, kernel_size=(1, 1), stride=1, padding=0) def forward(self, input_0): primals_1 = self.conv1x1.weight primals_2 = self.conv1x1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
LoveEachDay/towhee
ClassHead
false
11,712
[ "Apache-2.0" ]
0
513c9c2626676cadaaf0a16ac3c828d96bec91a1
https://github.com/LoveEachDay/towhee/tree/513c9c2626676cadaaf0a16ac3c828d96bec91a1
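A small sketch of the permute/view step ClassHead performs after its 1x1 convolution (expressed via reinterpret_tensor in the generated code): a (B, A*2, H, W) map becomes (B, H*W*A, 2), one two-way face/background score per anchor per location.

import torch

B, A, H, W = 4, 3, 64, 64
out = torch.rand(B, A * 2, H, W)
out = out.permute(0, 2, 3, 1).contiguous().view(B, -1, 2)
print(out.shape)  # torch.Size([4, 12288, 2]); 64 * 64 * 3 = 12288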
FakeRKHSConvNet
import math
import torch
import numpy as np
import torch.nn as nn


class MaybeBatchNorm2d(nn.Module):

    def __init__(self, n_ftr, affine, use_bn):
        super(MaybeBatchNorm2d, self).__init__()
        self.bn = nn.BatchNorm2d(n_ftr, affine=affine)
        self.use_bn = use_bn

    def forward(self, x):
        if self.use_bn:
            x = self.bn(x)
        return x


class FakeRKHSConvNet(nn.Module):

    def __init__(self, n_input, n_output, use_bn=False):
        super(FakeRKHSConvNet, self).__init__()
        self.conv1 = nn.Conv2d(n_input, n_output, kernel_size=1, stride=1,
            padding=0, bias=False)
        self.bn1 = MaybeBatchNorm2d(n_output, True, use_bn)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(n_output, n_output, kernel_size=1, stride=1,
            padding=0, bias=False)
        self.bn_out = MaybeBatchNorm2d(n_output, True, True)
        self.shortcut = nn.Conv2d(n_input, n_output, kernel_size=1, stride=
            1, padding=0, bias=True)
        if n_output >= n_input:
            # dtype=bool: the np.bool alias was removed in NumPy 1.24
            eye_mask = np.zeros((n_output, n_input, 1, 1), dtype=bool)
            for i in range(n_input):
                eye_mask[i, i, 0, 0] = 1
            self.shortcut.weight.data.uniform_(-0.01, 0.01)
            self.shortcut.weight.data.masked_fill_(torch.tensor(eye_mask),
                1.0)

    def init_weights(self, init_scale=1.0):
        nn.init.kaiming_uniform_(self.conv1.weight, a=math.sqrt(5))
        self.conv1.weight.data.mul_(init_scale)
        nn.init.constant_(self.conv2.weight, 0.0)

    def forward(self, x):
        h_res = self.conv2(self.relu1(self.bn1(self.conv1(x))))
        h = self.bn_out(h_res + self.shortcut(x))
        return h


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_input': 4, 'n_output': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__native_batch_norm_legit_no_training_add_convolution_native_batch_norm_backward_1( in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x3, xmask) tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp6 = tmp4 - tmp5 tmp8 = 1e-05 tmp9 = tmp7 + tmp8 tmp10 = libdevice.sqrt(tmp9) tmp11 = tl.full([1], 1, tl.int32) tmp12 = tmp11 / tmp10 tmp13 = 1.0 tmp14 = tmp12 * tmp13 tmp15 = tmp6 * tmp14 tmp17 = tmp15 * tmp16 tmp19 = tmp17 + tmp18 tl.store(out_ptr0 + x3, tmp19, xmask) tl.store(out_ptr1 + x3, tmp6, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=128, num_warps =4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = extern_kernels.convolution(primals_2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1)) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf5 = empty_strided_cuda((4, 
4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused__native_batch_norm_legit_no_training_add_convolution_native_batch_norm_backward_1[
            grid(256)](buf2, buf3, primals_5, primals_6, primals_7,
            primals_8, primals_9, buf4, buf5, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del buf2
        del buf3
        del primals_5
        del primals_6
        del primals_9
    return (buf4, primals_1, primals_2, primals_3, primals_4, primals_7,
        primals_8, buf1, buf5)


class MaybeBatchNorm2d(nn.Module):

    def __init__(self, n_ftr, affine, use_bn):
        super(MaybeBatchNorm2d, self).__init__()
        self.bn = nn.BatchNorm2d(n_ftr, affine=affine)
        self.use_bn = use_bn

    def forward(self, x):
        if self.use_bn:
            x = self.bn(x)
        return x


class FakeRKHSConvNetNew(nn.Module):

    def __init__(self, n_input, n_output, use_bn=False):
        super(FakeRKHSConvNetNew, self).__init__()
        self.conv1 = nn.Conv2d(n_input, n_output, kernel_size=1, stride=1,
            padding=0, bias=False)
        self.bn1 = MaybeBatchNorm2d(n_output, True, use_bn)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(n_output, n_output, kernel_size=1, stride=1,
            padding=0, bias=False)
        self.bn_out = MaybeBatchNorm2d(n_output, True, True)
        self.shortcut = nn.Conv2d(n_input, n_output, kernel_size=1, stride=
            1, padding=0, bias=True)
        if n_output >= n_input:
            # dtype=bool: the np.bool alias was removed in NumPy 1.24
            eye_mask = np.zeros((n_output, n_input, 1, 1), dtype=bool)
            for i in range(n_input):
                eye_mask[i, i, 0, 0] = 1
            self.shortcut.weight.data.uniform_(-0.01, 0.01)
            self.shortcut.weight.data.masked_fill_(torch.tensor(eye_mask),
                1.0)

    def init_weights(self, init_scale=1.0):
        nn.init.kaiming_uniform_(self.conv1.weight, a=math.sqrt(5))
        self.conv1.weight.data.mul_(init_scale)
        nn.init.constant_(self.conv2.weight, 0.0)

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_5 = self.bn1.bn.weight
        primals_6 = self.bn1.bn.bias
        primals_3 = self.conv2.weight
        primals_7 = self.bn_out.bn.weight
        primals_8 = self.bn_out.bn.bias
        primals_4 = self.shortcut.weight
        primals_9 = self.shortcut.bias
        primals_2 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
Luab/pytorch-lightning-bolts
FakeRKHSConvNet
false
11,713
[ "Apache-2.0" ]
0
b8ac85154465956b06fd1005b21b071af5493f11
https://github.com/Luab/pytorch-lightning-bolts/tree/b8ac85154465956b06fd1005b21b071af5493f11
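A sketch of the shortcut initialization in FakeRKHSConvNet: for n_output >= n_input, the 1x1 shortcut weight is seeded with an identity mapping on the first n_input channels plus small uniform noise, so the residual path starts near identity. The tensor w below is a stand-in for the module's weight; dtype=bool replaces the np.bool alias that NumPy removed in 1.24.

import numpy as np
import torch

n_input = n_output = 4
eye_mask = np.zeros((n_output, n_input, 1, 1), dtype=bool)
for i in range(n_input):
    eye_mask[i, i, 0, 0] = 1
w = torch.empty(n_output, n_input, 1, 1).uniform_(-0.01, 0.01)
w.masked_fill_(torch.tensor(eye_mask), 1.0)
print(w[:, :, 0, 0])  # ~identity matrix with small off-diagonal noise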
SchedulerTestNet
import torch from torch.nn import functional as F class SchedulerTestNet(torch.nn.Module): """ adapted from: https://github.com/pytorch/pytorch/blob/master/test/test_optim.py """ def __init__(self): super(SchedulerTestNet, self).__init__() self.conv1 = torch.nn.Conv2d(1, 1, 1) self.conv2 = torch.nn.Conv2d(1, 1, 1) def forward(self, x): return self.conv2(F.relu(self.conv1(x))) def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp3) tl.store(in_out_ptr0 + x0, tmp5, None) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (1, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_2, (1,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_4, (1, 1, 1, 1), (1, 1, 1, 1)) assert_size_stride(primals_5, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(16384)](buf1, primals_2, 16384, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(16384)](buf3, primals_5, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1 class SchedulerTestNetNew(torch.nn.Module): """ adapted from: https://github.com/pytorch/pytorch/blob/master/test/test_optim.py """ def __init__(self): super(SchedulerTestNetNew, self).__init__() self.conv1 = torch.nn.Conv2d(1, 1, 1) self.conv2 = torch.nn.Conv2d(1, 1, 1) def forward(self, input_0): primals_1 = self.conv1.weight primals_2 = self.conv1.bias primals_4 = self.conv2.weight primals_5 = self.conv2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
Luab/pytorch-lightning-bolts
SchedulerTestNet
false
11,714
[ "Apache-2.0" ]
0
b8ac85154465956b06fd1005b21b071af5493f11
https://github.com/Luab/pytorch-lightning-bolts/tree/b8ac85154465956b06fd1005b21b071af5493f11
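The two Triton kernels in this record fuse only the pointwise tail of each convolution (scalar bias add, plus ReLU for the first conv); the convolutions themselves remain extern kernels. A rough eager equivalent with arbitrary stand-in weights:

import torch
import torch.nn.functional as F

x = torch.rand(4, 1, 64, 64)
w1, b1 = torch.rand(1, 1, 1, 1), torch.rand(1)
w2, b2 = torch.rand(1, 1, 1, 1), torch.rand(1)
out = F.conv2d(F.relu(F.conv2d(x, w1, b1)), w2, b2)
print(out.shape)  # torch.Size([4, 1, 64, 64])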
Project3D
import torch import torch.nn as nn class Project3D(nn.Module): """Layer which projects 3D points into a camera with intrinsics K and at position T """ def __init__(self, batch_size, height, width, eps=1e-07): super(Project3D, self).__init__() self.batch_size = batch_size self.height = height self.width = width self.eps = eps def forward(self, points, K, T): P = torch.matmul(K, T)[:, :3, :] cam_points = torch.matmul(P, points) pix_coords = cam_points[:, :2, :] / (cam_points[:, 2, :].unsqueeze( 1) + self.eps) pix_coords = pix_coords.view(self.batch_size, 2, self.height, self. width) pix_coords = pix_coords.permute(0, 2, 3, 1) pix_coords[..., 0] /= self.width - 1 pix_coords[..., 1] /= self.height - 1 pix_coords = (pix_coords - 0.5) * 2 return pix_coords def get_inputs(): return [torch.rand([4, 3, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {'batch_size': 4, 'height': 4, 'width': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 48 x1 = xindex // 48 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused_mul_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 2 x0 = xindex % 16 x2 = xindex // 32 x3 = xindex % 32 x4 = xindex tmp7 = tl.load(in_ptr0 + (x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr0 + (32 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (16 + x0 + 48 * x2), xmask, eviction_policy= 'evict_last') tmp22 = tl.load(in_ptr0 + (x3 + 48 * x2), xmask) tmp0 = x1 tmp1 = tl.full([1], 1, tl.int32) tmp2 = tmp0 == tmp1 tmp3 = tmp1 == tmp1 tmp4 = tl.full([1], 0, tl.int32) tmp5 = tmp1 == tmp4 tmp6 = tmp4 == tmp4 tmp9 = 1e-07 tmp10 = tmp8 + tmp9 tmp11 = tmp7 / tmp10 tmp12 = 0.3333333333333333 tmp13 = tmp11 * tmp12 tmp14 = tl.where(tmp6, tmp13, tmp11) tmp16 = tmp15 / tmp10 tmp17 = tl.where(tmp5, tmp13, tmp16) tmp18 = tl.where(tmp5, tmp14, tmp17) tmp19 = tmp18 * tmp12 tmp20 = tl.where(tmp3, tmp19, tmp18) tmp21 = tmp0 == tmp4 tmp23 = tmp22 / tmp10 tmp24 = tl.where(tmp21, tmp13, tmp23) tmp25 = tl.where(tmp21, tmp14, tmp24) tmp26 = tl.where(tmp2, tmp19, tmp25) tmp27 = tl.where(tmp2, tmp20, tmp26) tmp28 = 0.5 tmp29 = tmp27 - tmp28 tmp30 = 2.0 tmp31 = tmp29 * tmp30 tl.store(out_ptr0 + x4, tmp31, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 3, 4, 4), (48, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1 ), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(192)](buf0, buf1, 192, XBLOCK=256, num_warps=4, num_stages=1) del buf0 buf2 = empty_strided_cuda((12, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (12, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (12, 4, 4), (16, 4, 1), 0), out=buf2 ) del arg2_1 del buf1 buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 4, 1, 16), torch.float32) triton_poi_fused_mul_sub_1[grid(128)](buf2, buf3, 128, XBLOCK=128, num_warps=4, num_stages=1) del buf2 return buf3, class Project3DNew(nn.Module): """Layer which projects 3D points into a camera with intrinsics K and at position T """ def __init__(self, batch_size, height, width, eps=1e-07): super(Project3DNew, self).__init__() self.batch_size = batch_size self.height = height self.width = width self.eps = eps def 
forward(self, input_0, input_1, input_2): arg2_1 = input_0 arg0_1 = input_1 arg1_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
Morbotu/drone-PWS
Project3D
false
11,715
[ "MIT" ]
0
face9cbf30a55783592cce8af59c1c70da982b6a
https://github.com/Morbotu/drone-PWS/tree/face9cbf30a55783592cce8af59c1c70da982b6a
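A sketch of the coordinate normalization at the tail of Project3D: pixel coordinates are divided by (width - 1) and (height - 1), then shifted and scaled into the [-1, 1] range expected by torch.nn.functional.grid_sample.

W = 4
for px in [0.0, 1.5, 3.0]:
    print((px / (W - 1) - 0.5) * 2)  # -1.0, 0.0, 1.0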
BboxHead
import torch from torch import nn import torch.cuda class BboxHead(nn.Module): """ BboxHead RetinaFace head for bounding box branch. Args: inchannels (`int`): number of input channels. num_anchors (`int`): number of anchors. """ def __init__(self, inchannels: 'int'=512, num_anchors: 'int'=3): super().__init__() self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=( 1, 1), stride=1, padding=0) def forward(self, x: 'torch.FloatTensor'): out = self.conv1x1(x) out = out.permute(0, 2, 3, 1).contiguous() return out.view(out.shape[0], -1, 4) def get_inputs(): return [torch.rand([4, 512, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.cuda assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None) @triton.jit def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x4 = xindex x0 = xindex % 12 tmp0 = tl.load(in_out_ptr0 + x4, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x4, tmp2, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (12, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_2, (12,), (1,)) assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512 ), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 12, 64, 64), (49152, 1, 768, 12)) buf2 = reinterpret_tensor(buf1, (4, 64, 64, 12), (49152, 768, 12, 1), 0 ) del buf1 buf3 = reinterpret_tensor(buf2, (4, 12288, 4), (49152, 4, 1), 0) del buf2 triton_poi_fused_clone_view_1[grid(196608)](buf3, primals_2, 196608, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 return buf3, primals_1, buf0 class BboxHeadNew(nn.Module): """ BboxHead RetinaFace head for bounding box branch. Args: inchannels (`int`): number of input channels. num_anchors (`int`): number of anchors. """ def __init__(self, inchannels: 'int'=512, num_anchors: 'int'=3): super().__init__() self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=( 1, 1), stride=1, padding=0) def forward(self, input_0): primals_1 = self.conv1x1.weight primals_2 = self.conv1x1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
LoveEachDay/towhee
BboxHead
false
11,716
[ "Apache-2.0" ]
0
513c9c2626676cadaaf0a16ac3c828d96bec91a1
https://github.com/LoveEachDay/towhee/tree/513c9c2626676cadaaf0a16ac3c828d96bec91a1
LandmarkHead
import torch from torch import nn import torch.cuda class LandmarkHead(nn.Module): """ LandmarkHead RetinaFace head for landmark branch. inchannels (`int`): number of input channels. num_anchors (`int`): number of anchors. """ def __init__(self, inchannels: 'int'=512, num_anchors: 'int'=3): super().__init__() self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size= (1, 1), stride=1, padding=0) def forward(self, x: 'torch.FloatTensor'): out = self.conv1x1(x) out = out.permute(0, 2, 3, 1).contiguous() return out.view(out.shape[0], -1, 10) def get_inputs(): return [torch.rand([4, 512, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn import torch.cuda assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None) @triton.jit def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x4 = xindex x0 = xindex % 30 tmp0 = tl.load(in_out_ptr0 + x4, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x4, tmp2, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (30, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_2, (30,), (1,)) assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512 ), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 30, 64, 64), (122880, 1, 1920, 30)) buf2 = reinterpret_tensor(buf1, (4, 64, 64, 30), (122880, 1920, 30, 1), 0) del buf1 buf3 = reinterpret_tensor(buf2, (4, 12288, 10), (122880, 10, 1), 0) del buf2 triton_poi_fused_clone_view_1[grid(491520)](buf3, primals_2, 491520, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 return buf3, primals_1, buf0 class LandmarkHeadNew(nn.Module): """ LandmarkHead RetinaFace head for landmark branch. inchannels (`int`): number of input channels. num_anchors (`int`): number of anchors. """ def __init__(self, inchannels: 'int'=512, num_anchors: 'int'=3): super().__init__() self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size= (1, 1), stride=1, padding=0) def forward(self, input_0): primals_1 = self.conv1x1.weight primals_2 = self.conv1x1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
LoveEachDay/towhee
LandmarkHead
false
11,717
[ "Apache-2.0" ]
0
513c9c2626676cadaaf0a16ac3c828d96bec91a1
https://github.com/LoveEachDay/towhee/tree/513c9c2626676cadaaf0a16ac3c828d96bec91a1
AmdimNCELoss
import torch import torch.nn as nn def tanh_clip(x, clip_val=10.0): """ soft clip values to the range [-clip_val, +clip_val] """ if clip_val is not None: x_clip = clip_val * torch.tanh(1.0 / clip_val * x) else: x_clip = x return x_clip class AmdimNCELoss(nn.Module): """ Compute the NCE scores for predicting r_src->r_trg. """ def __init__(self, tclip): super().__init__() self.tclip = tclip def forward(self, anchor_representations, positive_representations, mask_mat): """ Args: anchor_representations: (batch_size, emb_dim) positive_representations: (emb_dim, n_batch * w* h) (ie: nb_feat_vectors x embedding_dim) mask_mat: (n_batch_gpu, n_batch) Output: raw_scores: (n_batch_gpu, n_locs) nce_scores: (n_batch_gpu, n_locs) lgt_reg : scalar """ r_src = anchor_representations r_trg = positive_representations batch_size, emb_dim = r_src.size() nb_feat_vectors = r_trg.size(1) // batch_size mask_pos = mask_mat.unsqueeze(dim=2).expand(-1, -1, nb_feat_vectors ).float() mask_neg = 1.0 - mask_pos raw_scores = torch.mm(r_src, r_trg).float() raw_scores = raw_scores.reshape(batch_size, batch_size, nb_feat_vectors ) raw_scores = raw_scores / emb_dim ** 0.5 lgt_reg = 0.05 * (raw_scores ** 2.0).mean() raw_scores = tanh_clip(raw_scores, clip_val=self.tclip) """ pos_scores includes scores for all the positive samples neg_scores includes scores for all the negative samples, with scores for positive samples set to the min score (-self.tclip here) """ pos_scores = (mask_pos * raw_scores).sum(dim=1) neg_scores = mask_neg * raw_scores - self.tclip * mask_pos neg_scores = neg_scores.reshape(batch_size, -1) mask_neg = mask_neg.reshape(batch_size, -1) neg_maxes = torch.max(neg_scores, dim=1, keepdim=True)[0] neg_sumexp = (mask_neg * torch.exp(neg_scores - neg_maxes)).sum(dim =1, keepdim=True) all_logsumexp = torch.log(torch.exp(pos_scores - neg_maxes) + neg_sumexp) pos_shiftexp = pos_scores - neg_maxes nce_scores = pos_shiftexp - all_logsumexp nce_scores = -nce_scores.mean() return nce_scores, lgt_reg def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'tclip': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_exp_log_max_mean_mul_neg_sub_sum_tanh_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp27 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp36 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp38 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = 0.5 tmp5 = tmp3 * tmp4 tmp6 = 0.25 tmp7 = tmp5 * tmp6 tmp8 = libdevice.tanh(tmp7) tmp9 = 4.0 tmp10 = tmp8 * tmp9 tmp11 = tmp2 * tmp10 tmp12 = tmp0 * tmp9 tmp13 = tmp11 - tmp12 tmp15 = tmp1 - tmp14 tmp17 = tmp16 * tmp4 tmp18 = tmp17 * tmp6 tmp19 = libdevice.tanh(tmp18) tmp20 = tmp19 * tmp9 tmp21 = tmp15 * tmp20 tmp22 = tmp14 * tmp9 tmp23 = tmp21 - tmp22 tmp24 = triton_helpers.maximum(tmp13, tmp23) tmp26 = tmp1 - tmp25 tmp28 = tmp27 * tmp4 tmp29 = tmp28 * tmp6 tmp30 = libdevice.tanh(tmp29) tmp31 = tmp30 * tmp9 tmp32 = tmp26 * tmp31 tmp33 = tmp25 * tmp9 tmp34 = tmp32 - tmp33 tmp35 = triton_helpers.maximum(tmp24, tmp34) tmp37 = tmp1 - tmp36 tmp39 = tmp38 * tmp4 tmp40 = tmp39 * tmp6 tmp41 = libdevice.tanh(tmp40) tmp42 = tmp41 * tmp9 tmp43 = tmp37 * tmp42 tmp44 = tmp36 * tmp9 tmp45 = tmp43 - tmp44 tmp46 = triton_helpers.maximum(tmp35, tmp45) tmp47 = tmp13 - tmp46 tmp48 = tl_math.exp(tmp47) tmp49 = tmp2 * tmp48 tmp50 = tmp23 - tmp46 tmp51 = tl_math.exp(tmp50) tmp52 = tmp15 * tmp51 tmp53 = tmp49 + tmp52 tmp54 = tmp34 - tmp46 tmp55 = tl_math.exp(tmp54) tmp56 = tmp26 * tmp55 tmp57 = tmp53 + tmp56 tmp58 = tmp45 - tmp46 tmp59 = tl_math.exp(tmp58) tmp60 = tmp37 * tmp59 tmp61 = tmp57 + tmp60 tmp62 = tmp0 * tmp10 tmp63 = tmp14 * tmp20 tmp64 = tmp62 + tmp63 tmp65 = tmp25 * tmp31 tmp66 = tmp64 + tmp65 tmp67 = tmp36 * tmp42 tmp68 = tmp66 + tmp67 tmp69 = tmp68 - tmp46 tmp70 = tl_math.exp(tmp69) tmp71 = tmp70 + tmp61 tmp72 = tl_math.log(tmp71) tmp73 = tmp69 - tmp72 tmp74 = tl.broadcast_to(tmp73, [XBLOCK, RBLOCK]) tmp76 = tl.sum(tmp74, 1)[:, None] tmp77 = tmp76 / tmp9 tmp78 = -tmp77 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp78, None) @triton.jit def triton_per_fused_div_mean_mul_pow_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 
= rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = tmp2 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.sum(tmp4, 1)[:, None] tmp7 = 16.0 tmp8 = tmp6 / tmp7 tmp9 = 0.05 tmp10 = tmp8 * tmp9 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) assert_size_stride(arg2_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg0_1, arg1_1, out=buf0) del arg0_1 del arg1_1 buf4 = empty_strided_cuda((), (), torch.float32) buf6 = buf4 del buf4 get_raw_stream(0) triton_per_fused_add_div_exp_log_max_mean_mul_neg_sub_sum_tanh_0[grid (1)](buf6, arg2_1, buf0, 1, 4, XBLOCK=1, num_warps=2, num_stages=1) del arg2_1 buf5 = empty_strided_cuda((), (), torch.float32) buf7 = buf5 del buf5 triton_per_fused_div_mean_mul_pow_1[grid(1)](buf7, buf0, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf0 return buf6, buf7 def tanh_clip(x, clip_val=10.0): """ soft clip values to the range [-clip_val, +clip_val] """ if clip_val is not None: x_clip = clip_val * torch.tanh(1.0 / clip_val * x) else: x_clip = x return x_clip class AmdimNCELossNew(nn.Module): """ Compute the NCE scores for predicting r_src->r_trg. """ def __init__(self, tclip): super().__init__() self.tclip = tclip def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0], output[1]
Luab/pytorch-lightning-bolts
AmdimNCELoss
false
11,718
[ "Apache-2.0" ]
0
b8ac85154465956b06fd1005b21b071af5493f11
https://github.com/Luab/pytorch-lightning-bolts/tree/b8ac85154465956b06fd1005b21b071af5493f11
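A small sketch of the tanh_clip helper from the record above: it squashes raw scores smoothly into (-clip_val, clip_val), so extreme scores saturate near the bounds while scores near zero pass through almost unchanged. The input values are arbitrary.

import torch

x = torch.tensor([-100.0, -1.0, 0.0, 1.0, 100.0])
clip_val = 4.0
print(clip_val * torch.tanh(x / clip_val))
# extremes saturate near +/-4.0; the middle values are barely altered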
Conv3x3
import torch import torch.nn as nn class Conv3x3(nn.Module): """Layer to pad and convolve input """ def __init__(self, in_channels, out_channels, use_refl=True): super(Conv3x3, self).__init__() if use_refl: self.pad = nn.ReflectionPad2d(1) else: self.pad = nn.ZeroPad2d(1) self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3) def forward(self, x): out = self.pad(x) out = self.conv(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0 class Conv3x3New(nn.Module): """Layer to pad and convolve input """ def __init__(self, in_channels, out_channels, use_refl=True): super(Conv3x3New, self).__init__() if use_refl: self.pad = nn.ReflectionPad2d(1) else: self.pad = nn.ZeroPad2d(1) self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Morbotu/drone-PWS
Conv3x3
false
11,719
[ "MIT" ]
0
face9cbf30a55783592cce8af59c1c70da982b6a
https://github.com/Morbotu/drone-PWS/tree/face9cbf30a55783592cce8af59c1c70da982b6a
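The gather expression in triton_poi_fused_reflection_pad2d_0, 15 - |-3 + |-1 + x0|| - 4*|-3 + |-1 + x1|| + 16*x2, is reflection-pad index arithmetic: it decomposes as 16*x2 + 4*row + col, with row and col each given by the 1D rule sketched below for a width-4 map padded to width 6.

def refl(x):
    # maps padded coordinate 0..5 onto source coordinate 0..3
    return 3 - abs(-3 + abs(-1 + x))

print([refl(x) for x in range(6)])  # [1, 0, 1, 2, 3, 2]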
ConvBlock
import torch import torch.nn as nn class Conv3x3(nn.Module): """Layer to pad and convolve input """ def __init__(self, in_channels, out_channels, use_refl=True): super(Conv3x3, self).__init__() if use_refl: self.pad = nn.ReflectionPad2d(1) else: self.pad = nn.ZeroPad2d(1) self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3) def forward(self, x): out = self.pad(x) out = self.conv(out) return out class ConvBlock(nn.Module): """Layer to perform a convolution followed by ELU """ def __init__(self, in_channels, out_channels): super(ConvBlock, self).__init__() self.conv = Conv3x3(in_channels, out_channels) self.nonlin = nn.ELU(inplace=True) def forward(self, x): out = self.conv(x) out = self.nonlin(out) return out def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 576 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 % 6 x2 = xindex // 36 x3 = xindex tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_convolution_elu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 1.0 tmp6 = tmp2 * tmp5 tmp7 = libdevice.expm1(tmp6) tmp8 = tmp7 * tmp5 tmp9 = tl.where(tmp4, tmp6, tmp8) tl.store(in_out_ptr0 + x3, tmp9, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_elu_1[grid(256)](buf2, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf2, primals_2, buf0, buf2 class Conv3x3(nn.Module): """Layer to pad and convolve input """ def __init__(self, in_channels, out_channels, use_refl=True): super(Conv3x3, self).__init__() if use_refl: self.pad = nn.ReflectionPad2d(1) else: self.pad = nn.ZeroPad2d(1) self.conv = nn.Conv2d(int(in_channels), int(out_channels), 3) def forward(self, x): out = self.pad(x) out = self.conv(out) return out class ConvBlockNew(nn.Module): """Layer to perform a convolution followed by ELU """ def __init__(self, in_channels, out_channels): super(ConvBlockNew, self).__init__() self.conv = Conv3x3(in_channels, out_channels) self.nonlin = nn.ELU(inplace=True) def forward(self, input_0): primals_2 = self.conv.conv.weight primals_3 = self.conv.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
Morbotu/drone-PWS
ConvBlock
false
11,720
[ "MIT" ]
0
face9cbf30a55783592cce8af59c1c70da982b6a
https://github.com/Morbotu/drone-PWS/tree/face9cbf30a55783592cce8af59c1c70da982b6a
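The tail of triton_poi_fused_convolution_elu_1 implements ELU with alpha=1 as where(x > 0, x, expm1(x)); the two multiplications by 1.0 in the kernel are the alpha factors. A short check of that identity against the eager op:

import torch

x = torch.linspace(-2, 2, 5)
manual = torch.where(x > 0, x, torch.expm1(x))
print(torch.allclose(manual, torch.nn.functional.elu(x)))  # True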
Scaled_Dot_Product_Attention
import torch
import torch.nn as nn
import torch.nn.functional as F


class Scaled_Dot_Product_Attention(nn.Module):
    """Scaled Dot-Product Attention
    """

    def __init__(self):
        super(Scaled_Dot_Product_Attention, self).__init__()

    def forward(self, Q, K, V, scale=None):
        """
        Args:
            Q: [batch_size, len_Q, dim_Q]
            K: [batch_size, len_K, dim_K]
            V: [batch_size, len_V, dim_V]
            scale: scaling factor; the paper uses sqrt(dim_K)
        Return:
            the tensor after self-attention, along with the attention tensor
        """
        attention = torch.matmul(Q, K.permute(0, 2, 1))
        if scale:
            attention = attention * scale
        attention = F.softmax(attention, dim=-1)
        context = torch.matmul(attention, V)
        return context


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), ( 16, 1, 4), 0), out=buf0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 extern_kernels.bmm(buf2, arg2_1, out=buf3) del arg2_1 del buf2 return buf3, class Scaled_Dot_Product_AttentionNew(nn.Module): """Scaled Dot-Product Attention """ def __init__(self): super(Scaled_Dot_Product_AttentionNew, self).__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
Moon-xm/Chinese-Text-Classification-Pytorch
Scaled_Dot_Product_Attention
false
11,721
[ "MIT" ]
0
19fe64006418bf4296f884e4d1f038c17b34d3de
https://github.com/Moon-xm/Chinese-Text-Classification-Pytorch/tree/19fe64006418bf4296f884e4d1f038c17b34d3de
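This record splits softmax into two kernels: triton_poi_fused__softmax_0 computes exp(x - rowmax) for numerical stability and triton_poi_fused__softmax_1 divides by the row sum. The same decomposition in eager PyTorch, on an arbitrary score matrix:

import torch

scores = torch.rand(4, 4)
shifted = (scores - scores.max(dim=-1, keepdim=True).values).exp()
out = shifted / shifted.sum(dim=-1, keepdim=True)
print(torch.allclose(out, scores.softmax(dim=-1)))  # True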
Discriminator
import torch import numpy as np import torch.nn as nn from torch.nn import functional as F class Discriminator(nn.Module): def __init__(self, img_shape, hidden_dim=1024): super().__init__() in_dim = int(np.prod(img_shape)) self.fc1 = nn.Linear(in_dim, hidden_dim) self.fc2 = nn.Linear(self.fc1.out_features, self.fc1.out_features // 2) self.fc3 = nn.Linear(self.fc2.out_features, self.fc2.out_features // 2) self.fc4 = nn.Linear(self.fc3.out_features, 1) def forward(self, img): x = img.view(img.size(0), -1) x = F.leaky_relu(self.fc1(x), 0.2) x = F.dropout(x, 0.3) x = F.leaky_relu(self.fc2(x), 0.2) x = F.dropout(x, 0.3) x = F.leaky_relu(self.fc3(x), 0.2) x = F.dropout(x, 0.3) return torch.sigmoid(self.fc4(x)) def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'img_shape': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 1024
    tmp0 = tl.load(in_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x2, tmp4, None)
    tl.store(out_ptr1 + x2, tmp7, None)


@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512
    tmp0 = tl.load(in_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x2, tmp4, None)
    tl.store(out_ptr1 + x2, tmp7, None)


@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr1 + x2, tmp7, xmask)


@triton.jit
def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.sigmoid(tmp3)
    tl.store(in_out_ptr0 + x0, tmp4, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (1024, 4), (4, 1))
    assert_size_stride(primals_3, (1024,), (1,))
    assert_size_stride(primals_4, (512, 1024), (1024, 1))
    assert_size_stride(primals_5, (512,), (1,))
    assert_size_stride(primals_6, (256, 512), (512, 1))
    assert_size_stride(primals_7, (256,), (1,))
    assert_size_stride(primals_8, (1, 256), (256, 1))
    assert_size_stride(primals_9, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
        extern_kernels.mm(primals_1, reinterpret_tensor(primals_2,
            (4, 1024), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool)
        buf2 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_leaky_relu_0[grid(4096)](buf0, primals_3, buf1,
            buf2, 4096, XBLOCK=128, num_warps=4, num_stages=1)
        del buf0
        del primals_3
        buf3 = torch.ops.aten.native_dropout.default(buf2, 0.3, True)
        del buf2
        buf4 = buf3[0]
        buf5 = buf3[1]
        del buf3
        buf6 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
        extern_kernels.mm(buf4, reinterpret_tensor(primals_4, (1024, 512),
            (1, 1024), 0), out=buf6)
        buf7 = empty_strided_cuda((4, 512), (512, 1), torch.bool)
        buf8 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
        triton_poi_fused_leaky_relu_1[grid(2048)](buf6, primals_5, buf7,
            buf8, 2048, XBLOCK=256, num_warps=4, num_stages=1)
        del buf6
        del primals_5
        buf9 = torch.ops.aten.native_dropout.default(buf8, 0.3, True)
        del buf8
        buf10 = buf9[0]
        buf11 = buf9[1]
        del buf9
        buf12 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
        extern_kernels.mm(buf10, reinterpret_tensor(primals_6, (512, 256),
            (1, 512), 0), out=buf12)
        buf13 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
        buf14 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
        triton_poi_fused_leaky_relu_2[grid(1024)](buf12, primals_7, buf13,
            buf14, 1024, XBLOCK=256, num_warps=4, num_stages=1)
        del buf12
        del primals_7
        buf15 = torch.ops.aten.native_dropout.default(buf14, 0.3, True)
        del buf14
        buf16 = buf15[0]
        buf17 = buf15[1]
        del buf15
        buf18 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
        extern_kernels.mm(buf16, reinterpret_tensor(primals_8, (256, 1),
            (1, 256), 0), out=buf18)
        buf19 = buf18
        del buf18
        triton_poi_fused_sigmoid_3[grid(4)](buf19, primals_9, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
        del primals_9
    return (buf19, primals_1, buf1, buf4, buf5, buf7, buf10, buf11, buf13,
        buf16, buf17, buf19, primals_8, primals_6, primals_4)


class DiscriminatorNew(nn.Module):

    def __init__(self, img_shape, hidden_dim=1024):
        super().__init__()
        in_dim = int(np.prod(img_shape))
        self.fc1 = nn.Linear(in_dim, hidden_dim)
        self.fc2 = nn.Linear(self.fc1.out_features, self.fc1.out_features // 2)
        self.fc3 = nn.Linear(self.fc2.out_features, self.fc2.out_features // 2)
        self.fc4 = nn.Linear(self.fc3.out_features, 1)

    def forward(self, input_0):
        primals_2 = self.fc1.weight
        primals_3 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.fc3.weight
        primals_7 = self.fc3.bias
        primals_8 = self.fc4.weight
        primals_9 = self.fc4.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
Luab/pytorch-lightning-bolts
Discriminator
false
11722
[ "Apache-2.0" ]
0
b8ac85154465956b06fd1005b21b071af5493f11
https://github.com/Luab/pytorch-lightning-bolts/tree/b8ac85154465956b06fd1005b21b071af5493f11
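A minimal smoke test for the Discriminator record above; the img_shape value and variable names are illustrative, and a CUDA device is assumed because call() pins device 0. Note that the trace invokes native_dropout with train=True, so repeated forward passes on the same input are stochastic.

import torch

# Illustrative check (assumes CUDA): prod((1, 2, 2)) = 4 matches the
# (4, 4) input shape that call() asserts.
model = DiscriminatorNew(img_shape=(1, 2, 2)).cuda()
x = torch.rand(4, 4, device='cuda')
out = model(x)
print(out.shape)                        # torch.Size([4, 1]), sigmoid scores
print(torch.equal(model(x), model(x)))  # usually False: dropout is traced live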
DenseBlock
import torch
from torch import nn as nn
from torch.nn import functional as F


class CausalConv1d(nn.Module):
    """A 1D causal convolution layer.

    Input: (B, D_in, T), where B is the minibatch size, D_in is the number of
    dimensions per step, and T is the number of steps.

    Output: (B, D_out, T), where B is the minibatch size, D_out is the number
    of dimensions in the output, and T is the number of steps.

    Arguments:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
    """

    def __init__(self, in_channels, out_channels, dilation=1):
        super(CausalConv1d, self).__init__()
        self.padding = dilation
        self.causal_conv = nn.Conv1d(in_channels, out_channels, 2,
            padding=self.padding, dilation=dilation)

    def forward(self, minibatch):
        return self.causal_conv(minibatch)[:, :, :-self.padding]


class DenseBlock(nn.Module):
    """Two parallel 1D causal convolution layers w/tanh and sigmoid activations

    Input: (B, D_in, T), where B is the minibatch size, D_in is the number of
    dimensions of the input, and T is the number of steps.

    Output: (B, D_in+F, T), where where `B` is the minibatch size, `D_in` is
    the number of dimensions of the input, `F` is the number of filters, and
    `T` is the length of the input sequence.

    Arguments:
        in_channels (int): number of input channels
        filters (int): number of filters per channel
    """

    def __init__(self, in_channels, filters, dilation=1):
        super(DenseBlock, self).__init__()
        self.causal_conv1 = CausalConv1d(in_channels, filters, dilation=dilation)
        self.causal_conv2 = CausalConv1d(in_channels, filters, dilation=dilation)

    def forward(self, minibatch):
        tanh = F.tanh(self.causal_conv1(minibatch))
        sig = F.sigmoid(self.causal_conv2(minibatch))
        out = torch.cat([minibatch, tanh * sig], dim=1)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'filters': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 80
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 5 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 8
    x0 = xindex % 4
    x2 = xindex // 32
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 5 * (-4 + x1) + 20 * x2), tmp6 & xmask, other=0.0)
    tmp10 = libdevice.tanh(tmp9)
    tmp11 = tl.load(in_ptr2 + (x0 + 5 * (-4 + x1) + 20 * x2), tmp6 & xmask, other=0.0)
    tmp12 = tl.sigmoid(tmp11)
    tmp13 = tmp10 * tmp12
    tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
    tmp15 = tl.where(tmp6, tmp13, tmp14)
    tmp16 = tl.where(tmp4, tmp5, tmp15)
    tl.store(out_ptr0 + x3, tmp16, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 2), (8, 2, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 2), (8, 2, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
            padding=(1,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 5), (20, 5, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(80)](buf1, primals_2, 80,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,),
            padding=(1,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 5), (20, 5, 1))
        buf3 = buf2
        del buf2
        triton_poi_fused_convolution_0[grid(80)](buf3, primals_5, 80,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_5
        buf4 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
        triton_poi_fused_cat_1[grid(128)](primals_3, buf1, buf3, buf4, 128,
            XBLOCK=128, num_warps=4, num_stages=1)
    return buf4, primals_1, primals_3, primals_4, buf1, buf3


class CausalConv1d(nn.Module):
    """A 1D causal convolution layer.

    Input: (B, D_in, T), where B is the minibatch size, D_in is the number of
    dimensions per step, and T is the number of steps.

    Output: (B, D_out, T), where B is the minibatch size, D_out is the number
    of dimensions in the output, and T is the number of steps.

    Arguments:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
    """

    def __init__(self, in_channels, out_channels, dilation=1):
        super(CausalConv1d, self).__init__()
        self.padding = dilation
        self.causal_conv = nn.Conv1d(in_channels, out_channels, 2,
            padding=self.padding, dilation=dilation)

    def forward(self, minibatch):
        return self.causal_conv(minibatch)[:, :, :-self.padding]


class DenseBlockNew(nn.Module):
    """Two parallel 1D causal convolution layers w/tanh and sigmoid activations

    Input: (B, D_in, T), where B is the minibatch size, D_in is the number of
    dimensions of the input, and T is the number of steps.

    Output: (B, D_in+F, T), where where `B` is the minibatch size, `D_in` is
    the number of dimensions of the input, `F` is the number of filters, and
    `T` is the length of the input sequence.

    Arguments:
        in_channels (int): number of input channels
        filters (int): number of filters per channel
    """

    def __init__(self, in_channels, filters, dilation=1):
        super(DenseBlockNew, self).__init__()
        self.causal_conv1 = CausalConv1d(in_channels, filters, dilation=dilation)
        self.causal_conv2 = CausalConv1d(in_channels, filters, dilation=dilation)

    def forward(self, input_0):
        primals_1 = self.causal_conv1.causal_conv.weight
        primals_2 = self.causal_conv1.causal_conv.bias
        primals_4 = self.causal_conv2.causal_conv.weight
        primals_5 = self.causal_conv2.causal_conv.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
NagisaZj/ProMP
DenseBlock
false
11723
[ "MIT" ]
0
539739ae2b7d5fdcad00855da695f643b23df4b3
https://github.com/NagisaZj/ProMP/tree/539739ae2b7d5fdcad00855da695f643b23df4b3
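Since the DenseBlockNew wrapper reuses the eager module's parameter layout, the two variants can be compared directly; a minimal sketch, assuming a CUDA device and the classes from the record above:

import torch

eager = DenseBlock(in_channels=4, filters=4).cuda()
fused = DenseBlockNew(in_channels=4, filters=4).cuda()
fused.load_state_dict(eager.state_dict())  # identical parameter names
x = torch.rand(4, 4, 4, device='cuda')
out = fused(x)
assert out.shape == (4, 8, 4)              # input channels + filters
assert torch.allclose(eager(x), out, atol=1e-6)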
SSIM
import torch
import torch.nn as nn


class SSIM(nn.Module):
    """Layer to compute the SSIM loss between a pair of images
    """

    def __init__(self):
        super(SSIM, self).__init__()
        self.mu_x_pool = nn.AvgPool2d(3, 1)
        self.mu_y_pool = nn.AvgPool2d(3, 1)
        self.sig_x_pool = nn.AvgPool2d(3, 1)
        self.sig_y_pool = nn.AvgPool2d(3, 1)
        self.sig_xy_pool = nn.AvgPool2d(3, 1)
        self.refl = nn.ReflectionPad2d(1)
        self.C1 = 0.01 ** 2
        self.C2 = 0.03 ** 2

    def forward(self, x, y):
        x = self.refl(x)
        y = self.refl(y)
        mu_x = self.mu_x_pool(x)
        mu_y = self.mu_y_pool(y)
        sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2
        sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2
        sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y
        SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2)
        SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y + self.C2)
        return torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_reflection_pad2d_0(in_ptr0, in_ptr1, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 6
    x1 = xindex // 6 % 6
    x2 = xindex // 36
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) +
        -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) +
        -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1(
        in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 6 * x1 + 36 * x2), xmask)
    tmp1 = tl.load(in_ptr0 + (1 + x0 + 6 * x1 + 36 * x2), xmask)
    tmp3 = tl.load(in_ptr0 + (2 + x0 + 6 * x1 + 36 * x2), xmask)
    tmp5 = tl.load(in_ptr0 + (6 + x0 + 6 * x1 + 36 * x2), xmask)
    tmp7 = tl.load(in_ptr0 + (7 + x0 + 6 * x1 + 36 * x2), xmask)
    tmp9 = tl.load(in_ptr0 + (8 + x0 + 6 * x1 + 36 * x2), xmask)
    tmp11 = tl.load(in_ptr0 + (12 + x0 + 6 * x1 + 36 * x2), xmask)
    tmp13 = tl.load(in_ptr0 + (13 + x0 + 6 * x1 + 36 * x2), xmask)
    tmp15 = tl.load(in_ptr0 + (14 + x0 + 6 * x1 + 36 * x2), xmask)
    tmp19 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) +
        -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
        tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
    tmp22 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
        tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
    tmp24 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) +
        -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
        tl_math.abs(-3 + x1) + 16 * x2), xmask)
    tmp28 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
        tl_math.abs(-3 + x1) + 16 * x2), xmask)
    tmp30 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) +
        -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
        tl_math.abs(-2 + x1) + 16 * x2), xmask)
    tmp34 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
        tl_math.abs(-2 + x1) + 16 * x2), xmask)
    tmp55 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) +
        -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask,
        eviction_policy='evict_last')
    tmp56 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
        tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
    tmp58 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
        tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), xmask)
    tmp60 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) +
        -4 * tl_math.abs(-3 + x1) + 16 * x2), xmask, eviction_policy='evict_last')
    tmp62 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
        tl_math.abs(-3 + x1) + 16 * x2), xmask)
    tmp64 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
        tl_math.abs(-3 + x1) + 16 * x2), xmask)
    tmp66 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) +
        -4 * tl_math.abs(-2 + x1) + 16 * x2), xmask, eviction_policy='evict_last')
    tmp68 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-3 + x0) + -4 *
        tl_math.abs(-2 + x1) + 16 * x2), xmask)
    tmp70 = tl.load(in_ptr2 + (15 + -1 * tl_math.abs(-2 + x0) + -4 *
        tl_math.abs(-2 + x1) + 16 * x2), xmask)
    tmp2 = tmp1 + tmp0
    tmp4 = tmp3 + tmp2
    tmp6 = tmp5 + tmp4
    tmp8 = tmp7 + tmp6
    tmp10 = tmp9 + tmp8
    tmp12 = tmp11 + tmp10
    tmp14 = tmp13 + tmp12
    tmp16 = tmp15 + tmp14
    tmp17 = 0.1111111111111111
    tmp18 = tmp16 * tmp17
    tmp21 = tmp20 + tmp19
    tmp23 = tmp22 + tmp21
    tmp25 = tmp24 + tmp23
    tmp27 = tmp26 + tmp25
    tmp29 = tmp28 + tmp27
    tmp31 = tmp30 + tmp29
    tmp33 = tmp32 + tmp31
    tmp35 = tmp34 + tmp33
    tmp36 = tmp35 * tmp17
    tmp37 = tmp19 * tmp19
    tmp38 = tmp20 * tmp20
    tmp39 = tmp38 + tmp37
    tmp40 = tmp22 * tmp22
    tmp41 = tmp40 + tmp39
    tmp42 = tmp24 * tmp24
    tmp43 = tmp42 + tmp41
    tmp44 = tmp26 * tmp26
    tmp45 = tmp44 + tmp43
    tmp46 = tmp28 * tmp28
    tmp47 = tmp46 + tmp45
    tmp48 = tmp30 * tmp30
    tmp49 = tmp48 + tmp47
    tmp50 = tmp32 * tmp32
    tmp51 = tmp50 + tmp49
    tmp52 = tmp34 * tmp34
    tmp53 = tmp52 + tmp51
    tmp54 = tmp53 * tmp17
    tmp57 = tmp56 + tmp55
    tmp59 = tmp58 + tmp57
    tmp61 = tmp60 + tmp59
    tmp63 = tmp62 + tmp61
    tmp65 = tmp64 + tmp63
    tmp67 = tmp66 + tmp65
    tmp69 = tmp68 + tmp67
    tmp71 = tmp70 + tmp69
    tmp72 = tmp71 * tmp17
    tmp73 = tmp55 * tmp55
    tmp74 = tmp56 * tmp56
    tmp75 = tmp74 + tmp73
    tmp76 = tmp58 * tmp58
    tmp77 = tmp76 + tmp75
    tmp78 = tmp60 * tmp60
    tmp79 = tmp78 + tmp77
    tmp80 = tmp62 * tmp62
    tmp81 = tmp80 + tmp79
    tmp82 = tmp64 * tmp64
    tmp83 = tmp82 + tmp81
    tmp84 = tmp66 * tmp66
    tmp85 = tmp84 + tmp83
    tmp86 = tmp68 * tmp68
    tmp87 = tmp86 + tmp85
    tmp88 = tmp70 * tmp70
    tmp89 = tmp88 + tmp87
    tmp90 = tmp89 * tmp17
    tmp91 = 2.0
    tmp92 = tmp36 * tmp91
    tmp93 = tmp92 * tmp72
    tmp94 = 0.0001
    tmp95 = tmp93 + tmp94
    tmp96 = tmp36 * tmp72
    tmp97 = tmp18 - tmp96
    tmp98 = tmp97 * tmp91
    tmp99 = 0.0009
    tmp100 = tmp98 + tmp99
    tmp101 = tmp95 * tmp100
    tmp102 = tmp36 * tmp36
    tmp103 = tmp72 * tmp72
    tmp104 = tmp102 + tmp103
    tmp105 = tmp104 + tmp94
    tmp106 = tmp54 - tmp102
    tmp107 = tmp90 - tmp103
    tmp108 = tmp106 + tmp107
    tmp109 = tmp108 + tmp99
    tmp110 = tmp105 * tmp109
    tmp111 = tmp101 / tmp110
    tmp112 = 1.0
    tmp113 = tmp112 - tmp111
    tmp114 = 0.5
    tmp115 = tmp113 * tmp114
    tmp116 = 0.0
    tmp117 = triton_helpers.maximum(tmp115, tmp116)
    tmp118 = triton_helpers.minimum(tmp117, tmp112)
    tl.store(in_out_ptr0 + x3, tmp118, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_reflection_pad2d_0[grid(576)](arg0_1, arg1_1,
            buf2, 576, XBLOCK=256, num_warps=4, num_stages=1)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf6 = buf0
        del buf0
        buf7 = buf6
        del buf6
        triton_poi_fused_add_avg_pool2d_clamp_div_mul_pow_reflection_pad2d_rsub_sub_1[
            grid(256)](buf7, buf2, arg0_1, arg1_1, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del arg0_1
        del arg1_1
        del buf2
    return buf7,


class SSIMNew(nn.Module):
    """Layer to compute the SSIM loss between a pair of images
    """

    def __init__(self):
        super(SSIMNew, self).__init__()
        self.mu_x_pool = nn.AvgPool2d(3, 1)
        self.mu_y_pool = nn.AvgPool2d(3, 1)
        self.sig_x_pool = nn.AvgPool2d(3, 1)
        self.sig_y_pool = nn.AvgPool2d(3, 1)
        self.sig_xy_pool = nn.AvgPool2d(3, 1)
        self.refl = nn.ReflectionPad2d(1)
        self.C1 = 0.01 ** 2
        self.C2 = 0.03 ** 2

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
Morbotu/drone-PWS
SSIM
false
11724
[ "MIT" ]
0
face9cbf30a55783592cce8af59c1c70da982b6a
https://github.com/Morbotu/drone-PWS/tree/face9cbf30a55783592cce8af59c1c70da982b6a
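The fused kernel folds the reflection padding, all five average pools, and the SSIM formula into a single pass, and like the eager module it returns an elementwise loss map rather than a scalar; a usage sketch, assuming CUDA:

import torch

ssim = SSIMNew().cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
y = torch.rand(4, 4, 4, 4, device='cuda')
loss_map = ssim(x, y)     # shape (4, 4, 4, 4), values clamped to [0, 1]
loss = loss_map.mean()    # typical scalar reduction for training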
Multi_Head_Attention
import torch
import torch.nn as nn
import torch.nn.functional as F


class Scaled_Dot_Product_Attention(nn.Module):
    """Scaled Dot-Product Attention """

    def __init__(self):
        super(Scaled_Dot_Product_Attention, self).__init__()

    def forward(self, Q, K, V, scale=None):
        """
        Args:
            Q: [batch_size, len_Q, dim_Q]
            K: [batch_size, len_K, dim_K]
            V: [batch_size, len_V, dim_V]
            scale: scaling factor; the paper uses sqrt(dim_K)
        Return:
            the tensor after self-attention, and the attention tensor
        """
        attention = torch.matmul(Q, K.permute(0, 2, 1))
        if scale:
            attention = attention * scale
        attention = F.softmax(attention, dim=-1)
        context = torch.matmul(attention, V)
        return context


class Multi_Head_Attention(nn.Module):

    def __init__(self, dim_model, num_head, dropout=0.0):
        super(Multi_Head_Attention, self).__init__()
        self.num_head = num_head
        assert dim_model % num_head == 0
        self.dim_head = dim_model // self.num_head
        self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_K = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_V = nn.Linear(dim_model, num_head * self.dim_head)
        self.attention = Scaled_Dot_Product_Attention()
        self.fc = nn.Linear(num_head * self.dim_head, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        batch_size = x.size(0)
        Q = self.fc_Q(x)
        K = self.fc_K(x)
        V = self.fc_V(x)
        Q = Q.view(batch_size * self.num_head, -1, self.dim_head)
        K = K.view(batch_size * self.num_head, -1, self.dim_head)
        V = V.view(batch_size * self.num_head, -1, self.dim_head)
        scale = K.size(-1) ** -0.5
        context = self.attention(Q, K, V, scale)
        context = context.view(batch_size, -1, self.dim_head * self.num_head)
        out = self.fc(context)
        out = self.dropout(out)
        out = out + x
        out = self.layer_norm(out)
        return out


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'dim_model': 4, 'num_head': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp3 = tmp2 - tmp2
    tmp4 = tmp3 * tmp1
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tmp5 / tmp5
    tl.store(in_out_ptr0 + x0, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x2, tmp16, xmask)
    tl.store(out_ptr1 + x2, tmp28, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
        in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex // 16
    x3 = xindex % 16
    x4 = xindex // 4
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x5, tmp13, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(
            primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor(
            primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, primals_1, reinterpret_tensor(
            primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del primals_6
        del primals_7
        buf3 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (16, 1, 1), (1, 1, 1), 0),
            reinterpret_tensor(buf1, (16, 1, 1), (1, 1, 1), 0), out=buf3)
        buf4 = buf3
        del buf3
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(16)](buf4, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(buf4, reinterpret_tensor(buf2, (16, 1, 1),
            (1, 1, 1), 0), out=buf5)
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (4, 4),
            (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf6)
        del primals_9
        buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused_add_native_layer_norm_1[grid(16)](buf6, primals_1,
            buf7, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_2[grid(64)](buf6, primals_1,
            buf7, buf8, primals_10, primals_11, buf9, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf7
        del buf8
        del primals_11
    return buf9, primals_1, primals_10, buf4, reinterpret_tensor(buf5, (4,
        4), (4, 1), 0), buf6, primals_8, reinterpret_tensor(buf2, (16, 1, 1),
        (1, 1, 1), 0), reinterpret_tensor(buf0, (16, 1, 1), (1, 1, 1), 0
        ), reinterpret_tensor(buf1, (16, 1, 1), (1, 1, 1), 0)


class Scaled_Dot_Product_Attention(nn.Module):
    """Scaled Dot-Product Attention """

    def __init__(self):
        super(Scaled_Dot_Product_Attention, self).__init__()

    def forward(self, Q, K, V, scale=None):
        """
        Args:
            Q: [batch_size, len_Q, dim_Q]
            K: [batch_size, len_K, dim_K]
            V: [batch_size, len_V, dim_V]
            scale: scaling factor; the paper uses sqrt(dim_K)
        Return:
            the tensor after self-attention, and the attention tensor
        """
        attention = torch.matmul(Q, K.permute(0, 2, 1))
        if scale:
            attention = attention * scale
        attention = F.softmax(attention, dim=-1)
        context = torch.matmul(attention, V)
        return context


class Multi_Head_AttentionNew(nn.Module):

    def __init__(self, dim_model, num_head, dropout=0.0):
        super(Multi_Head_AttentionNew, self).__init__()
        self.num_head = num_head
        assert dim_model % num_head == 0
        self.dim_head = dim_model // self.num_head
        self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_K = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_V = nn.Linear(dim_model, num_head * self.dim_head)
        self.attention = Scaled_Dot_Product_Attention()
        self.fc = nn.Linear(num_head * self.dim_head, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, input_0):
        primals_1 = self.fc_Q.weight
        primals_3 = self.fc_Q.bias
        primals_2 = self.fc_K.weight
        primals_5 = self.fc_K.bias
        primals_4 = self.fc_V.weight
        primals_7 = self.fc_V.bias
        primals_6 = self.fc.weight
        primals_9 = self.fc.bias
        primals_10 = self.layer_norm.weight
        primals_11 = self.layer_norm.bias
        primals_8 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
Moon-xm/Chinese-Text-Classification-Pytorch
Multi_Head_Attention
false
11725
[ "MIT" ]
0
19fe64006418bf4296f884e4d1f038c17b34d3de
https://github.com/Moon-xm/Chinese-Text-Classification-Pytorch/tree/19fe64006418bf4296f884e4d1f038c17b34d3de
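The degenerate softmax kernel above (exp(x - x) divided by itself) is not a bug in isolation: with the (4, 4) sample input, batch_size = 4, num_head = 4 and dim_head = 1, so Q, K and V are reshaped to (16, 1, 1) and every head attends over a length-1 sequence, where softmax is identically 1. A sketch that exercises the compiled module under that shape regime, assuming CUDA:

import torch

mha = Multi_Head_AttentionNew(dim_model=4, num_head=4).cuda()
x = torch.rand(4, 4, device='cuda')
out = mha(x)
# out + x broadcasts (4, 1, 4) against (4, 4), so the result is 3-D:
print(out.shape)  # torch.Size([4, 4, 4])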
HuberLoss
import torch
from torch import nn as nn


class HuberLoss(nn.Module):

    def __init__(self, delta=1):
        super().__init__()
        self.huber_loss_delta1 = nn.SmoothL1Loss()
        self.delta = delta

    def forward(self, x, x_hat):
        loss = self.huber_loss_delta1(x / self.delta, x_hat / self.delta)
        return loss * self.delta * self.delta


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_div_mul_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1,
        xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp5 = tmp2 - tmp4
    tmp6 = tl_math.abs(tmp5)
    tmp7 = tmp6 < tmp1
    tmp8 = tmp6 * tmp6
    tmp9 = 0.5
    tmp10 = tmp8 * tmp9
    tmp11 = tmp10 * tmp1
    tmp12 = tmp6 - tmp9
    tmp13 = tl.where(tmp7, tmp11, tmp12)
    tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
    tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
    tmp17 = 256.0
    tmp18 = tmp16 / tmp17
    tmp19 = tmp18 * tmp1
    tmp20 = tmp19 * tmp1
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_div_mul_smooth_l1_loss_0[grid(1)](buf1, arg0_1,
            arg1_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf1,


class HuberLossNew(nn.Module):

    def __init__(self, delta=1):
        super().__init__()
        self.huber_loss_delta1 = nn.SmoothL1Loss()
        self.delta = delta

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
NagisaZj/ProMP
HuberLoss
false
11726
[ "MIT" ]
0
539739ae2b7d5fdcad00855da695f643b23df4b3
https://github.com/NagisaZj/ProMP/tree/539739ae2b7d5fdcad00855da695f643b23df4b3
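With the default delta=1 the divisions and the final delta * delta rescaling are identities, so the traced kernel is exactly SmoothL1Loss with a mean reduction (it divides the 256-element sum by 256.0); a quick check, assuming CUDA:

import torch
from torch import nn

x = torch.rand(4, 4, 4, 4, device='cuda')
y = torch.rand(4, 4, 4, 4, device='cuda')
fused = HuberLossNew()(x, y)
eager = nn.SmoothL1Loss()(x, y)
assert torch.allclose(fused, eager, atol=1e-6)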
LayerNorm
import torch
from torch import nn as nn


class LayerNorm(nn.Module):
    """
    Simple 1D LayerNorm.
    """

    def __init__(self, features, center=True, scale=False, eps=1e-06):
        super().__init__()
        self.center = center
        self.scale = scale
        self.eps = eps
        if self.scale:
            self.scale_param = nn.Parameter(torch.ones(features))
        else:
            self.scale_param = None
        if self.center:
            self.center_param = nn.Parameter(torch.zeros(features))
        else:
            self.center_param = None

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        std = x.std(-1, keepdim=True)
        output = (x - mean) / (std + self.eps)
        if self.scale:
            output = output * self.scale_param
        if self.center:
            output = output + self.center_param
        return output


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'features': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_mean_std_sub_0(in_ptr0, in_ptr1, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp28 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = 4.0
    tmp9 = tmp7 / tmp8
    tmp10 = tmp0 - tmp9
    tmp11 = tmp1 - tmp9
    tmp12 = tmp11 * tmp11
    tmp13 = tmp2 - tmp9
    tmp14 = tmp13 * tmp13
    tmp15 = tmp12 + tmp14
    tmp16 = tmp4 - tmp9
    tmp17 = tmp16 * tmp16
    tmp18 = tmp15 + tmp17
    tmp19 = tmp6 - tmp9
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = 3.0
    tmp23 = tmp21 / tmp22
    tmp24 = libdevice.sqrt(tmp23)
    tmp25 = 1e-06
    tmp26 = tmp24 + tmp25
    tmp27 = tmp10 / tmp26
    tmp29 = tmp27 + tmp28
    tl.store(out_ptr0 + x2, tmp29, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mean_std_sub_0[grid(256)](primals_1,
            primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        del primals_2
    return buf0,


class LayerNormNew(nn.Module):
    """
    Simple 1D LayerNorm.
    """

    def __init__(self, features, center=True, scale=False, eps=1e-06):
        super().__init__()
        self.center = center
        self.scale = scale
        self.eps = eps
        if self.scale:
            self.scale_param = nn.Parameter(torch.ones(features))
        else:
            self.scale_param = None
        if self.center:
            self.center_param = nn.Parameter(torch.zeros(features))
        else:
            self.center_param = None

    def forward(self, input_0):
        primals_2 = self.center_param
        primals_1 = input_0
        output = call([primals_1, primals_2])
        return output[0]
NagisaZj/ProMP
LayerNorm
false
11727
[ "MIT" ]
0
539739ae2b7d5fdcad00855da695f643b23df4b3
https://github.com/NagisaZj/ProMP/tree/539739ae2b7d5fdcad00855da695f643b23df4b3
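Worth noting: the kernel divides the summed squared deviations by 3.0, i.e. torch.std's unbiased (n - 1) estimator for a last dimension of size 4, so this layer is not numerically interchangeable with nn.LayerNorm. A sketch reproducing the kernel's arithmetic in eager PyTorch, assuming CUDA:

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
out = LayerNormNew(features=4).cuda()(x)
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)      # unbiased by default, matching the / 3.0
ref = (x - mean) / (std + 1e-06)   # center_param is initialized to zero
assert torch.allclose(out, ref, atol=1e-6)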
CausalConv1d
import torch
from torch import nn as nn


class CausalConv1d(nn.Module):
    """A 1D causal convolution layer.

    Input: (B, D_in, T), where B is the minibatch size, D_in is the number of
    dimensions per step, and T is the number of steps.

    Output: (B, D_out, T), where B is the minibatch size, D_out is the number
    of dimensions in the output, and T is the number of steps.

    Arguments:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
    """

    def __init__(self, in_channels, out_channels, dilation=1):
        super(CausalConv1d, self).__init__()
        self.padding = dilation
        self.causal_conv = nn.Conv1d(in_channels, out_channels, 2,
            padding=self.padding, dilation=dilation)

    def forward(self, minibatch):
        return self.causal_conv(minibatch)[:, :, :-self.padding]


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 80
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 5 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 2), (8, 2, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
            padding=(1,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 5), (20, 5, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(80)](buf1, primals_2, 80,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
    return reinterpret_tensor(buf1, (4, 4, 4), (20, 5, 1), 0
        ), primals_1, primals_3


class CausalConv1dNew(nn.Module):
    """A 1D causal convolution layer.

    Input: (B, D_in, T), where B is the minibatch size, D_in is the number of
    dimensions per step, and T is the number of steps.

    Output: (B, D_out, T), where B is the minibatch size, D_out is the number
    of dimensions in the output, and T is the number of steps.

    Arguments:
        in_channels (int): number of input channels
        out_channels (int): number of output channels
    """

    def __init__(self, in_channels, out_channels, dilation=1):
        super(CausalConv1dNew, self).__init__()
        self.padding = dilation
        self.causal_conv = nn.Conv1d(in_channels, out_channels, 2,
            padding=self.padding, dilation=dilation)

    def forward(self, input_0):
        primals_1 = self.causal_conv.weight
        primals_2 = self.causal_conv.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
NagisaZj/ProMP
CausalConv1d
false
11728
[ "MIT" ]
0
539739ae2b7d5fdcad00855da695f643b23df4b3
https://github.com/NagisaZj/ProMP/tree/539739ae2b7d5fdcad00855da695f643b23df4b3
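A small causality check for the record above (illustrative, CUDA assumed): with kernel size 2 and the left-only effective padding, output step t must not depend on inputs after t, so perturbing the final time step leaves all earlier outputs unchanged.

import torch

conv = CausalConv1dNew(in_channels=4, out_channels=4).cuda()
x = torch.rand(4, 4, 4, device='cuda')
x2 = x.clone()
x2[:, :, -1] += 1.0                # change only the last time step
y, y2 = conv(x), conv(x2)
assert torch.allclose(y[:, :, :-1], y2[:, :, :-1])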
Stoplinear
import torch
from collections import OrderedDict
import torch.nn as nn


class Linear(nn.Module):
    """
    Linear Module
    """

    def __init__(self, in_dim, out_dim, bias=True, w_init='linear'):
        """
        :param in_dim: dimension of input
        :param out_dim: dimension of output
        :param bias: boolean. if True, bias is included.
        :param w_init: str. weight inits with xavier initialization.
        """
        super(Linear, self).__init__()
        self.linear_layer = nn.Linear(in_dim, out_dim, bias=bias)
        nn.init.xavier_uniform_(self.linear_layer.weight,
            gain=nn.init.calculate_gain(w_init))

    def forward(self, x):
        return self.linear_layer(x)


class Stoplinear(nn.Module):
    """
    Adding hidden layer to stopplinear to improve it
    """

    def __init__(self, input_size, p=0.1):
        super(Stoplinear, self).__init__()
        self.input_size = input_size
        self.hidden_size = input_size // 3
        self.layers = nn.Sequential(OrderedDict([
            ('fc1', Linear(self.input_size, self.hidden_size, w_init='sigmoid')),
            ('activation', nn.ReLU()),
            ('dropout', nn.Dropout(p)),
            ('fc2', Linear(self.hidden_size, 1, w_init='sigmoid'))]))

    def forward(self, input_):
        out = self.layers(input_)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from collections import OrderedDict
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp3 = tmp0 + tmp2
    tmp4 = tl.full([1], 0, tl.int32)
    tmp5 = triton_helpers.maximum(tmp4, tmp3)
    tmp6 = 0.0
    tmp7 = tmp5 <= tmp6
    tl.store(in_out_ptr0 + x0, tmp5, xmask)
    tl.store(out_ptr0 + x0, tmp7, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (1, 4), (4, 1))
    assert_size_stride(primals_2, (1,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (1, 1), (1, 1))
    assert_size_stride(primals_5, (1,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf0
        buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1,
            primals_2, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_2
        buf3 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 1),
            (1, 0), 0), primals_4, alpha=1, beta=1, out=buf3)
        del primals_5
    return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), reinterpret_tensor(buf1, (64, 1), (1, 1), 0), primals_4, buf4


class Linear(nn.Module):
    """
    Linear Module
    """

    def __init__(self, in_dim, out_dim, bias=True, w_init='linear'):
        """
        :param in_dim: dimension of input
        :param out_dim: dimension of output
        :param bias: boolean. if True, bias is included.
        :param w_init: str. weight inits with xavier initialization.
        """
        super(Linear, self).__init__()
        self.linear_layer = nn.Linear(in_dim, out_dim, bias=bias)
        nn.init.xavier_uniform_(self.linear_layer.weight,
            gain=nn.init.calculate_gain(w_init))

    def forward(self, x):
        return self.linear_layer(x)


class StoplinearNew(nn.Module):
    """
    Adding hidden layer to stopplinear to improve it
    """

    def __init__(self, input_size, p=0.1):
        super(StoplinearNew, self).__init__()
        self.input_size = input_size
        self.hidden_size = input_size // 3
        self.layers = nn.Sequential(OrderedDict([
            ('fc1', Linear(self.input_size, self.hidden_size, w_init='sigmoid')),
            ('activation', nn.ReLU()),
            ('dropout', nn.Dropout(p)),
            ('fc2', Linear(self.hidden_size, 1, w_init='sigmoid'))]))

    def forward(self, input_0):
        primals_1 = self.layers.fc1.linear_layer.weight
        primals_2 = self.layers.fc1.linear_layer.bias
        primals_4 = self.layers.fc2.linear_layer.weight
        primals_5 = self.layers.fc2.linear_layer.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
Munna-Manoj/Team7_TTS
Stoplinear
false
11729
[ "MIT" ]
0
5e2d473a2afe429023876bcc51c2ac966a4938b8
https://github.com/Munna-Manoj/Team7_TTS/tree/5e2d473a2afe429023876bcc51c2ac966a4938b8
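Two details of this record stand out: hidden_size = 4 // 3 = 1, so the bottleneck is a single unit, and the traced call() contains no dropout op, consistent with the graph having been captured with dropout inactive. An equivalence sketch under those assumptions (CUDA assumed, eager module in eval mode):

import torch

eager = Stoplinear(input_size=4).cuda().eval()
fused = StoplinearNew(input_size=4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(eager(x), fused(x), atol=1e-6)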
FFN
import torch
import torch.nn as nn
import torch as t


class Conv(nn.Module):
    """
    Convolution Module
    """

    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
            padding=0, dilation=1, bias=True, w_init='linear'):
        """
        :param in_channels: dimension of input
        :param out_channels: dimension of output
        :param kernel_size: size of kernel
        :param stride: size of stride
        :param padding: size of padding
        :param dilation: dilation rate
        :param bias: boolean. if True, bias is included.
        :param w_init: str. weight inits with xavier initialization.
        """
        super(Conv, self).__init__()
        self.conv = nn.Conv1d(in_channels, out_channels,
            kernel_size=kernel_size, stride=stride, padding=padding,
            dilation=dilation, bias=bias)
        nn.init.xavier_uniform_(self.conv.weight,
            gain=nn.init.calculate_gain(w_init))

    def forward(self, x):
        x = self.conv(x)
        return x


class FFN(nn.Module):
    """
    Positionwise Feed-Forward Network
    """

    def __init__(self, num_hidden):
        """
        :param num_hidden: dimension of hidden
        """
        super(FFN, self).__init__()
        self.w_1 = Conv(num_hidden, num_hidden * 4, kernel_size=1, w_init='relu')
        self.w_2 = Conv(num_hidden * 4, num_hidden, kernel_size=1)
        self.dropout = nn.Dropout(p=0.1)
        self.layer_norm = nn.LayerNorm(num_hidden)

    def forward(self, input_):
        x = input_.transpose(1, 2)
        x = self.w_2(t.relu(self.w_1(x)))
        x = x.transpose(1, 2)
        x = x + input_
        x = self.layer_norm(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'num_hidden': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 16
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x2, tmp16, xmask)
    tl.store(out_ptr1 + x2, tmp28, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2,
        in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + y3, ymask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp13, xmask & ymask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (16, 4, 1), (4, 1, 1))
    assert_size_stride(primals_3, (16,), (1,))
    assert_size_stride(primals_4, (4, 16, 1), (16, 1, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
            XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf1, (4, 16, 4), (64, 4, 1))
        buf2 = buf1
        del buf1
        triton_poi_fused_convolution_relu_1[grid(256)](buf2, primals_3, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_3
        buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf3, (4, 4, 4), (16, 4, 1))
        buf4 = buf3
        del buf3
        triton_poi_fused_convolution_2[grid(64)](buf4, primals_5, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_5
        buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused_add_native_layer_norm_3[grid(16)](buf4, primals_1,
            buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf7 = buf0
        del buf0
        triton_poi_fused_add_native_layer_norm_4[grid(16, 4)](buf4,
            primals_1, buf5, buf6, primals_6, primals_7, buf7, 16, 4,
            XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
        del buf5
        del buf6
        del primals_7
    return buf7, primals_1, primals_2, primals_4, primals_6, buf2, buf4


class Conv(nn.Module):
    """
    Convolution Module
    """

    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
            padding=0, dilation=1, bias=True, w_init='linear'):
        """
        :param in_channels: dimension of input
        :param out_channels: dimension of output
        :param kernel_size: size of kernel
        :param stride: size of stride
        :param padding: size of padding
        :param dilation: dilation rate
        :param bias: boolean. if True, bias is included.
        :param w_init: str. weight inits with xavier initialization.
        """
        super(Conv, self).__init__()
        self.conv = nn.Conv1d(in_channels, out_channels,
            kernel_size=kernel_size, stride=stride, padding=padding,
            dilation=dilation, bias=bias)
        nn.init.xavier_uniform_(self.conv.weight,
            gain=nn.init.calculate_gain(w_init))

    def forward(self, x):
        x = self.conv(x)
        return x


class FFNNew(nn.Module):
    """
    Positionwise Feed-Forward Network
    """

    def __init__(self, num_hidden):
        """
        :param num_hidden: dimension of hidden
        """
        super(FFNNew, self).__init__()
        self.w_1 = Conv(num_hidden, num_hidden * 4, kernel_size=1, w_init='relu')
        self.w_2 = Conv(num_hidden * 4, num_hidden, kernel_size=1)
        self.dropout = nn.Dropout(p=0.1)
        self.layer_norm = nn.LayerNorm(num_hidden)

    def forward(self, input_0):
        primals_2 = self.w_1.conv.weight
        primals_3 = self.w_1.conv.bias
        primals_4 = self.w_2.conv.weight
        primals_5 = self.w_2.conv.bias
        primals_6 = self.layer_norm.weight
        primals_7 = self.layer_norm.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
Munna-Manoj/Team7_TTS
FFN
false
11730
[ "MIT" ]
0
5e2d473a2afe429023876bcc51c2ac966a4938b8
https://github.com/Munna-Manoj/Team7_TTS/tree/5e2d473a2afe429023876bcc51c2ac966a4938b8
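Since FFN.forward never actually applies its dropout module, the eager and compiled variants should agree even without switching modes; a minimal equivalence sketch, assuming CUDA:

import torch

eager = FFN(num_hidden=4).cuda()
fused = FFNNew(num_hidden=4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, device='cuda')
assert torch.allclose(eager(x), fused(x), atol=1e-5)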
Encoder
import torch
import torch.nn as nn
import torch.nn.functional as F


class Scaled_Dot_Product_Attention(nn.Module):
    """Scaled Dot-Product Attention """

    def __init__(self):
        super(Scaled_Dot_Product_Attention, self).__init__()

    def forward(self, Q, K, V, scale=None):
        """
        Args:
            Q: [batch_size, len_Q, dim_Q]
            K: [batch_size, len_K, dim_K]
            V: [batch_size, len_V, dim_V]
            scale: scaling factor; the paper uses sqrt(dim_K)
        Return:
            the tensor after self-attention, and the attention tensor
        """
        attention = torch.matmul(Q, K.permute(0, 2, 1))
        if scale:
            attention = attention * scale
        attention = F.softmax(attention, dim=-1)
        context = torch.matmul(attention, V)
        return context


class Multi_Head_Attention(nn.Module):

    def __init__(self, dim_model, num_head, dropout=0.0):
        super(Multi_Head_Attention, self).__init__()
        self.num_head = num_head
        assert dim_model % num_head == 0
        self.dim_head = dim_model // self.num_head
        self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_K = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_V = nn.Linear(dim_model, num_head * self.dim_head)
        self.attention = Scaled_Dot_Product_Attention()
        self.fc = nn.Linear(num_head * self.dim_head, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        batch_size = x.size(0)
        Q = self.fc_Q(x)
        K = self.fc_K(x)
        V = self.fc_V(x)
        Q = Q.view(batch_size * self.num_head, -1, self.dim_head)
        K = K.view(batch_size * self.num_head, -1, self.dim_head)
        V = V.view(batch_size * self.num_head, -1, self.dim_head)
        scale = K.size(-1) ** -0.5
        context = self.attention(Q, K, V, scale)
        context = context.view(batch_size, -1, self.dim_head * self.num_head)
        out = self.fc(context)
        out = self.dropout(out)
        out = out + x
        out = self.layer_norm(out)
        return out


class Position_wise_Feed_Forward(nn.Module):

    def __init__(self, dim_model, hidden, dropout=0.0):
        super(Position_wise_Feed_Forward, self).__init__()
        self.fc1 = nn.Linear(dim_model, hidden)
        self.fc2 = nn.Linear(hidden, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        out = self.fc1(x)
        out = F.relu(out)
        out = self.fc2(out)
        out = self.dropout(out)
        out = out + x
        out = self.layer_norm(out)
        return out


class Encoder(nn.Module):

    def __init__(self, dim_model, num_head, hidden, dropout):
        super(Encoder, self).__init__()
        self.attention = Multi_Head_Attention(dim_model, num_head, dropout)
        self.feed_forward = Position_wise_Feed_Forward(dim_model, hidden, dropout)

    def forward(self, x):
        out = self.attention(x)
        out = self.feed_forward(out)
        return out


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'dim_model': 4, 'num_head': 4, 'hidden': 4, 'dropout': 0.5}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp3 = tmp2 - tmp2
    tmp4 = tmp3 * tmp1
    tmp5 = tl_math.exp(tmp4)
    tmp6 = tmp5 / tmp5
    tl.store(in_out_ptr0 + x0, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x0 = xindex % 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x2, tmp16, xmask)
    tl.store(out_ptr1 + x2, tmp28, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex // 16
    x3 = xindex % 16
    x4 = xindex // 4
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x5, tmp13, xmask)


@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_5(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13, primals_14, primals_15, primals_16,
        primals_17) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4), (4, 1))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (4, 4), (4, 1))
    assert_size_stride(primals_13, (4,), (1,))
    assert_size_stride(primals_14, (4, 4), (4, 1))
    assert_size_stride(primals_15, (4,), (1,))
    assert_size_stride(primals_16, (4,), (1,))
    assert_size_stride(primals_17, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(
            primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
        del primals_2
        del primals_3
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, primals_1, reinterpret_tensor(
            primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, primals_1, reinterpret_tensor(
            primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del primals_6
        del primals_7
        buf3 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (16, 1, 1), (1, 1, 1),
            0), reinterpret_tensor(buf1, (16, 1, 1), (1, 1, 1), 0), out=buf3)
        buf4 = buf3
        del buf3
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(16)](buf4, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((16, 1, 1), (1, 1, 1), torch.float32)
        extern_kernels.bmm(buf4, reinterpret_tensor(buf2, (16, 1, 1), (1,
            1, 1), 0), out=buf5)
        buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (4, 4), (
            4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf6)
        del primals_9
        buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused_add_native_layer_norm_1[grid(16)](buf6, primals_1,
            buf7, buf8, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_2[grid(64)](buf6, primals_1,
            buf7, buf8, primals_10, primals_11, buf9, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_11
        buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf9, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf10)
        buf11 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0)
        del buf10
        buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_3[grid(64)](buf11,
            primals_13, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_13
        buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_14, (4, 4), (1, 4), 0), out=buf12)
        buf13 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0)
        del buf12
        triton_poi_fused_add_4[grid(64)](buf13, primals_15, buf9, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_15
        buf14 = buf8
        del buf8
        buf15 = buf7
        del buf7
        triton_poi_fused_native_layer_norm_5[grid(16)](buf13, buf14, buf15,
            16, XBLOCK=16, num_warps=1, num_stages=1)
        buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_6[grid(64)](buf13, buf14, buf15,
            primals_16, primals_17, buf16, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del buf14
        del buf15
        del primals_17
    return (buf16, primals_1, primals_10, primals_16, buf4,
        reinterpret_tensor(buf5, (4, 4), (4, 1), 0), buf6,
        reinterpret_tensor(buf9, (16, 4), (4, 1), 0),
        reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf13, primals_14,
        buf17, primals_12, primals_8,
        reinterpret_tensor(buf2, (16, 1, 1), (1, 1, 1), 0),
        reinterpret_tensor(buf0, (16, 1, 1), (1, 1, 1), 0),
        reinterpret_tensor(buf1, (16, 1, 1), (1, 1, 1), 0))


class Scaled_Dot_Product_Attention(nn.Module):
    """Scaled Dot-Product Attention"""

    def __init__(self):
        super(Scaled_Dot_Product_Attention, self).__init__()

    def forward(self, Q, K, V, scale=None):
        """
        Args:
            Q: [batch_size, len_Q, dim_Q]
            K: [batch_size, len_K, dim_K]
            V: [batch_size, len_V, dim_V]
            scale: scaling factor; the paper uses sqrt(dim_K)
        Return:
            the tensor after self-attention, and the attention tensor
        """
        attention = torch.matmul(Q, K.permute(0, 2, 1))
        if scale:
            attention = attention * scale
        attention = F.softmax(attention, dim=-1)
        context = torch.matmul(attention, V)
        return context


class Multi_Head_Attention(nn.Module):

    def __init__(self, dim_model, num_head, dropout=0.0):
        super(Multi_Head_Attention, self).__init__()
        self.num_head = num_head
        assert dim_model % num_head == 0
        self.dim_head = dim_model // self.num_head
        self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_K = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_V = nn.Linear(dim_model, num_head * self.dim_head)
        self.attention = Scaled_Dot_Product_Attention()
        self.fc = nn.Linear(num_head * self.dim_head, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        batch_size = x.size(0)
        Q = self.fc_Q(x)
        K = self.fc_K(x)
        V = self.fc_V(x)
        Q = Q.view(batch_size * self.num_head, -1, self.dim_head)
        K = K.view(batch_size * self.num_head, -1, self.dim_head)
        V = V.view(batch_size * self.num_head, -1, self.dim_head)
        scale = K.size(-1) ** -0.5
        context = self.attention(Q, K, V, scale)
        context = context.view(batch_size, -1, self.dim_head * self.num_head)
        out = self.fc(context)
        out = self.dropout(out)
        out = out + x
        out = self.layer_norm(out)
        return out


class Position_wise_Feed_Forward(nn.Module):

    def __init__(self, dim_model, hidden, dropout=0.0):
        super(Position_wise_Feed_Forward, self).__init__()
        self.fc1 = nn.Linear(dim_model, hidden)
        self.fc2 = nn.Linear(hidden, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        out = self.fc1(x)
        out = F.relu(out)
        out = self.fc2(out)
        out = self.dropout(out)
        out = out + x
        out = self.layer_norm(out)
        return out


class EncoderNew(nn.Module):

    def __init__(self, dim_model, num_head, hidden, dropout):
        super(EncoderNew, self).__init__()
        self.attention = Multi_Head_Attention(dim_model, num_head, dropout)
        self.feed_forward = Position_wise_Feed_Forward(dim_model, hidden,
            dropout)

    def forward(self, input_0):
        primals_1 = self.attention.fc_Q.weight
        primals_3 = self.attention.fc_Q.bias
        primals_2 = self.attention.fc_K.weight
        primals_5 = self.attention.fc_K.bias
        primals_4 = self.attention.fc_V.weight
        primals_7 = self.attention.fc_V.bias
        primals_6 = self.attention.fc.weight
        primals_9 = self.attention.fc.bias
        primals_10 = self.attention.layer_norm.weight
        primals_11 = self.attention.layer_norm.bias
        primals_8 = self.feed_forward.fc1.weight
        primals_13 = self.feed_forward.fc1.bias
        primals_12 = self.feed_forward.fc2.weight
        primals_15 = self.feed_forward.fc2.bias
        primals_16 = self.feed_forward.layer_norm.weight
        primals_17 = self.feed_forward.layer_norm.bias
        primals_14 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17])
        return output[0]
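A minimal smoke-test sketch for this record (my addition, not part of the dataset; assumes a CUDA device). It runs the compiled module on an input matching the traced (4, 4) shape and the eager submodules for comparison; the output shapes should agree even where the traced parameter routing differs from the eager wiring.

import torch

torch.manual_seed(0)
enc = EncoderNew(dim_model=4, num_head=4, hidden=4, dropout=0.0).cuda()
x = torch.rand(4, 4, device='cuda')
with torch.no_grad():
    out = enc(x)                              # compiled path via call()
    ref = enc.feed_forward(enc.attention(x))  # eager submodules, same weights
print(out.shape, ref.shape)  # both (4, 4, 4) for this traced configuration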
Moon-xm/Chinese-Text-Classification-Pytorch
Encoder
false
11731
[ "MIT" ]
0
19fe64006418bf4296f884e4d1f038c17b34d3de
https://github.com/Moon-xm/Chinese-Text-Classification-Pytorch/tree/19fe64006418bf4296f884e4d1f038c17b34d3de
Position_wise_Feed_Forward
import torch
import torch.nn as nn
import torch.nn.functional as F


class Position_wise_Feed_Forward(nn.Module):

    def __init__(self, dim_model, hidden, dropout=0.0):
        super(Position_wise_Feed_Forward, self).__init__()
        self.fc1 = nn.Linear(dim_model, hidden)
        self.fc2 = nn.Linear(hidden, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        out = self.fc1(x)
        out = F.relu(out)
        out = self.fc2(out)
        out = self.dropout(out)
        out = out + x
        out = self.layer_norm(out)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim_model': 4, 'hidden': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_1(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x0, tmp16, xmask)
    tl.store(out_ptr1 + x0, tmp28, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4,), (1,))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
            primals_2, buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4),
            (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf2)
        del primals_5
        buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        triton_poi_fused_add_native_layer_norm_1[grid(64)](buf2, primals_3,
            buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_2[grid(256)](buf2,
            primals_3, buf3, buf4, primals_6, primals_7, buf5, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del buf3
        del buf4
        del primals_7
    return buf5, primals_3, primals_6, reinterpret_tensor(buf1, (64, 4),
        (4, 1), 0), buf2, primals_4, buf6


class Position_wise_Feed_ForwardNew(nn.Module):

    def __init__(self, dim_model, hidden, dropout=0.0):
        super(Position_wise_Feed_ForwardNew, self).__init__()
        self.fc1 = nn.Linear(dim_model, hidden)
        self.fc2 = nn.Linear(hidden, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, input_0):
        primals_1 = self.fc1.weight
        primals_2 = self.fc1.bias
        primals_4 = self.fc2.weight
        primals_5 = self.fc2.bias
        primals_6 = self.layer_norm.weight
        primals_7 = self.layer_norm.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
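A usage sketch for this record (my addition, assuming both class definitions above are in scope and a CUDA device is available): it drives the compiled module with the record's own get_inputs/get_init_inputs helpers and compares against the eager module on shared weights.

import torch

torch.manual_seed(0)
init_args, init_kwargs = get_init_inputs()
eager = Position_wise_Feed_Forward(*init_args, **init_kwargs).cuda().eval()
fused = Position_wise_Feed_ForwardNew(*init_args, **init_kwargs).cuda().eval()
fused.load_state_dict(eager.state_dict())  # identical submodules, so keys match
x = get_inputs()[0].cuda()
with torch.no_grad():
    print(torch.allclose(eager(x), fused(x), atol=1e-5))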
Moon-xm/Chinese-Text-Classification-Pytorch
Position_wise_Feed_Forward
false
11732
[ "MIT" ]
0
19fe64006418bf4296f884e4d1f038c17b34d3de
https://github.com/Moon-xm/Chinese-Text-Classification-Pytorch/tree/19fe64006418bf4296f884e4d1f038c17b34d3de
MultiheadAttention
import math
import torch
import torch.nn as nn
import torch as t


class MultiheadAttention(nn.Module):
    """
    Multihead attention mechanism (dot attention)
    """

    def __init__(self, num_hidden_k):
        """
        :param num_hidden_k: dimension of hidden
        """
        super(MultiheadAttention, self).__init__()
        self.num_hidden_k = num_hidden_k
        self.attn_dropout = nn.Dropout(p=0.1)

    def forward(self, key, value, query, mask=None, query_mask=None):
        attn = t.bmm(query, key.transpose(1, 2))
        attn = attn / math.sqrt(self.num_hidden_k)
        if mask is not None:
            attn = attn.masked_fill(mask, -2 ** 32 + 1)
            attn = t.softmax(attn, dim=-1)
        else:
            attn = t.softmax(attn, dim=-1)
        if query_mask is not None:
            attn = attn * query_mask
        result = t.bmm(attn, value)
        return result, attn


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]),
        torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'num_hidden_k': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tmp4 = tmp3 * tmp1
    tmp6 = tmp5 * tmp1
    tmp7 = triton_helpers.maximum(tmp4, tmp6)
    tmp9 = tmp8 * tmp1
    tmp10 = triton_helpers.maximum(tmp7, tmp9)
    tmp12 = tmp11 * tmp1
    tmp13 = triton_helpers.maximum(tmp10, tmp12)
    tmp14 = tmp2 - tmp13
    tmp15 = 0.5
    tmp16 = tmp14 * tmp15
    tmp17 = tl_math.exp(tmp16)
    tl.store(out_ptr0 + x2, tmp17, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4),
            (16, 1, 4), 0), out=buf0)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf2 = buf0
        del buf0
        triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf3 = buf1
        del buf1
        extern_kernels.bmm(buf2, arg2_1, out=buf3)
        del arg2_1
    return buf3, buf2


class MultiheadAttentionNew(nn.Module):
    """
    Multihead attention mechanism (dot attention)
    """

    def __init__(self, num_hidden_k):
        """
        :param num_hidden_k: dimension of hidden
        """
        super(MultiheadAttentionNew, self).__init__()
        self.num_hidden_k = num_hidden_k
        self.attn_dropout = nn.Dropout(p=0.1)

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0], output[1]
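The two _softmax kernels above split one scaled softmax into an exp pass and a normalization pass, folding the 1/sqrt(num_hidden_k) = 0.5 scale into the exponent after the row-max subtraction. A plain-PyTorch sketch of that identity (my addition):

import torch

attn = torch.randn(4, 4, 4)
num = torch.exp((attn - attn.amax(dim=-1, keepdim=True)) * 0.5)  # kernel 0
out = num / num.sum(dim=-1, keepdim=True)                        # kernel 1
print(torch.allclose(out, torch.softmax(attn * 0.5, dim=-1), atol=1e-6))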
Munna-Manoj/Team7_TTS
MultiheadAttention
false
11733
[ "MIT" ]
0
5e2d473a2afe429023876bcc51c2ac966a4938b8
https://github.com/Munna-Manoj/Team7_TTS/tree/5e2d473a2afe429023876bcc51c2ac966a4938b8
DiceLoss
import torch
import torch.nn as nn


class DiceLoss(nn.Module):
    """Sørensen–Dice coefficient loss to calculate the mean loss over a batch
    of data. This loss mainly calculates the similarity between two samples.

    To know more about this loss check this link:
    https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
    """

    def __init__(self):
        """Simple constructor for the class."""
        super(DiceLoss, self).__init__()

    def forward(self, predicted, target):
        """
        Method for calculation of loss from sample.

        Parameters:
            predicted(torch.Tensor): Predicted output of the network.
                Shape - (Batch Size,Channel,Height,Width)
            target(torch.Tensor): Actual required output for the network
                Shape - (Batch Size,Channel,Height,Width)

        Returns:
            The mean dice Loss over the batch size.
        """
        batch = predicted.size()[0]
        batch_loss = 0
        for index in range(batch):
            coefficient = self._dice_coefficient(predicted[index],
                target[index])
            batch_loss += coefficient
        batch_loss = batch_loss / batch
        return 1 - batch_loss

    def _dice_coefficient(self, predicted, target):
        """Calculates the Sørensen–Dice Coefficient for a single sample.

        Parameters:
            predicted(torch.Tensor): Predicted single output of the network.
                Shape - (Channel,Height,Width)
            target(torch.Tensor): Actual required single output for the network
                Shape - (Channel,Height,Width)

        Returns:
            coefficient(torch.Tensor): Dice coefficient for the input sample.
                1 represents high similarity and 0 represents low similarity.
        """
        smooth = 1
        product = torch.mul(predicted, target)
        intersection = product.sum()
        coefficient = (2 * intersection + smooth) / (predicted.sum() +
            target.sum() + smooth)
        return coefficient


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
    xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp12 = tl.load(in_ptr0 + (64 + r0), None)
    tmp13 = tl.load(in_ptr1 + (64 + r0), None)
    tmp24 = tl.load(in_ptr0 + (128 + r0), None)
    tmp25 = tl.load(in_ptr1 + (128 + r0), None)
    tmp36 = tl.load(in_ptr0 + (192 + r0), None)
    tmp37 = tl.load(in_ptr1 + (192 + r0), None)
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.sum(tmp3, 1)[:, None]
    tmp6 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp8 = tl.sum(tmp6, 1)[:, None]
    tmp9 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp11 = tl.sum(tmp9, 1)[:, None]
    tmp14 = tmp12 * tmp13
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
    tmp17 = tl.sum(tmp15, 1)[:, None]
    tmp18 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp20 = tl.sum(tmp18, 1)[:, None]
    tmp21 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
    tmp23 = tl.sum(tmp21, 1)[:, None]
    tmp26 = tmp24 * tmp25
    tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
    tmp29 = tl.sum(tmp27, 1)[:, None]
    tmp30 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
    tmp32 = tl.sum(tmp30, 1)[:, None]
    tmp33 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
    tmp35 = tl.sum(tmp33, 1)[:, None]
    tmp38 = tmp36 * tmp37
    tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
    tmp41 = tl.sum(tmp39, 1)[:, None]
    tmp42 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
    tmp44 = tl.sum(tmp42, 1)[:, None]
    tmp45 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
    tmp47 = tl.sum(tmp45, 1)[:, None]
    tmp48 = 2.0
    tmp49 = tmp5 * tmp48
    tmp50 = 1.0
    tmp51 = tmp49 + tmp50
    tmp52 = tmp8 + tmp11
    tmp53 = tmp52 + tmp50
    tmp54 = tmp51 / tmp53
    tmp55 = 0.0
    tmp56 = tmp54 + tmp55
    tmp57 = tmp17 * tmp48
    tmp58 = tmp57 + tmp50
    tmp59 = tmp20 + tmp23
    tmp60 = tmp59 + tmp50
    tmp61 = tmp58 / tmp60
    tmp62 = tmp56 + tmp61
    tmp63 = tmp29 * tmp48
    tmp64 = tmp63 + tmp50
    tmp65 = tmp32 + tmp35
    tmp66 = tmp65 + tmp50
    tmp67 = tmp64 / tmp66
    tmp68 = tmp62 + tmp67
    tmp69 = tmp41 * tmp48
    tmp70 = tmp69 + tmp50
    tmp71 = tmp44 + tmp47
    tmp72 = tmp71 + tmp50
    tmp73 = tmp70 / tmp72
    tmp74 = tmp68 + tmp73
    tmp75 = 0.25
    tmp76 = tmp74 * tmp75
    tmp77 = tmp50 - tmp76
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp77, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        buf12 = buf0
        del buf0
        buf13 = buf12
        del buf12
        get_raw_stream(0)
        triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf13, arg0_1,
            arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf13,


class DiceLossNew(nn.Module):
    """Sørensen–Dice coefficient loss to calculate the mean loss over a batch
    of data. This loss mainly calculates the similarity between two samples.

    To know more about this loss check this link:
    https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
    """

    def __init__(self):
        """Simple constructor for the class."""
        super(DiceLossNew, self).__init__()

    def _dice_coefficient(self, predicted, target):
        """Calculates the Sørensen–Dice Coefficient for a single sample.

        Parameters:
            predicted(torch.Tensor): Predicted single output of the network.
                Shape - (Channel,Height,Width)
            target(torch.Tensor): Actual required single output for the network
                Shape - (Channel,Height,Width)

        Returns:
            coefficient(torch.Tensor): Dice coefficient for the input sample.
                1 represents high similarity and 0 represents low similarity.
        """
        smooth = 1
        product = torch.mul(predicted, target)
        intersection = product.sum()
        coefficient = (2 * intersection + smooth) / (predicted.sum() +
            target.sum() + smooth)
        return coefficient

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
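A tiny worked example (my addition) of the coefficient the fused kernel reduces, using the module's smooth=1 convention: identical inputs give a coefficient of exactly 1, hence zero loss.

import torch

p = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
t = p.clone()
smooth = 1
coeff = (2 * (p * t).sum() + smooth) / (p.sum() + t.sum() + smooth)  # (4+1)/(4+1) = 1
print(1 - coeff)  # tensor(0.)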
NajusAnaxi/UNet-based-for-Brain-Tumor-Segmentation
DiceLoss
false
11734
[ "MIT" ]
0
24ca4432873f145ad33810f40c851ac10bf030fa
https://github.com/NajusAnaxi/UNet-based-for-Brain-Tumor-Segmentation/tree/24ca4432873f145ad33810f40c851ac10bf030fa
BCEDiceLoss
import torch
import torch.nn as nn
import torch.nn.functional as F


class DiceLoss(nn.Module):
    """Sørensen–Dice coefficient loss to calculate the mean loss over a batch
    of data. This loss mainly calculates the similarity between two samples.

    To know more about this loss check this link:
    https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
    """

    def __init__(self):
        """Simple constructor for the class."""
        super(DiceLoss, self).__init__()

    def forward(self, predicted, target):
        """
        Method for calculation of loss from sample.

        Parameters:
            predicted(torch.Tensor): Predicted output of the network.
                Shape - (Batch Size,Channel,Height,Width)
            target(torch.Tensor): Actual required output for the network
                Shape - (Batch Size,Channel,Height,Width)

        Returns:
            The mean dice Loss over the batch size.
        """
        batch = predicted.size()[0]
        batch_loss = 0
        for index in range(batch):
            coefficient = self._dice_coefficient(predicted[index],
                target[index])
            batch_loss += coefficient
        batch_loss = batch_loss / batch
        return 1 - batch_loss

    def _dice_coefficient(self, predicted, target):
        """Calculates the Sørensen–Dice Coefficient for a single sample.

        Parameters:
            predicted(torch.Tensor): Predicted single output of the network.
                Shape - (Channel,Height,Width)
            target(torch.Tensor): Actual required single output for the network
                Shape - (Channel,Height,Width)

        Returns:
            coefficient(torch.Tensor): Dice coefficient for the input sample.
                1 represents high similarity and 0 represents low similarity.
        """
        smooth = 1
        product = torch.mul(predicted, target)
        intersection = product.sum()
        coefficient = (2 * intersection + smooth) / (predicted.sum() +
            target.sum() + smooth)
        return coefficient


class BCEDiceLoss(nn.Module):
    """
    Combination of Binary Cross Entropy Loss and Soft Dice Loss.
    This combined loss is used to train the network so that both
    benefits of the loss are leveraged.
    """

    def __init__(self, device):
        """Simple constructor for the class."""
        super(BCEDiceLoss, self).__init__()
        self.dice_loss = DiceLoss()

    def forward(self, predicted, target):
        """ Method for calculation of combined loss from sample."""
        return F.binary_cross_entropy(predicted, target) + self.dice_loss(
            predicted, target)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'device': 0}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_per_fused_binary_cross_entropy_0(in_ptr0, in_ptr1, out_ptr0,
    xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp3 = tl.load(in_ptr1 + r0, None)
    tmp1 = 1.0
    tmp2 = tmp0 - tmp1
    tmp4 = -tmp3
    tmp5 = libdevice.log1p(tmp4)
    tmp6 = -100.0
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp2 * tmp7
    tmp9 = tl_math.log(tmp3)
    tmp10 = triton_helpers.maximum(tmp9, tmp6)
    tmp11 = tmp0 * tmp10
    tmp12 = tmp8 - tmp11
    tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
    tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
    tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None)


@triton.jit
def triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_1(in_out_ptr1,
    in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + r0, None)
    tmp1 = tl.load(in_ptr1 + r0, None)
    tmp12 = tl.load(in_ptr0 + (64 + r0), None)
    tmp13 = tl.load(in_ptr1 + (64 + r0), None)
    tmp24 = tl.load(in_ptr0 + (128 + r0), None)
    tmp25 = tl.load(in_ptr1 + (128 + r0), None)
    tmp36 = tl.load(in_ptr0 + (192 + r0), None)
    tmp37 = tl.load(in_ptr1 + (192 + r0), None)
    tmp75 = tl.load(in_out_ptr1 + 0)
    tmp76 = tl.broadcast_to(tmp75, [XBLOCK, 1])
    tmp2 = tmp0 * tmp1
    tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tmp5 = tl.sum(tmp3, 1)[:, None]
    tmp6 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp8 = tl.sum(tmp6, 1)[:, None]
    tmp9 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
    tmp11 = tl.sum(tmp9, 1)[:, None]
    tmp14 = tmp12 * tmp13
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
    tmp17 = tl.sum(tmp15, 1)[:, None]
    tmp18 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
    tmp20 = tl.sum(tmp18, 1)[:, None]
    tmp21 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
    tmp23 = tl.sum(tmp21, 1)[:, None]
    tmp26 = tmp24 * tmp25
    tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
    tmp29 = tl.sum(tmp27, 1)[:, None]
    tmp30 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
    tmp32 = tl.sum(tmp30, 1)[:, None]
    tmp33 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
    tmp35 = tl.sum(tmp33, 1)[:, None]
    tmp38 = tmp36 * tmp37
    tmp39 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
    tmp41 = tl.sum(tmp39, 1)[:, None]
    tmp42 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
    tmp44 = tl.sum(tmp42, 1)[:, None]
    tmp45 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
    tmp47 = tl.sum(tmp45, 1)[:, None]
    tmp48 = 2.0
    tmp49 = tmp5 * tmp48
    tmp50 = 1.0
    tmp51 = tmp49 + tmp50
    tmp52 = tmp8 + tmp11
    tmp53 = tmp52 + tmp50
    tmp54 = tmp51 / tmp53
    tmp55 = 0.0
    tmp56 = tmp54 + tmp55
    tmp57 = tmp17 * tmp48
    tmp58 = tmp57 + tmp50
    tmp59 = tmp20 + tmp23
    tmp60 = tmp59 + tmp50
    tmp61 = tmp58 / tmp60
    tmp62 = tmp56 + tmp61
    tmp63 = tmp29 * tmp48
    tmp64 = tmp63 + tmp50
    tmp65 = tmp32 + tmp35
    tmp66 = tmp65 + tmp50
    tmp67 = tmp64 / tmp66
    tmp68 = tmp62 + tmp67
    tmp69 = tmp41 * tmp48
    tmp70 = tmp69 + tmp50
    tmp71 = tmp44 + tmp47
    tmp72 = tmp71 + tmp50
    tmp73 = tmp70 / tmp72
    tmp74 = tmp68 + tmp73
    tmp77 = 256.0
    tmp78 = tmp76 / tmp77
    tmp79 = 0.25
    tmp80 = tmp74 * tmp79
    tmp81 = tmp50 - tmp80
    tmp82 = tmp78 + tmp81
    tl.debug_barrier()
    tl.store(in_out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp82, None)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((), (), torch.float32)
        get_raw_stream(0)
        triton_per_fused_binary_cross_entropy_0[grid(1)](arg0_1, arg1_1,
            buf0, 1, 256, num_warps=2, num_stages=1)
        buf14 = buf0
        del buf0
        triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_1[grid(1)](
            buf14, arg1_1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf14,


class DiceLoss(nn.Module):
    """Sørensen–Dice coefficient loss to calculate the mean loss over a batch
    of data. This loss mainly calculates the similarity between two samples.

    To know more about this loss check this link:
    https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
    """

    def __init__(self):
        """Simple constructor for the class."""
        super(DiceLoss, self).__init__()

    def forward(self, predicted, target):
        """
        Method for calculation of loss from sample.

        Parameters:
            predicted(torch.Tensor): Predicted output of the network.
                Shape - (Batch Size,Channel,Height,Width)
            target(torch.Tensor): Actual required output for the network
                Shape - (Batch Size,Channel,Height,Width)

        Returns:
            The mean dice Loss over the batch size.
        """
        batch = predicted.size()[0]
        batch_loss = 0
        for index in range(batch):
            coefficient = self._dice_coefficient(predicted[index],
                target[index])
            batch_loss += coefficient
        batch_loss = batch_loss / batch
        return 1 - batch_loss

    def _dice_coefficient(self, predicted, target):
        """Calculates the Sørensen–Dice Coefficient for a single sample.

        Parameters:
            predicted(torch.Tensor): Predicted single output of the network.
                Shape - (Channel,Height,Width)
            target(torch.Tensor): Actual required single output for the network
                Shape - (Channel,Height,Width)

        Returns:
            coefficient(torch.Tensor): Dice coefficient for the input sample.
                1 represents high similarity and 0 represents low similarity.
        """
        smooth = 1
        product = torch.mul(predicted, target)
        intersection = product.sum()
        coefficient = (2 * intersection + smooth) / (predicted.sum() +
            target.sum() + smooth)
        return coefficient


class BCEDiceLossNew(nn.Module):
    """
    Combination of Binary Cross Entropy Loss and Soft Dice Loss.
    This combined loss is used to train the network so that both
    benefits of the loss are leveraged.
    """

    def __init__(self, device):
        """Simple constructor for the class."""
        super(BCEDiceLossNew, self).__init__()
        self.dice_loss = DiceLoss()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
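A sketch (my addition) of the decomposition the two fused kernels implement: the first reduces the mean binary cross-entropy, the second folds in the Dice term. In eager mode, assuming the classes above are in scope, the combined loss is by construction the sum of the two parts:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
pred = torch.rand(4, 4, 4, 4)
target = torch.rand(4, 4, 4, 4)
combined = BCEDiceLoss(device=0)(pred, target)
parts = F.binary_cross_entropy(pred, target) + DiceLoss()(pred, target)
print(torch.allclose(combined, parts))  # True: forward is literally this sum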
NajusAnaxi/UNet-based-for-Brain-Tumor-Segmentation
BCEDiceLoss
false
11735
[ "MIT" ]
0
24ca4432873f145ad33810f40c851ac10bf030fa
https://github.com/NajusAnaxi/UNet-based-for-Brain-Tumor-Segmentation/tree/24ca4432873f145ad33810f40c851ac10bf030fa
GraphConv
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init


class MeanAggregator(nn.Module):

    def forward(self, features, A):
        x = torch.bmm(A, features)
        return x


class GraphConv(nn.Module):

    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.weight = nn.Parameter(torch.FloatTensor(in_dim * 2, out_dim))
        self.bias = nn.Parameter(torch.FloatTensor(out_dim))
        init.xavier_uniform_(self.weight)
        init.constant_(self.bias, 0)
        self.aggregator = MeanAggregator()

    def forward(self, features, A):
        _b, _n, d = features.shape
        assert d == self.in_dim
        agg_feats = self.aggregator(features, A)
        cat_feats = torch.cat([features, agg_feats], dim=2)
        out = torch.einsum('bnd,df->bnf', cat_feats, self.weight)
        out = F.relu(out + self.bias)
        return out


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'in_dim': 4, 'out_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.nn import init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = x0
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp10 = tl.where(tmp4, tmp5, tmp9)
    tl.store(out_ptr0 + x2, tmp10, xmask)


@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_3, (8, 4), (4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(primals_2, primals_1, out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_cat_0[grid(128)](primals_1, buf0, buf1, 128,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_1
        buf2 = reinterpret_tensor(buf0, (1, 16, 4), (64, 4, 1), 0)
        del buf0
        extern_kernels.bmm(reinterpret_tensor(buf1, (1, 16, 8), (0, 8, 1),
            0), reinterpret_tensor(primals_3, (1, 8, 4), (32, 4, 1), 0),
            out=buf2)
        del primals_3
        buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0)
        del buf2
        buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        triton_poi_fused_add_relu_threshold_backward_1[grid(64)](buf3,
            primals_4, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_4
    return buf3, buf4, reinterpret_tensor(buf1, (1, 8, 16), (128, 1, 8), 0)


class MeanAggregator(nn.Module):

    def forward(self, features, A):
        x = torch.bmm(A, features)
        return x


class GraphConvNew(nn.Module):

    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.weight = nn.Parameter(torch.FloatTensor(in_dim * 2, out_dim))
        self.bias = nn.Parameter(torch.FloatTensor(out_dim))
        init.xavier_uniform_(self.weight)
        init.constant_(self.bias, 0)
        self.aggregator = MeanAggregator()

    def forward(self, input_0, input_1):
        primals_3 = self.weight
        primals_4 = self.bias
        primals_1 = input_0
        primals_2 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4])
        return output[0]
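The compiled graph lowers torch.einsum('bnd,df->bnf', ...) to a single bmm over the flattened batch (the reinterpret_tensor calls above). A plain-PyTorch sketch of that equivalence (my addition), with shapes matching this record:

import torch

cat_feats = torch.randn(4, 4, 8)   # [b, n, 2 * in_dim]
weight = torch.randn(8, 4)         # [2 * in_dim, out_dim]
ref = torch.einsum('bnd,df->bnf', cat_feats, weight)
via_mm = (cat_feats.reshape(16, 8) @ weight).reshape(4, 4, 4)
print(torch.allclose(ref, via_mm, atol=1e-6))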
NceBoy/mmocr
GraphConv
false
11736
[ "Apache-2.0" ]
0
3fb7a18d7eb44799e75c1991e5da2044b458d411
https://github.com/NceBoy/mmocr/tree/3fb7a18d7eb44799e75c1991e5da2044b458d411
Vol
import math
import torch
from torch import Tensor
import torchaudio.functional as F


class Vol(torch.nn.Module):
    """Add a volume to a waveform.

    Args:
        gain (float): Interpreted according to the given gain_type:
            If ``gain_type`` = ``amplitude``, ``gain`` is a positive amplitude ratio.
            If ``gain_type`` = ``power``, ``gain`` is a power (voltage squared).
            If ``gain_type`` = ``db``, ``gain`` is in decibels.
        gain_type (str, optional): Type of gain. One of: ``amplitude``,
            ``power``, ``db`` (Default: ``amplitude``)
    """

    def __init__(self, gain: 'float', gain_type: 'str'='amplitude'):
        super(Vol, self).__init__()
        self.gain = gain
        self.gain_type = gain_type
        if gain_type in ['amplitude', 'power'] and gain < 0:
            raise ValueError(
                'If gain_type = amplitude or power, gain must be positive.')

    def forward(self, waveform: 'Tensor') ->Tensor:
        """
        Args:
            waveform (Tensor): Tensor of audio of dimension (..., time).

        Returns:
            Tensor: Tensor of audio of dimension (..., time).
        """
        if self.gain_type == 'amplitude':
            waveform = waveform * self.gain
        if self.gain_type == 'db':
            waveform = F.gain(waveform, self.gain)
        if self.gain_type == 'power':
            waveform = F.gain(waveform, 10 * math.log10(self.gain))
        return torch.clamp(waveform, -1, 1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'gain': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_clamp_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 4.0
    tmp2 = tmp0 * tmp1
    tmp3 = -1.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 1.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tl.store(out_ptr0 + x0, tmp6, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clamp_mul_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class VolNew(torch.nn.Module):
    """Add a volume to a waveform.

    Args:
        gain (float): Interpreted according to the given gain_type:
            If ``gain_type`` = ``amplitude``, ``gain`` is a positive amplitude ratio.
            If ``gain_type`` = ``power``, ``gain`` is a power (voltage squared).
            If ``gain_type`` = ``db``, ``gain`` is in decibels.
        gain_type (str, optional): Type of gain. One of: ``amplitude``,
            ``power``, ``db`` (Default: ``amplitude``)
    """

    def __init__(self, gain: 'float', gain_type: 'str'='amplitude'):
        super(VolNew, self).__init__()
        self.gain = gain
        self.gain_type = gain_type
        if gain_type in ['amplitude', 'power'] and gain < 0:
            raise ValueError(
                'If gain_type = amplitude or power, gain must be positive.')

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
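For gain_type='amplitude' the whole forward collapses into the one fused kernel above: multiply by the gain, then clamp to [-1, 1]. A plain-PyTorch sketch of the same computation (my addition):

import torch

wave = torch.rand(4, 4, 4, 4)
out = torch.clamp(wave * 4.0, -1, 1)  # what triton_poi_fused_clamp_mul_0 computes
print(out.min().item(), out.max().item())  # bounded by [-1, 1]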
Nayef211/audio
Vol
false
11,737
[ "BSD-2-Clause" ]
0
241ab1e8284e589262f510ee9411baf2bc374ded
https://github.com/Nayef211/audio/tree/241ab1e8284e589262f510ee9411baf2bc374ded
ComputeDeltas
import torch
from torch import Tensor
import torchaudio.functional as F


class ComputeDeltas(torch.nn.Module):
    """Compute delta coefficients of a tensor, usually a spectrogram.

    See `torchaudio.functional.compute_deltas` for more details.

    Args:
        win_length (int): The window length used for computing delta. (Default: ``5``)
        mode (str): Mode parameter passed to padding. (Default: ``'replicate'``)
    """
    __constants__ = ['win_length']

    def __init__(self, win_length: 'int'=5, mode: 'str'='replicate') ->None:
        super(ComputeDeltas, self).__init__()
        self.win_length = win_length
        self.mode = mode

    def forward(self, specgram: 'Tensor') ->Tensor:
        """
        Args:
            specgram (Tensor): Tensor of audio of dimension (..., freq, time).

        Returns:
            Tensor: Tensor of deltas of dimension (..., freq, time).
        """
        return F.compute_deltas(specgram, win_length=self.win_length,
            mode=self.mode)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_replication_pad1d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 8
    x1 = xindex // 8
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (4 * x1 + (3 * (3 <= 0 * (0 >= -2 + x0) + (-2 +
        x0) * (-2 + x0 > 0)) + (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 >
        0)) * (0 * (0 >= -2 + x0) + (-2 + x0) * (-2 + x0 > 0) < 3))),
        xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + x2, tmp0, xmask)


@triton.jit
def triton_poi_fused_arange_repeat_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 320
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 5
    x2 = xindex
    tmp0 = -2 + x0
    tmp1 = tmp0.to(tl.float32)
    tl.store(out_ptr0 + x2, tmp1, xmask)


@triton.jit
def triton_poi_fused_div_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = 0.1
    tmp2 = tmp0 * tmp1
    tl.store(in_out_ptr0 + x0, tmp2, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((1, 64, 8), (512, 8, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_replication_pad1d_0[grid(512)](arg0_1, buf0, 512,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((64, 1, 5), (5, 5, 1), torch.float32)
        triton_poi_fused_arange_repeat_1[grid(320)](buf1, 320, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf2 = extern_kernels.convolution(buf0, buf1, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=64, bias=None)
        assert_size_stride(buf2, (1, 64, 4), (256, 4, 1))
        del buf0
        del buf1
        buf3 = buf2
        del buf2
        triton_poi_fused_div_2[grid(256)](buf3, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
    return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0),


class ComputeDeltasNew(torch.nn.Module):
    """Compute delta coefficients of a tensor, usually a spectrogram.

    See `torchaudio.functional.compute_deltas` for more details.

    Args:
        win_length (int): The window length used for computing delta. (Default: ``5``)
        mode (str): Mode parameter passed to padding. (Default: ``'replicate'``)
    """
    __constants__ = ['win_length']

    def __init__(self, win_length: 'int'=5, mode: 'str'='replicate') ->None:
        super(ComputeDeltasNew, self).__init__()
        self.win_length = win_length
        self.mode = mode

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
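The three kernels above realize the delta filter as a grouped 1-D convolution: replicate-pad by 2, correlate with [-2, -1, 0, 1, 2] (the arange_repeat kernel), and scale by 1/10 (the div kernel; 2 * (1^2 + 2^2) = 10). A plain-PyTorch sketch of the same computation (my addition):

import torch
import torchaudio.functional as F

spec = torch.rand(1, 4, 10)
ref = F.compute_deltas(spec, win_length=5)
pad = torch.nn.functional.pad(spec, (2, 2), mode='replicate')
kernel = torch.arange(-2, 3, dtype=spec.dtype)
manual = torch.stack([(pad[..., t:t + 5] * kernel).sum(-1)
                      for t in range(10)], dim=-1) / 10
print(torch.allclose(ref, manual, atol=1e-5))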
Nayef211/audio
ComputeDeltas
false
11,738
[ "BSD-2-Clause" ]
0
241ab1e8284e589262f510ee9411baf2bc374ded
https://github.com/Nayef211/audio/tree/241ab1e8284e589262f510ee9411baf2bc374ded
MuLawEncoding
import torch
from torch import Tensor
import torchaudio.functional as F


class MuLawEncoding(torch.nn.Module):
    """Encode signal based on mu-law companding.  For more info see the
    `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_

    This algorithm assumes the signal has been scaled to between -1 and 1 and
    returns a signal encoded with values from 0 to quantization_channels - 1

    Args:
        quantization_channels (int, optional): Number of channels. (Default: ``256``)
    """
    __constants__ = ['quantization_channels']

    def __init__(self, quantization_channels: 'int'=256) ->None:
        super(MuLawEncoding, self).__init__()
        self.quantization_channels = quantization_channels

    def forward(self, x: 'Tensor') ->Tensor:
        """
        Args:
            x (Tensor): A signal to be encoded.

        Returns:
            x_mu (Tensor): An encoded signal.
        """
        return F.mu_law_encoding(x, self.quantization_channels)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__to_copy_abs_add_div_lift_fresh_log1p_mul_sign_0(in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.full([1], 0, tl.int32)
    tmp2 = tmp1 < tmp0
    tmp3 = tmp2.to(tl.int8)
    tmp4 = tmp0 < tmp1
    tmp5 = tmp4.to(tl.int8)
    tmp6 = tmp3 - tmp5
    tmp7 = tmp6.to(tmp0.dtype)
    tmp8 = tl_math.abs(tmp0)
    tmp9 = 255.0
    tmp10 = tmp9 * tmp8
    tmp11 = libdevice.log1p(tmp10)
    tmp12 = tmp7 * tmp11
    tmp13 = 0.18033687961558437
    tmp14 = tmp12 * tmp13
    tmp15 = 1.0
    tmp16 = tmp14 + tmp15
    tmp17 = 0.5
    tmp18 = tmp16 * tmp17
    tmp19 = tmp18 * tmp9
    tmp20 = tmp19 + tmp17
    tmp21 = tmp20.to(tl.int64)
    tl.store(out_ptr0 + x0, tmp21, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
        get_raw_stream(0)
        triton_poi_fused__to_copy_abs_add_div_lift_fresh_log1p_mul_sign_0[grid
            (256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class MuLawEncodingNew(torch.nn.Module):
    """Encode signal based on mu-law companding.  For more info see the
    `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_

    This algorithm assumes the signal has been scaled to between -1 and 1 and
    returns a signal encoded with values from 0 to quantization_channels - 1

    Args:
        quantization_channels (int, optional): Number of channels. (Default: ``256``)
    """
    __constants__ = ['quantization_channels']

    def __init__(self, quantization_channels: 'int'=256) ->None:
        super(MuLawEncodingNew, self).__init__()
        self.quantization_channels = quantization_channels

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
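A plain-PyTorch sketch (my addition) of what the single fused kernel computes for 256 quantization channels: mu-law companding followed by the shift into integer bins. The constant 0.18033687961558437 baked into the kernel is 1/log1p(255).

import math
import torch

x = torch.rand(4, 4, 4, 4) * 2 - 1           # signal assumed scaled to [-1, 1]
mu = 255.0
x_mu = torch.sign(x) * torch.log1p(mu * x.abs()) / math.log1p(mu)
x_mu = ((x_mu + 1) / 2 * mu + 0.5).to(torch.int64)
print(x_mu.min().item(), x_mu.max().item())  # stays within [0, 255]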
Nayef211/audio
MuLawEncoding
false
11,739
[ "BSD-2-Clause" ]
0
241ab1e8284e589262f510ee9411baf2bc374ded
https://github.com/Nayef211/audio/tree/241ab1e8284e589262f510ee9411baf2bc374ded
Net
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 5)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 32, 3)
        self.pool2 = nn.MaxPool2d(3, 3)
        self.fc1 = nn.Linear(36 * 36 * 32, 64)
        self.drop1 = nn.Dropout(0.5)
        self.fc2 = nn.Linear(64, 136)

    def forward(self, x):
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool2(F.relu(self.conv2(x)))
        x = x.view(-1, 36 * 36 * 32)
        x = self.drop1(F.relu(self.fc1(x)))
        x = self.fc2(x)
        return x


def get_inputs():
    return [torch.rand([4, 1, 121, 121])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 1752192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 13689 % 32
    x0 = xindex % 13689
    x4 = xindex // 13689
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(out_ptr0 + (x0 + 13696 * x4), tmp4, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 430592
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 58
    x1 = xindex // 58 % 58
    x2 = xindex // 3364
    x3 = xindex % 3364
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 234 * x1 + 13696 * x2), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 234 * x1 + 13696 * x2), xmask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (117 + 2 * x0 + 234 * x1 + 13696 * x2), xmask,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (118 + 2 * x0 + 234 * x1 + 13696 * x2), xmask,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + (x3 + 3392 * x2), tmp6, xmask)
    tl.store(out_ptr1 + (x3 + 3456 * x2), tmp16, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 3136 % 32
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 41472
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 18
    x1 = xindex // 18 % 18
    x2 = xindex // 324
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (3 * x0 + 168 * x1 + 3136 * x2), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 3 * x0 + 168 * x1 + 3136 * x2), xmask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 3 * x0 + 168 * x1 + 3136 * x2), xmask,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (56 + 3 * x0 + 168 * x1 + 3136 * x2), xmask,
        eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (57 + 3 * x0 + 168 * x1 + 3136 * x2), xmask,
        eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (58 + 3 * x0 + 168 * x1 + 3136 * x2), xmask,
        eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (112 + 3 * x0 + 168 * x1 + 3136 * x2), xmask,
        eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (113 + 3 * x0 + 168 * x1 + 3136 * x2), xmask,
        eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (114 + 3 * x0 + 168 * x1 + 3136 * x2), xmask,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp8 = triton_helpers.maximum(tmp7, tmp6)
    tmp10 = triton_helpers.maximum(tmp9, tmp8)
    tmp12 = triton_helpers.maximum(tmp11, tmp10)
    tmp14 = triton_helpers.maximum(tmp13, tmp12)
    tmp16 = triton_helpers.maximum(tmp15, tmp14)
    tmp17 = tmp1 > tmp0
    tmp18 = tl.full([1], 1, tl.int8)
    tmp19 = tl.full([1], 0, tl.int8)
    tmp20 = tl.where(tmp17, tmp18, tmp19)
    tmp21 = tmp3 > tmp2
    tmp22 = tl.full([1], 2, tl.int8)
    tmp23 = tl.where(tmp21, tmp22, tmp20)
    tmp24 = tmp5 > tmp4
    tmp25 = tl.full([1], 3, tl.int8)
    tmp26 = tl.where(tmp24, tmp25, tmp23)
    tmp27 = tmp7 > tmp6
    tmp28 = tl.full([1], 4, tl.int8)
    tmp29 = tl.where(tmp27, tmp28, tmp26)
    tmp30 = tmp9 > tmp8
    tmp31 = tl.full([1], 5, tl.int8)
    tmp32 = tl.where(tmp30, tmp31, tmp29)
    tmp33 = tmp11 > tmp10
    tmp34 = tl.full([1], 6, tl.int8)
    tmp35 = tl.where(tmp33, tmp34, tmp32)
    tmp36 = tmp13 > tmp12
    tmp37 = tl.full([1], 7, tl.int8)
    tmp38 = tl.where(tmp36, tmp37, tmp35)
    tmp39 = tmp15 > tmp14
    tmp40 = tl.full([1], 8, tl.int8)
    tmp41 = tl.where(tmp39, tmp40, tmp38)
    tl.store(out_ptr0 + x3, tmp16, xmask)
    tl.store(out_ptr1 + x3, tmp41, xmask)


@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_out_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask)
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x0, tmp4, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (32, 1, 5, 5), (25, 25, 5, 1))
    assert_size_stride(primals_2, (32,), (1,))
    assert_size_stride(primals_3, (4, 1, 121, 121), (14641, 14641, 121, 1))
    assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
    assert_size_stride(primals_5, (32,), (1,))
    assert_size_stride(primals_6, (64, 41472), (41472, 1))
    assert_size_stride(primals_7, (64,), (1,))
    assert_size_stride(primals_8, (136, 64), (64, 1))
    assert_size_stride(primals_9, (136,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 32, 117, 117), (438048, 13689, 117, 1))
        buf1 = empty_strided_cuda((4, 32, 117, 117), (438272, 13696, 117,
            1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(1752192)](buf0, primals_2,
            buf1, 1752192, XBLOCK=1024, num_warps=4, num_stages=1)
        del buf0
        del primals_2
        buf2 = empty_strided_cuda((4, 32, 58, 58), (108544, 3392, 58, 1),
            torch.float32)
        buf3 = empty_strided_cuda((4, 32, 58, 58), (110592, 3456, 58, 1),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_1[grid(430592)](buf1,
            buf2, buf3, 430592, XBLOCK=1024, num_warps=4, num_stages=1)
        buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 32, 56, 56), (100352, 3136, 56, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_relu_2[grid(401408)](buf5, primals_5,
            401408, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_5
        buf6 = empty_strided_cuda((4, 32, 18, 18), (10368, 324, 18, 1),
            torch.float32)
        buf7 = empty_strided_cuda((4, 32, 18, 18), (10368, 324, 18, 1),
            torch.int8)
        triton_poi_fused_max_pool2d_with_indices_3[grid(41472)](buf5, buf6,
            buf7, 41472, XBLOCK=256, num_warps=4, num_stages=1)
        buf8 = empty_strided_cuda((1, 64), (64, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf6, (1, 41472), (41472, 1),
            0), reinterpret_tensor(primals_6, (41472, 64), (1, 41472), 0),
            out=buf8)
        buf9 = buf8
        del buf8
        triton_poi_fused_relu_4[grid(64)](buf9, primals_7, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_7
        buf10 = empty_strided_cuda((1, 136), (136, 1), torch.float32)
        extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8,
            (64, 136), (1, 64), 0), alpha=1, beta=1, out=buf10)
        del primals_9
    return (buf10, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
        buf7, reinterpret_tensor(buf6, (1, 41472), (41472, 1), 0), buf9,
        primals_8, primals_6)


class NetNew(nn.Module):

    def __init__(self):
        super(NetNew, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 5)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(32, 32, 3)
        self.pool2 = nn.MaxPool2d(3, 3)
        self.fc1 = nn.Linear(36 * 36 * 32, 64)
        self.drop1 = nn.Dropout(0.5)
        self.fc2 = nn.Linear(64, 136)

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.fc1.weight
        primals_7 = self.fc1.bias
        primals_8 = self.fc2.weight
        primals_9 = self.fc2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
MonteYang/P1_Facial_Keypoints
Net
false
11,740
[ "MIT" ]
0
1e3e4c9c6b48ec241f6fc7e072b25c7211cebd18
https://github.com/MonteYang/P1_Facial_Keypoints/tree/1e3e4c9c6b48ec241f6fc7e072b25c7211cebd18
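A hypothetical verification harness for rows of this dump. Every name in it is an assumption rather than something the row supplies (this row's eager code field is truncated above): it presumes the row's eager class, its compiled counterpart with the "New" suffix, and the row's get_inputs()/get_init_inputs() helpers are importable in one namespace, and that a CUDA device is present.

import torch

def check_record(eager_cls, compiled_cls, get_inputs, get_init_inputs,
                 atol=1e-4, rtol=1e-4):
    # Rows in this dump store init arguments as [positional_args, kwargs].
    torch.manual_seed(0)
    init_args, init_kwargs = get_init_inputs()
    ref = eager_cls(*init_args, **init_kwargs).cuda().eval()
    opt = compiled_cls(*init_args, **init_kwargs).cuda().eval()
    opt.load_state_dict(ref.state_dict())   # identical parameters in both copies
    inputs = [t.cuda() for t in get_inputs()]
    with torch.no_grad():
        torch.testing.assert_close(opt(*inputs), ref(*inputs),
                                   atol=atol, rtol=rtol)

Running both modules in eval() mode sidesteps dropout, and copying the state dict makes the initialisations identical, so any mismatch isolates the Triton port itself.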
Attention
import math import torch import torch.nn as nn import torch as t class Linear(nn.Module): """ Linear Module """ def __init__(self, in_dim, out_dim, bias=True, w_init='linear'): """ :param in_dim: dimension of input :param out_dim: dimension of output :param bias: boolean. if True, bias is included. :param w_init: str. weight inits with xavier initialization. """ super(Linear, self).__init__() self.linear_layer = nn.Linear(in_dim, out_dim, bias=bias) nn.init.xavier_uniform_(self.linear_layer.weight, gain=nn.init. calculate_gain(w_init)) def forward(self, x): return self.linear_layer(x) class MultiheadAttention(nn.Module): """ Multihead attention mechanism (dot attention) """ def __init__(self, num_hidden_k): """ :param num_hidden_k: dimension of hidden """ super(MultiheadAttention, self).__init__() self.num_hidden_k = num_hidden_k self.attn_dropout = nn.Dropout(p=0.1) def forward(self, key, value, query, mask=None, query_mask=None): attn = t.bmm(query, key.transpose(1, 2)) attn = attn / math.sqrt(self.num_hidden_k) if mask is not None: attn = attn.masked_fill(mask, -2 ** 32 + 1) attn = t.softmax(attn, dim=-1) else: attn = t.softmax(attn, dim=-1) if query_mask is not None: attn = attn * query_mask result = t.bmm(attn, value) return result, attn class Attention(nn.Module): """ Attention Network """ def __init__(self, num_hidden, h=4): """ :param num_hidden: dimension of hidden :param h: num of heads """ super(Attention, self).__init__() self.num_hidden = num_hidden self.num_hidden_per_attn = num_hidden // h self.h = h self.key = Linear(num_hidden, num_hidden, bias=False) self.value = Linear(num_hidden, num_hidden, bias=False) self.query = Linear(num_hidden, num_hidden, bias=False) self.multihead = MultiheadAttention(self.num_hidden_per_attn) self.residual_dropout = nn.Dropout(p=0.1) self.final_linear = Linear(num_hidden * 2, num_hidden) self.layer_norm_1 = nn.LayerNorm(num_hidden) def forward(self, memory, decoder_input, mask=None, query_mask=None): batch_size = memory.size(0) seq_k = memory.size(1) seq_q = decoder_input.size(1) if query_mask is not None: query_mask = query_mask.unsqueeze(-1).repeat(1, 1, seq_k) query_mask = query_mask.repeat(self.h, 1, 1) if mask is not None: mask = mask.repeat(self.h, 1, 1) key = self.key(memory).view(batch_size, seq_k, self.h, self. num_hidden_per_attn) value = self.value(memory).view(batch_size, seq_k, self.h, self. num_hidden_per_attn) query = self.query(decoder_input).view(batch_size, seq_q, self.h, self.num_hidden_per_attn) key = key.permute(2, 0, 1, 3).contiguous().view(-1, seq_k, self. num_hidden_per_attn) value = value.permute(2, 0, 1, 3).contiguous().view(-1, seq_k, self .num_hidden_per_attn) query = query.permute(2, 0, 1, 3).contiguous().view(-1, seq_q, self .num_hidden_per_attn) result, attns = self.multihead(key, value, query, mask=mask, query_mask=query_mask) result = result.view(self.h, batch_size, seq_q, self. num_hidden_per_attn) result = result.permute(1, 2, 0, 3).contiguous().view(batch_size, seq_q, -1) result = t.cat([decoder_input, result], dim=-1) result = self.final_linear(result) result = result + decoder_input result = self.layer_norm_1(result) return result, attns def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'num_hidden': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn import torch as t assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_cat_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 128 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (x1 + 16 * (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 + tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 + tmp12 tmp14 = tmp10 + tmp13 tmp15 = 4.0 tmp16 = tmp14 / tmp15 tmp17 = tmp2 - tmp16 tmp18 = tmp17 * tmp17 tmp19 = tmp5 - tmp16 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp9 - tmp16 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp13 - tmp16 tmp26 = tmp25 * tmp25 tmp27 = tmp24 + tmp26 tmp28 = tmp27 / tmp15 tl.store(out_ptr0 + x0, tmp16, xmask) tl.store(out_ptr1 + x0, tmp28, xmask) @triton.jit def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x2, xmask) tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 - tmp3 tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = libdevice.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tmp11 = tmp9 * tmp10 tmp13 = tmp11 + tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, 4), (4, 1)) assert_size_stride(primals_6, (4, 8), (8, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), 
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf0) del primals_3 buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf2) del primals_5 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(4, 16)](buf2, buf3, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf4 = reinterpret_tensor(buf2, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf2 triton_poi_fused_clone_0[grid(4, 16)](buf0, buf4, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused__softmax_1[grid(256)](buf5, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1) buf7 = buf5 del buf5 triton_poi_fused__softmax_2[grid(256)](buf6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf6 buf8 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0) del buf0 triton_poi_fused_clone_0[grid(4, 16)](buf1, buf8, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 1), 0) del buf1 extern_kernels.bmm(buf7, reinterpret_tensor(buf8, (16, 4, 1), (4, 1, 0), 0), out=buf9) buf10 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32) triton_poi_fused_cat_3[grid(128)](primals_2, buf9, buf10, 128, XBLOCK=128, num_warps=4, num_stages=1) buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0) del buf9 extern_kernels.addmm(primals_7, reinterpret_tensor(buf10, (16, 8), (8, 1), 0), reinterpret_tensor(primals_6, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf11) del primals_7 buf12 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_add_native_layer_norm_4[grid(16)](buf11, primals_2, buf12, buf13, 16, XBLOCK=16, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_add_native_layer_norm_5[grid(64)](buf11, primals_2, buf12, buf13, primals_8, primals_9, buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf12 del buf13 del primals_9 return buf14, buf7, primals_2, primals_8, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf7, reinterpret_tensor(buf10, (16, 8), (8, 1), 0 ), buf11, primals_6, reinterpret_tensor(buf8, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 1), 0) class Linear(nn.Module): """ Linear Module """ def __init__(self, in_dim, out_dim, bias=True, w_init='linear'): """ :param in_dim: dimension of input :param out_dim: dimension of output :param bias: boolean. if True, bias is included. :param w_init: str. weight inits with xavier initialization. """ super(Linear, self).__init__() self.linear_layer = nn.Linear(in_dim, out_dim, bias=bias) nn.init.xavier_uniform_(self.linear_layer.weight, gain=nn.init. 
calculate_gain(w_init)) def forward(self, x): return self.linear_layer(x) class MultiheadAttention(nn.Module): """ Multihead attention mechanism (dot attention) """ def __init__(self, num_hidden_k): """ :param num_hidden_k: dimension of hidden """ super(MultiheadAttention, self).__init__() self.num_hidden_k = num_hidden_k self.attn_dropout = nn.Dropout(p=0.1) def forward(self, key, value, query, mask=None, query_mask=None): attn = t.bmm(query, key.transpose(1, 2)) attn = attn / math.sqrt(self.num_hidden_k) if mask is not None: attn = attn.masked_fill(mask, -2 ** 32 + 1) attn = t.softmax(attn, dim=-1) else: attn = t.softmax(attn, dim=-1) if query_mask is not None: attn = attn * query_mask result = t.bmm(attn, value) return result, attn class AttentionNew(nn.Module): """ Attention Network """ def __init__(self, num_hidden, h=4): """ :param num_hidden: dimension of hidden :param h: num of heads """ super(AttentionNew, self).__init__() self.num_hidden = num_hidden self.num_hidden_per_attn = num_hidden // h self.h = h self.key = Linear(num_hidden, num_hidden, bias=False) self.value = Linear(num_hidden, num_hidden, bias=False) self.query = Linear(num_hidden, num_hidden, bias=False) self.multihead = MultiheadAttention(self.num_hidden_per_attn) self.residual_dropout = nn.Dropout(p=0.1) self.final_linear = Linear(num_hidden * 2, num_hidden) self.layer_norm_1 = nn.LayerNorm(num_hidden) def forward(self, input_0, input_1): primals_3 = self.key.linear_layer.weight primals_4 = self.value.linear_layer.weight primals_5 = self.query.linear_layer.weight primals_6 = self.final_linear.linear_layer.weight primals_7 = self.final_linear.linear_layer.bias primals_8 = self.layer_norm_1.weight primals_9 = self.layer_norm_1.bias primals_1 = input_0 primals_2 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
Munna-Manoj/Team7_TTS
Attention
false
11741
[ "MIT" ]
0
5e2d473a2afe429023876bcc51c2ac966a4938b8
https://github.com/Munna-Manoj/Team7_TTS/tree/5e2d473a2afe429023876bcc51c2ac966a4938b8
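A side note on the two triton_poi_fused__softmax_* kernels in the row above: together they are the standard max-shifted softmax split into two passes, and the tmp1 = 1.0 scale is 1/sqrt(d_k) folded in as a constant, since num_hidden=4 with h=4 heads gives a per-head depth of 1. A plain-PyTorch sketch of the same decomposition, for illustration only:

import torch

def two_pass_softmax(scores: torch.Tensor) -> torch.Tensor:
    # Pass 1 (cf. triton_poi_fused__softmax_1): shift by the row max before
    # exponentiating, for numerical stability.
    numer = (scores - scores.max(dim=-1, keepdim=True).values).exp()
    # Pass 2 (cf. triton_poi_fused__softmax_2): normalize by the row sum.
    return numer / numer.sum(dim=-1, keepdim=True)

x = torch.randn(16, 4, 4)
assert torch.allclose(two_pass_softmax(x), torch.softmax(x, dim=-1), atol=1e-6)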
SlidingWindowCmn
import torch from torch import Tensor import torchaudio.functional as F class SlidingWindowCmn(torch.nn.Module): """ Apply sliding-window cepstral mean (and optionally variance) normalization per utterance. Args: cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600) min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start). Only applicable if center == false, ignored if center==true (int, default = 100) center (bool, optional): If true, use a window centered on the current frame (to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false) norm_vars (bool, optional): If true, normalize variance to one. (bool, default = false) """ def __init__(self, cmn_window: 'int'=600, min_cmn_window: 'int'=100, center: 'bool'=False, norm_vars: 'bool'=False) ->None: super().__init__() self.cmn_window = cmn_window self.min_cmn_window = min_cmn_window self.center = center self.norm_vars = norm_vars def forward(self, waveform: 'Tensor') ->Tensor: """ Args: waveform (Tensor): Tensor of audio of dimension (..., time). Returns: Tensor: Tensor of audio of dimension (..., time). """ cmn_waveform = F.sliding_window_cmn(waveform, self.cmn_window, self .min_cmn_window, self.center, self.norm_vars) return cmn_waveform def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_copy_div_sub_zeros_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 4 x0 = xindex % 4 x2 = xindex // 16 x3 = xindex tmp3 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tmp0 = x1 tmp1 = tl.full([1], 3, tl.int32) tmp2 = tmp0 == tmp1 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = tmp8 + tmp3 tmp10 = 0.25 tmp11 = tmp9 * tmp10 tmp12 = tmp3 - tmp11 tmp13 = tl.full([1], 2, tl.int32) tmp14 = tmp0 == tmp13 tmp15 = tmp7 - tmp11 tmp16 = tl.full([1], 1, tl.int32) tmp17 = tmp0 == tmp16 tmp18 = tmp5 - tmp11 tmp19 = tl.full([1], 0, tl.int32) tmp20 = tmp0 == tmp19 tmp21 = tmp4 - tmp11 tmp22 = 0.0 tmp23 = tl.where(tmp20, tmp21, tmp22) tmp24 = tl.where(tmp17, tmp18, tmp23) tmp25 = tl.where(tmp14, tmp15, tmp24) tmp26 = tl.where(tmp2, tmp12, tmp25) tl.store(out_ptr0 + x3, tmp26, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_copy_div_sub_zeros_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), class SlidingWindowCmnNew(torch.nn.Module): """ Apply sliding-window cepstral mean (and optionally variance) normalization per utterance. Args: cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600) min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start). Only applicable if center == false, ignored if center==true (int, default = 100) center (bool, optional): If true, use a window centered on the current frame (to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false) norm_vars (bool, optional): If true, normalize variance to one. (bool, default = false) """ def __init__(self, cmn_window: 'int'=600, min_cmn_window: 'int'=100, center: 'bool'=False, norm_vars: 'bool'=False) ->None: super().__init__() self.cmn_window = cmn_window self.min_cmn_window = min_cmn_window self.center = center self.norm_vars = norm_vars def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Nayef211/audio
SlidingWindowCmn
false
11742
[ "BSD-2-Clause" ]
0
241ab1e8284e589262f510ee9411baf2bc374ded
https://github.com/Nayef211/audio/tree/241ab1e8284e589262f510ee9411baf2bc374ded
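Worth noting about the single fused kernel above: for this row's tiny (4, 4, 4, 4) input with center=False and min_cmn_window=100, the CMN window always spans all four frames, so the whole op collapses to subtracting the per-utterance mean over the frame axis; the kernel's 0.25 factor is 1/num_frames. A torch-only sketch of that simplification, valid for these shapes only:

import torch

# Illustrative simplification for this row's shapes; the general
# sliding_window_cmn op does not reduce to this for longer utterances.
x = torch.rand(4, 4, 4, 4)
cmn = x - x.mean(dim=-2, keepdim=True)   # matches the kernel's add/div/sub chain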
AmplitudeToDB
import math import torch from torch import Tensor import torchaudio.functional as F from typing import Optional class AmplitudeToDB(torch.nn.Module): """Turn a tensor from the power/amplitude scale to the decibel scale. This output depends on the maximum value in the input tensor, and so may return different values for an audio clip split into snippets vs. a a full clip. Args: stype (str, optional): scale of input tensor ('power' or 'magnitude'). The power being the elementwise square of the magnitude. (Default: ``'power'``) top_db (float, optional): minimum negative cut-off in decibels. A reasonable number is 80. (Default: ``None``) """ __constants__ = ['multiplier', 'amin', 'ref_value', 'db_multiplier'] def __init__(self, stype: 'str'='power', top_db: 'Optional[float]'=None ) ->None: super(AmplitudeToDB, self).__init__() self.stype = stype if top_db is not None and top_db < 0: raise ValueError('top_db must be positive value') self.top_db = top_db self.multiplier = 10.0 if stype == 'power' else 20.0 self.amin = 1e-10 self.ref_value = 1.0 self.db_multiplier = math.log10(max(self.amin, self.ref_value)) def forward(self, x: 'Tensor') ->Tensor: """Numerically stable implementation from Librosa. https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html Args: x (Tensor): Input tensor before being converted to decibel scale. Returns: Tensor: Output tensor in decibel scale. """ return F.amplitude_to_DB(x, self.multiplier, self.amin, self. db_multiplier, self.top_db) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import math from typing import Optional assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_clamp_log10_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1e-10 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = libdevice.log10(tmp2) tmp4 = 10.0 tmp5 = tmp3 * tmp4 tmp6 = 0.0 tmp7 = tmp5 - tmp6 tl.store(out_ptr0 + x0, tmp7, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_log10_mul_sub_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class AmplitudeToDBNew(torch.nn.Module): """Turn a tensor from the power/amplitude scale to the decibel scale. This output depends on the maximum value in the input tensor, and so may return different values for an audio clip split into snippets vs. a a full clip. Args: stype (str, optional): scale of input tensor ('power' or 'magnitude'). The power being the elementwise square of the magnitude. (Default: ``'power'``) top_db (float, optional): minimum negative cut-off in decibels. A reasonable number is 80. (Default: ``None``) """ __constants__ = ['multiplier', 'amin', 'ref_value', 'db_multiplier'] def __init__(self, stype: 'str'='power', top_db: 'Optional[float]'=None ) ->None: super(AmplitudeToDBNew, self).__init__() self.stype = stype if top_db is not None and top_db < 0: raise ValueError('top_db must be positive value') self.top_db = top_db self.multiplier = 10.0 if stype == 'power' else 20.0 self.amin = 1e-10 self.ref_value = 1.0 self.db_multiplier = math.log10(max(self.amin, self.ref_value)) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Nayef211/audio
AmplitudeToDB
false
11743
[ "BSD-2-Clause" ]
0
241ab1e8284e589262f510ee9411baf2bc374ded
https://github.com/Nayef211/audio/tree/241ab1e8284e589262f510ee9411baf2bc374ded
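The fused kernel's arithmetic written out: with stype='power' the multiplier is 10.0, and with ref_value=1.0 the db_multiplier term log10(max(amin, 1.0)) is 0, which is why the kernel literally subtracts 0.0. A sketch for these defaults only (top_db=None):

import torch

def amplitude_to_db_power(x: torch.Tensor, amin: float = 1e-10) -> torch.Tensor:
    # Clamp to amin before the log, exactly as the kernel does, then scale
    # to decibels; the "- multiplier * db_multiplier" term is zero here.
    return 10.0 * torch.log10(x.clamp(min=amin))

db = amplitude_to_db_power(torch.rand(4, 4, 4, 4))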
DiceLoss
import torch import torch.nn as nn class DiceLoss(nn.Module): def __init__(self, eps=1e-06): super().__init__() assert isinstance(eps, float) self.eps = eps def forward(self, pred, target, mask=None): pred = pred.contiguous().view(pred.size()[0], -1) target = target.contiguous().view(target.size()[0], -1) if mask is not None: mask = mask.contiguous().view(mask.size()[0], -1) pred = pred * mask target = target * mask a = torch.sum(pred * target) b = torch.sum(pred) c = torch.sum(target) d = 2 * a / (b + c + self.eps) return 1 - d def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [RBLOCK]) tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0)) tmp6 = tl.broadcast_to(tmp0, [RBLOCK]) tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0)) tmp9 = tl.broadcast_to(tmp1, [RBLOCK]) tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0)) tmp12 = 2.0 tmp13 = tmp5 * tmp12 tmp14 = tmp8 + tmp11 tmp15 = 1e-06 tmp16 = tmp14 + tmp15 tmp17 = tmp13 / tmp16 tmp18 = 1.0 tmp19 = tmp18 - tmp17 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf3 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf3, class DiceLossNew(nn.Module): def __init__(self, eps=1e-06): super().__init__() assert isinstance(eps, float) self.eps = eps def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
NceBoy/mmocr
DiceLoss
false
11744
[ "Apache-2.0" ]
0
3fb7a18d7eb44799e75c1991e5da2044b458d411
https://github.com/NceBoy/mmocr/tree/3fb7a18d7eb44799e75c1991e5da2044b458d411
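Reference arithmetic for the single fused reduction above, which accumulates all three global sums in one pass and combines them at the end (unmasked path only, matching the traced graph):

import torch

def dice_loss(pred: torch.Tensor, target: torch.Tensor, eps: float = 1e-6):
    # a, b, c are the three sums the kernel reduces in one sweep.
    a = (pred * target).sum()
    b = pred.sum()
    c = target.sum()
    return 1 - 2 * a / (b + c + eps)

loss = dice_loss(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))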
MuLawDecoding
import torch from torch import Tensor import torchaudio.functional as F class MuLawDecoding(torch.nn.Module): """Decode mu-law encoded signal. For more info see the `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This expects an input with values between 0 and quantization_channels - 1 and returns a signal scaled between -1 and 1. Args: quantization_channels (int, optional): Number of channels. (Default: ``256``) """ __constants__ = ['quantization_channels'] def __init__(self, quantization_channels: 'int'=256) ->None: super(MuLawDecoding, self).__init__() self.quantization_channels = quantization_channels def forward(self, x_mu: 'Tensor') ->Tensor: """ Args: x_mu (Tensor): A mu-law encoded signal which needs to be decoded. Returns: Tensor: The signal decoded. """ return F.mu_law_decoding(x_mu, self.quantization_channels) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_abs_div_exp_lift_fresh_log1p_mul_sign_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.00392156862745098 tmp2 = tmp0 * tmp1 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp5 = 1.0 tmp6 = tmp4 - tmp5 tmp7 = tl.full([1], 0, tl.int32) tmp8 = tmp7 < tmp6 tmp9 = tmp8.to(tl.int8) tmp10 = tmp6 < tmp7 tmp11 = tmp10.to(tl.int8) tmp12 = tmp9 - tmp11 tmp13 = tmp12.to(tmp6.dtype) tmp14 = tl_math.abs(tmp6) tmp15 = 5.545177459716797 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tmp18 = tmp17 - tmp5 tmp19 = tmp13 * tmp18 tmp20 = tmp19 * tmp1 tl.store(out_ptr0 + x0, tmp20, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_abs_div_exp_lift_fresh_log1p_mul_sign_sub_0[grid(256) ](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class MuLawDecodingNew(torch.nn.Module): """Decode mu-law encoded signal. For more info see the `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_ This expects an input with values between 0 and quantization_channels - 1 and returns a signal scaled between -1 and 1. Args: quantization_channels (int, optional): Number of channels. (Default: ``256``) """ __constants__ = ['quantization_channels'] def __init__(self, quantization_channels: 'int'=256) ->None: super(MuLawDecodingNew, self).__init__() self.quantization_channels = quantization_channels def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Nayef211/audio
MuLawDecoding
false
11745
[ "BSD-2-Clause" ]
0
241ab1e8284e589262f510ee9411baf2bc374ded
https://github.com/Nayef211/audio/tree/241ab1e8284e589262f510ee9411baf2bc374ded
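The kernel's magic constants decoded: 0.00392156862745098 is 1/255 (1/mu for quantization_channels=256) and 5.545177459716797 is the float32 rounding of log1p(255) = ln 256. The same computation in plain torch, as a sketch:

import math
import torch

def mu_law_decode(x_mu: torch.Tensor, quantization_channels: int = 256):
    mu = quantization_channels - 1.0
    x = (x_mu / mu) * 2.0 - 1.0                          # rescale to [-1, 1]
    # sign * (exp(|x| * ln(1 + mu)) - 1) / mu, via expm1 for accuracy
    return torch.sign(x) * torch.expm1(x.abs() * math.log1p(mu)) / mu

wav = mu_law_decode(torch.rand(4, 4, 4, 4) * 255)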
AvgPool2d
from torch.nn import Module import torch import torch as th class AvgPool2d(Module): """ This class is the beginning of an exact python port of the torch.nn.AvgPool2d module. Because PySyft cannot hook into layers which are implemented in C++, our special functionalities (such as encrypted computation) do not work with torch.nn.AvgPool2d and so we must have python ports available for all layer types which we seek to use. Note that this module has been tested to ensure that it outputs the exact output values that the main module outputs in the same order that the main module does. However, there is often some rounding error of unknown origin, usually less than 1e-6 in magnitude. This module has not yet been tested with GPUs but should work out of the box. """ def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None): """For information on the constructor arguments, please see PyTorch's documentation in torch.nn.AvgPool2d""" super().__init__() if not (padding == 0 and ceil_mode is False and count_include_pad is True and divisor_override is None): raise NotImplementedError('Not supported settings') if stride is None: stride = kernel_size self.kernel_size = kernel_size self.stride = stride self.padding = padding self.ceil_mode = ceil_mode self.count_include_pad = count_include_pad self.divisor_override = divisor_override self._one_over_kernel_size = 1 / (self.kernel_size * self.kernel_size) def forward(self, data): batch_size, out_channels, rows, cols = data.shape kernel_results = [] for i in range(0, rows - self.kernel_size + 1, self.stride): for j in range(0, cols - self.kernel_size + 1, self.stride): kernel_out = data[:, :, i:i + self.kernel_size, j:j + self. kernel_size].sum((2, 3)) * self._one_over_kernel_size kernel_results.append(kernel_out.unsqueeze(2)) pred = th.cat(kernel_results, axis=2).view(batch_size, out_channels, int(rows / self.stride), int(cols / self.stride)) return pred def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'kernel_size': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp5 = 0.0625 tmp6 = tmp4 * tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp6, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_mul_sum_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del arg0_1 return reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0), class AvgPool2dNew(Module): """ This class is the beginning of an exact python port of the torch.nn.AvgPool2d module. Because PySyft cannot hook into layers which are implemented in C++, our special functionalities (such as encrypted computation) do not work with torch.nn.AvgPool2d and so we must have python ports available for all layer types which we seek to use. Note that this module has been tested to ensure that it outputs the exact output values that the main module outputs in the same order that the main module does. However, there is often some rounding error of unknown origin, usually less than 1e-6 in magnitude. This module has not yet been tested with GPUs but should work out of the box. """ def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None): """For information on the constructor arguments, please see PyTorch's documentation in torch.nn.AvgPool2d""" super().__init__() if not (padding == 0 and ceil_mode is False and count_include_pad is True and divisor_override is None): raise NotImplementedError('Not supported settings') if stride is None: stride = kernel_size self.kernel_size = kernel_size self.stride = stride self.padding = padding self.ceil_mode = ceil_mode self.count_include_pad = count_include_pad self.divisor_override = divisor_override self._one_over_kernel_size = 1 / (self.kernel_size * self.kernel_size) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
NiWaRe/PySyft
AvgPool2d
false
11746
[ "Apache-2.0" ]
0
b5abe66ea949d60be14a08d2e4e32e9587c7bf5c
https://github.com/NiWaRe/PySyft/tree/b5abe66ea949d60be14a08d2e4e32e9587c7bf5c
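For this row's shapes (kernel_size=4 over a 4x4 map) the port's pooling loop body executes exactly once per channel, so the whole pool is a single mean over 16 spatial elements; the fused kernel's 0.0625 factor is 1/(4*4). A quick torch check:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
pooled = x.sum(dim=(2, 3), keepdim=True) * 0.0625        # shape (4, 4, 1, 1)
assert torch.allclose(pooled, F.avg_pool2d(x, 4))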
MultiHeadAttention
import math import torch import torch.nn as nn def scaled_dot_product_attention(query, keys, values, mask=None): d_k = keys.shape[-1] dot_score = query @ keys.transpose(-2, -1) / math.sqrt(d_k) if mask is not None: dot_score = dot_score.masked_fill(mask == 0, -1000000000.0) attn_score = torch.softmax(dot_score, dim=-1) return attn_score @ values, attn_score class MultiHeadAttention(nn.Module): def __init__(self, d_model, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.depth = d_model // num_heads self.d_model = self.num_heads * self.depth self.wq = nn.Linear(d_model, d_model) self.wk = nn.Linear(d_model, d_model) self.wv = nn.Linear(d_model, d_model) self.wo = nn.Linear(d_model, d_model) def reshape_for_multi_heads_attention(self, t): batch_size = t.shape[0] t = t.view(batch_size, -1, self.num_heads, self.depth) return t.transpose(1, 2) def forward(self, q, k, v, mask): batch_size = q.shape[0] q = self.wq(q) k = self.wk(k) v = self.wv(v) q = self.reshape_for_multi_heads_attention(q) k = self.reshape_for_multi_heads_attention(k) v = self.reshape_for_multi_heads_attention(v) scaled_attention, _attention_weights = scaled_dot_product_attention(q, k, v, mask) scaled_attention = scaled_attention.transpose(2, 1).contiguous().view( batch_size, -1, self.d_model) return self.wo(scaled_attention) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4]), torch.rand([4, 4, 16, 16])] def get_init_inputs(): return [[], {'d_model': 4, 'num_heads': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask) tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask) @triton.jit def triton_per_fused__softmax_div_eq_masked_fill_1(in_ptr0, in_ptr1, out_ptr0, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 256 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp3 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tmp4 = 1.0 tmp5 = tmp3 * tmp4 tmp6 = -1000000000.0 tmp7 = tl.where(tmp2, tmp6, tmp5) tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, float('-inf')) tmp11 = triton_helpers.max2(tmp10, 1)[:, None] tmp12 = tmp7 - tmp11 tmp13 = tl_math.exp(tmp12) tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp18 = tmp13 / tmp17 tl.store(out_ptr0 + (r1 + 16 * x0), tmp2, xmask) tl.store(out_ptr3 + (r1 + 16 * x0), tmp18, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. 
constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 16 y1 = yindex // 16 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 ) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_7, (4, 4), (4, 1)) assert_size_stride(primals_8, (4,), (1,)) assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_10, (4, 4, 16, 16), (1024, 256, 16, 1)) assert_size_stride(primals_11, (4, 4), (4, 1)) assert_size_stride(primals_12, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2) del primals_7 buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 16)](buf0, primals_3, buf3, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0) del buf0 triton_poi_fused_clone_0[grid(16, 16)](buf1, primals_5, buf4, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_5 buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5) buf6 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch .bool) buf9 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch .float32) triton_per_fused__softmax_div_eq_masked_fill_1[grid(256)](primals_10, buf5, buf6, buf9, 256, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf5 del primals_10 buf10 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0) del buf1 triton_poi_fused_clone_0[grid(16, 16)](buf2, primals_8, buf10, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1) del primals_8 buf11 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0) del buf2 extern_kernels.bmm(reinterpret_tensor(buf9, (16, 16, 16), (256, 16, 1), 0), reinterpret_tensor(buf10, (16, 16, 1), (16, 1, 0), 0), out=buf11) buf12 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32) triton_poi_fused_clone_2[grid(64, 4)](buf11, buf12, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) buf13 = reinterpret_tensor(buf11, (64, 4), (4, 1), 0) del buf11 
extern_kernels.addmm(primals_12, reinterpret_tensor(buf12, (64, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13) del primals_12 return reinterpret_tensor(buf13, (4, 16, 4), (64, 4, 1), 0 ), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0 ), buf6, buf9, reinterpret_tensor(buf12, (64, 4), (4, 1), 0 ), primals_11, reinterpret_tensor(buf10, (16, 1, 16), (16, 1, 1), 0 ), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0 ), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0) def scaled_dot_product_attention(query, keys, values, mask=None): d_k = keys.shape[-1] dot_score = query @ keys.transpose(-2, -1) / math.sqrt(d_k) if mask is not None: dot_score = dot_score.masked_fill(mask == 0, -1000000000.0) attn_score = torch.softmax(dot_score, dim=-1) return attn_score @ values, attn_score class MultiHeadAttentionNew(nn.Module): def __init__(self, d_model, num_heads): super(MultiHeadAttentionNew, self).__init__() self.num_heads = num_heads self.depth = d_model // num_heads self.d_model = self.num_heads * self.depth self.wq = nn.Linear(d_model, d_model) self.wk = nn.Linear(d_model, d_model) self.wv = nn.Linear(d_model, d_model) self.wo = nn.Linear(d_model, d_model) def reshape_for_multi_heads_attention(self, t): batch_size = t.shape[0] t = t.view(batch_size, -1, self.num_heads, self.depth) return t.transpose(1, 2) def forward(self, input_0, input_1, input_2, input_3): primals_2 = self.wq.weight primals_3 = self.wq.bias primals_4 = self.wk.weight primals_5 = self.wk.bias primals_7 = self.wv.weight primals_8 = self.wv.bias primals_11 = self.wo.weight primals_12 = self.wo.bias primals_1 = input_0 primals_6 = input_1 primals_9 = input_2 primals_10 = input_3 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12]) return output[0]
NathanYanJing/TransformerReplication
MultiHeadAttention
false
11748
[ "MIT" ]
0
b20f987dcc507724971f843c2d214c9c76bd8e34
https://github.com/NathanYanJing/TransformerReplication/tree/b20f987dcc507724971f843c2d214c9c76bd8e34
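The masking convention used above, in isolation: score positions where mask == 0 are filled with -1e9 before the softmax, so they receive numerically zero attention weight. A small sketch with this row's per-head shapes (seq_len=16, depth=1); the half-masked pattern below is made up for illustration:

import math
import torch

q = torch.rand(4, 4, 16, 1)                              # (batch, heads, seq, depth)
k = torch.rand(4, 4, 16, 1)
mask = torch.ones(4, 4, 16, 16)
mask[..., :8] = 0                                        # forbid the first 8 keys

scores = (q @ k.transpose(-2, -1)) / math.sqrt(k.shape[-1])
scores = scores.masked_fill(mask == 0, -1000000000.0)
attn = torch.softmax(scores, dim=-1)
assert torch.all(attn[..., :8] < 1e-6)                   # masked keys ignored
assert torch.allclose(attn.sum(-1), torch.ones(4, 4, 16))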
EncoderLayer
import math import torch import torch.nn as nn import torch.nn.functional as F def scaled_dot_product_attention(query, keys, values, mask=None): d_k = keys.shape[-1] dot_score = query @ keys.transpose(-2, -1) / math.sqrt(d_k) if mask is not None: dot_score = dot_score.masked_fill(mask == 0, -1000000000.0) attn_score = torch.softmax(dot_score, dim=-1) return attn_score @ values, attn_score class MultiHeadAttention(nn.Module): def __init__(self, d_model, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.depth = d_model // num_heads self.d_model = self.num_heads * self.depth self.wq = nn.Linear(d_model, d_model) self.wk = nn.Linear(d_model, d_model) self.wv = nn.Linear(d_model, d_model) self.wo = nn.Linear(d_model, d_model) def reshape_for_multi_heads_attention(self, t): batch_size = t.shape[0] t = t.view(batch_size, -1, self.num_heads, self.depth) return t.transpose(1, 2) def forward(self, q, k, v, mask): batch_size = q.shape[0] q = self.wq(q) k = self.wk(k) v = self.wv(v) q = self.reshape_for_multi_heads_attention(q) k = self.reshape_for_multi_heads_attention(k) v = self.reshape_for_multi_heads_attention(v) scaled_attention, _attention_weights = scaled_dot_product_attention(q, k, v, mask) scaled_attention = scaled_attention.transpose(2, 1).contiguous().view( batch_size, -1, self.d_model) return self.wo(scaled_attention) class PositionwiseFeedForward(nn.Module): def __init__(self, d_model, d_ff): super(PositionwiseFeedForward, self).__init__() self.w_1 = nn.Linear(d_model, d_ff) self.w_2 = nn.Linear(d_ff, d_model) def forward(self, x): return self.w_2(F.relu(self.w_1(x))) class EncoderLayer(nn.Module): def __init__(self, d_model, num_heads, d_ff, dropout=0.1): super(EncoderLayer, self).__init__() self.multi_head_attention = MultiHeadAttention(d_model, num_heads) self.feed_forward = PositionwiseFeedForward(d_model, d_ff) self.layernorm1 = nn.LayerNorm(d_model) self.layernorm2 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) def forward(self, x, mask): attention_output = self.dropout1(self.multi_head_attention(x, x, x, mask)) ffn_input = self.layernorm1(x + attention_output) ffn_output = self.dropout2(self.feed_forward(ffn_input)) output = self.layernorm2(ffn_input + ffn_output) return output def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])] def get_init_inputs(): return [[], {'d_model': 4, 'num_heads': 4, 'd_ff': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_eq_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.0 tmp2 = tmp0 == tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl .int1) tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp7 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp12 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ).to(tl.int1) tmp17 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp2 = 1.0 tmp3 = tmp1 * tmp2 tmp4 = -1000000000.0 tmp5 = tl.where(tmp0, tmp4, tmp3) tmp8 = tmp7 * tmp2 tmp9 = tl.where(tmp6, tmp4, tmp8) tmp10 = triton_helpers.maximum(tmp5, tmp9) tmp13 = tmp12 * tmp2 tmp14 = tl.where(tmp11, tmp4, tmp13) tmp15 = triton_helpers.maximum(tmp10, tmp14) tmp18 = tmp17 * tmp2 tmp19 = tl.where(tmp16, tmp4, tmp18) tmp20 = triton_helpers.maximum(tmp15, tmp19) tmp21 = tmp5 - tmp20 tmp22 = tl_math.exp(tmp21) tmp23 = tmp9 - tmp20 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tmp26 = tmp14 - tmp20 tmp27 = tl_math.exp(tmp26) tmp28 = tmp25 + tmp27 tmp29 = tmp19 - tmp20 tmp30 = tl_math.exp(tmp29) tmp31 = tmp28 + tmp30 tl.store(out_ptr0 + x2, tmp20, xmask) tl.store(out_ptr1 + x2, tmp31, xmask) @triton.jit def triton_poi_fused__softmax_div_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex % 64 x4 = xindex x5 = xindex // 4 
    tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl.int1)
    tmp1 = tl.load(in_out_ptr0 + x4, xmask)
    tmp6 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
    tmp2 = 1.0
    tmp3 = tmp1 * tmp2
    tmp4 = -1000000000.0
    tmp5 = tl.where(tmp0, tmp4, tmp3)
    tmp7 = tmp5 - tmp6
    tmp8 = tl_math.exp(tmp7)
    tmp10 = tmp8 / tmp9
    tl.store(in_out_ptr0 + x4, tmp10, xmask)


@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 16
    xnumel = 4
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = yindex // 4
    y3 = yindex
    tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
    out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x0, tmp16, xmask)
    tl.store(out_ptr1 + x0, tmp28, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


@triton.jit
def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(in_out_ptr0 + x2, tmp4, xmask)
    tl.store(out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_out_ptr0 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp4 = tmp0 + tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13, primals_14, primals_15, primals_16,
        primals_17, primals_18) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_9, (4, 4), (4, 1))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (4,), (1,))
    assert_size_stride(primals_13, (4, 4), (4, 1))
    assert_size_stride(primals_14, (4,), (1,))
    assert_size_stride(primals_15, (4, 4), (4, 1))
    assert_size_stride(primals_16, (4,), (1,))
    assert_size_stride(primals_17, (4,), (1,))
    assert_size_stride(primals_18, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
        del primals_2
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        del primals_4
        buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
        del primals_6
        buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4,
            XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
        del primals_3
        buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
        del buf0
        triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
            XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
        del primals_5
        buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0),
            reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
        buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        triton_poi_fused_eq_1[grid(64)](primals_8, buf6, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_8
        buf7 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
        del buf1
        buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        triton_poi_fused__softmax_div_masked_fill_2[grid(64)](buf6, buf5,
            buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf5
        triton_poi_fused__softmax_div_masked_fill_3[grid(256)](buf9, buf6,
            buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
        buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf8
        triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf10, 16, 4,
            XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
        del primals_7
        buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
        del buf2
        extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0),
            reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
        buf12 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
        del buf7
        triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4,
            YBLOCK=16, num_warps=1, num_stages=1)
        buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0)
        del buf11
        extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf13)
        del primals_10
        buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_1, buf13,
            buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_1, buf13,
            buf14, buf15, primals_11, primals_12, buf16, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_12
        buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf17)
        buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0)
        del buf17
        buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        triton_poi_fused_relu_threshold_backward_7[grid(64)](buf18,
            primals_14, buf24, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del primals_14
        buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19)
        buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0)
        del buf19
        triton_poi_fused_add_8[grid(64)](buf20, buf16, primals_16, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del primals_16
        buf21 = buf15
        del buf15
        buf22 = buf14
        del buf14
        triton_poi_fused_native_layer_norm_9[grid(16)](buf20, buf21, buf22,
            16, XBLOCK=16, num_warps=1, num_stages=1)
        buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_10[grid(64)](buf20, buf21, buf22,
            primals_17, primals_18, buf23, 64, XBLOCK=64, num_warps=1,
            num_stages=1)
        del buf21
        del buf22
        del primals_18
    return (buf23, primals_1, primals_11, primals_17, buf6, buf9,
        reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13,
        reinterpret_tensor(buf16, (16, 4), (4, 1), 0),
        reinterpret_tensor(buf18, (16, 4), (4, 1), 0), buf20, primals_15,
        buf24, primals_13, primals_9,
        reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0))


def scaled_dot_product_attention(query, keys, values, mask=None):
    d_k = keys.shape[-1]
    dot_score = query @ keys.transpose(-2, -1) / math.sqrt(d_k)
    if mask is not None:
        dot_score = dot_score.masked_fill(mask == 0, -1000000000.0)
    attn_score = torch.softmax(dot_score, dim=-1)
    return attn_score @ values, attn_score


class MultiHeadAttention(nn.Module):

    def __init__(self, d_model, num_heads):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.depth = d_model // num_heads
        self.d_model = self.num_heads * self.depth
        self.wq = nn.Linear(d_model, d_model)
        self.wk = nn.Linear(d_model, d_model)
        self.wv = nn.Linear(d_model, d_model)
        self.wo = nn.Linear(d_model, d_model)

    def reshape_for_multi_heads_attention(self, t):
        batch_size = t.shape[0]
        t = t.view(batch_size, -1, self.num_heads, self.depth)
        return t.transpose(1, 2)

    def forward(self, q, k, v, mask):
        batch_size = q.shape[0]
        q = self.wq(q)
        k = self.wk(k)
        v = self.wv(v)
        q = self.reshape_for_multi_heads_attention(q)
        k = self.reshape_for_multi_heads_attention(k)
        v = self.reshape_for_multi_heads_attention(v)
        scaled_attention, _attention_weights = scaled_dot_product_attention(
            q, k, v, mask)
        scaled_attention = scaled_attention.transpose(2, 1).contiguous().view(
            batch_size, -1, self.d_model)
        return self.wo(scaled_attention)


class PositionwiseFeedForward(nn.Module):

    def __init__(self, d_model, d_ff):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)

    def forward(self, x):
        return self.w_2(F.relu(self.w_1(x)))


class EncoderLayerNew(nn.Module):

    def __init__(self, d_model, num_heads, d_ff, dropout=0.1):
        super(EncoderLayerNew, self).__init__()
        self.multi_head_attention = MultiHeadAttention(d_model, num_heads)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff)
        self.layernorm1 = nn.LayerNorm(d_model)
        self.layernorm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, input_0, input_1):
        primals_2 = self.multi_head_attention.wq.weight
        primals_3 = self.multi_head_attention.wq.bias
        primals_4 = self.multi_head_attention.wk.weight
        primals_5 = self.multi_head_attention.wk.bias
        primals_6 = self.multi_head_attention.wv.weight
        primals_7 = self.multi_head_attention.wv.bias
        primals_9 = self.multi_head_attention.wo.weight
        primals_10 = self.multi_head_attention.wo.bias
        primals_13 = self.feed_forward.w_1.weight
        primals_14 = self.feed_forward.w_1.bias
        primals_15 = self.feed_forward.w_2.weight
        primals_16 = self.feed_forward.w_2.bias
        primals_11 = self.layernorm1.weight
        primals_12 = self.layernorm1.bias
        primals_17 = self.layernorm2.weight
        primals_18 = self.layernorm2.bias
        primals_1 = input_0
        primals_8 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18])
        return output[0]
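A smoke test of the generated wrapper may help orient readers; this is my sketch, not part of the dataset entry. The assert_size_stride calls in call() pin the traced configuration to d_model=4, num_heads=4 (head depth 1), d_ff=4, a (4, 4, 4) activation, and a (4, 4, 4) mask whose zeros mark masked-out positions; a CUDA device is assumed.

if __name__ == '__main__':
    torch.manual_seed(0)
    layer = EncoderLayerNew(d_model=4, num_heads=4, d_ff=4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')     # (batch, seq_len, d_model)
    mask = torch.ones(4, 4, 4, device='cuda')  # nonzero = attend, 0 = mask out
    out = layer(x, mask)
    print(out.shape)                           # torch.Size([4, 4, 4])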
NathanYanJing/TransformerReplication
EncoderLayer
false
11,749
[ "MIT" ]
0
b20f987dcc507724971f843c2d214c9c76bd8e34
https://github.com/NathanYanJing/TransformerReplication/tree/b20f987dcc507724971f843c2d214c9c76bd8e34
CustomGruCell
import torch
import numpy as np
import torch.nn as nn


class CustomGruCell(nn.Module):
    """
    A forward only GRU cell.
    Input should be: (sequence length x batch size x input_size).
    The output is the output of the final forward call.
    It's not clear if it would be possible to use the output from each cell
    in a Plan because of the assumptions of 2D tensors in backprop.
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super(CustomGruCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.fc_ir = nn.Linear(input_size, hidden_size, bias=bias)
        self.fc_hr = nn.Linear(hidden_size, hidden_size, bias=bias)
        self.fc_iz = nn.Linear(input_size, hidden_size, bias=bias)
        self.fc_hz = nn.Linear(hidden_size, hidden_size, bias=bias)
        self.fc_in = nn.Linear(input_size, hidden_size, bias=bias)
        self.fc_hn = nn.Linear(hidden_size, hidden_size, bias=bias)
        self.init_parameters()

    def init_parameters(self):
        std = 1.0 / np.sqrt(self.hidden_size)
        for w in self.parameters():
            w.data.uniform_(-std, std)

    def forward(self, x, h):
        i_r = self.fc_ir(x)
        h_r = self.fc_hr(h)
        i_z = self.fc_iz(x)
        h_z = self.fc_hz(h)
        i_n = self.fc_in(x)
        h_n = self.fc_hn(h)
        resetgate = (i_r + h_r).sigmoid()
        inputgate = (i_z + h_z).sigmoid()
        newgate = (i_n + resetgate * h_n).tanh()
        hy = newgate + inputgate * (h - newgate)
        return hy


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_size': 4, 'hidden_size': 4}]
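One fact worth spelling out (my observation, not repo code): the gate algebra above is exactly torch.nn.GRUCell's, since hy = newgate + inputgate * (h - newgate) is the update h' = (1 - z) * n + z * h rearranged. Copying the six Linear layers into a stock GRUCell, which stacks its gates in (r, z, n) order, should therefore reproduce the output on 2D inputs; the helper name below is mine.

import torch
import torch.nn as nn


def check_against_nn_grucell(input_size=4, hidden_size=4):
    custom = CustomGruCell(input_size, hidden_size)
    ref = nn.GRUCell(input_size, hidden_size)
    with torch.no_grad():
        # nn.GRUCell concatenates the reset/update/new gates in (r, z, n)
        # order along dim 0 of its weight and bias tensors.
        ref.weight_ih.copy_(torch.cat([custom.fc_ir.weight,
                                       custom.fc_iz.weight,
                                       custom.fc_in.weight]))
        ref.weight_hh.copy_(torch.cat([custom.fc_hr.weight,
                                       custom.fc_hz.weight,
                                       custom.fc_hn.weight]))
        ref.bias_ih.copy_(torch.cat([custom.fc_ir.bias,
                                     custom.fc_iz.bias,
                                     custom.fc_in.bias]))
        ref.bias_hh.copy_(torch.cat([custom.fc_hr.bias,
                                     custom.fc_hz.bias,
                                     custom.fc_hn.bias]))
    x = torch.rand(8, input_size)
    h = torch.rand(8, hidden_size)
    assert torch.allclose(custom(x, h), ref(x, h), atol=1e-6)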
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_mul_sigmoid_sub_tanh_0(in_out_ptr0, in_out_ptr1,
    in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7,
    in_ptr8, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x2, xmask)
    tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_out_ptr1 + x2, xmask)
    tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr4 + x2, xmask)
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr6 + x2, xmask)
    tmp17 = tl.load(in_ptr7 + x2, xmask)
    tmp21 = tl.load(in_ptr8 + x2, xmask)
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp7 = tl.sigmoid(tmp6)
    tmp10 = tmp8 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = tl.sigmoid(tmp14)
    tmp18 = tmp7 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = libdevice.tanh(tmp19)
    tmp22 = tmp21 - tmp20
    tmp23 = tmp15 * tmp22
    tmp24 = tmp20 + tmp23
    tl.store(in_out_ptr0 + x2, tmp7, xmask)
    tl.store(in_out_ptr1 + x2, tmp15, xmask)
    tl.store(out_ptr0 + x2, tmp24, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13, primals_14) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4, 4), (4, 1))
    assert_size_stride(primals_10, (4,), (1,))
    assert_size_stride(primals_11, (4, 4), (4, 1))
    assert_size_stride(primals_12, (4,), (1,))
    assert_size_stride(primals_13, (4, 4), (4, 1))
    assert_size_stride(primals_14, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        del primals_4
        buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
        del primals_7
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf3)
        del primals_9
        buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_12, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf4)
        del primals_11
        del primals_12
        buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_14, reinterpret_tensor(primals_6, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf5)
        del primals_13
        del primals_14
        buf6 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf7 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf2
        buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mul_sigmoid_sub_tanh_0[grid(256)](buf6, buf7,
            primals_2, buf1, primals_5, primals_8, buf3, primals_10, buf4,
            buf5, primals_6, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf1
        del buf3
        del primals_10
        del primals_2
        del primals_5
        del primals_8
    return (buf8, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1),
        0), buf4, buf5, buf6, buf7)


class CustomGruCellNew(nn.Module):
    """
    A forward only GRU cell.
    Input should be: (sequence length x batch size x input_size).
    The output is the output of the final forward call.
    It's not clear if it would be possible to use the output from each cell
    in a Plan because of the assumptions of 2D tensors in backprop.
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super(CustomGruCellNew, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.fc_ir = nn.Linear(input_size, hidden_size, bias=bias)
        self.fc_hr = nn.Linear(hidden_size, hidden_size, bias=bias)
        self.fc_iz = nn.Linear(input_size, hidden_size, bias=bias)
        self.fc_hz = nn.Linear(hidden_size, hidden_size, bias=bias)
        self.fc_in = nn.Linear(input_size, hidden_size, bias=bias)
        self.fc_hn = nn.Linear(hidden_size, hidden_size, bias=bias)
        self.init_parameters()

    def init_parameters(self):
        std = 1.0 / np.sqrt(self.hidden_size)
        for w in self.parameters():
            w.data.uniform_(-std, std)

    def forward(self, input_0, input_1):
        primals_1 = self.fc_ir.weight
        primals_2 = self.fc_ir.bias
        primals_4 = self.fc_hr.weight
        primals_5 = self.fc_hr.bias
        primals_7 = self.fc_iz.weight
        primals_8 = self.fc_iz.bias
        primals_9 = self.fc_hz.weight
        primals_10 = self.fc_hz.bias
        primals_11 = self.fc_in.weight
        primals_12 = self.fc_in.bias
        primals_13 = self.fc_hn.weight
        primals_14 = self.fc_hn.bias
        primals_3 = input_0
        primals_6 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14])
        return output[0]
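Reading the schedule above: the six Linear layers run as six extern GEMM calls (four plain mm whose bias additions are deferred, plus two addmm with the bias folded in), and every remaining pointwise op, both sigmoids, the tanh, and the final interpolation, collapses into the single fused Triton kernel. A parity sketch of mine, assuming both classes from this entry are importable together and a CUDA device is available:

if __name__ == '__main__':
    torch.manual_seed(0)
    eager = CustomGruCell(4, 4).cuda()
    fused = CustomGruCellNew(4, 4).cuda()
    # Parameter names match, so the state dict transfers directly.
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    h = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x, h), fused(x, h), atol=1e-6)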
NiWaRe/PySyft
CustomGruCell
false
11,750
[ "Apache-2.0" ]
0
b5abe66ea949d60be14a08d2e4e32e9587c7bf5c
https://github.com/NiWaRe/PySyft/tree/b5abe66ea949d60be14a08d2e4e32e9587c7bf5c
ToLongTensor
import torch
from torch import Tensor
from typing import List
import torch.nn as nn


class ToLongTensor(nn.Module):
    """Convert a list of integers to long tensor
    """

    def __init__(self):
        super(ToLongTensor, self).__init__()

    def forward(self, tokens: 'List[List[int]]') -> Tensor:
        return torch.tensor(tokens)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
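A brief aside on the dtype this module's name promises (my illustration, not repo code): torch.tensor infers int64 from nested Python ints, so the annotated List[List[int]] path needs no explicit .long() cast, while the harness above traces with torch.rand, i.e. a float32 tensor that torch.tensor merely copies.

import torch

tokens = [[1, 2], [3, 4]]
print(torch.tensor(tokens).dtype)  # torch.int64, inferred from Python ints
t = torch.rand(2, 2)
print(torch.tensor(t).dtype)  # torch.float32, a plain copy (emits a warning)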
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tl.store(out_ptr0 + x0, tmp0, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__to_copy_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class ToLongTensorNew(nn.Module):
    """Convert a list of integers to long tensor
    """

    def __init__(self):
        super(ToLongTensorNew, self).__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
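Because tracing saw that float32 input, the generated triton_poi_fused__to_copy_0 is a pure element-wise copy into a float32 buffer; the advertised to-long conversion was specialized away. For contrast, a hypothetical sketch (the kernel name and cast placement are mine, not the generator's) of what the kernel would look like had the trace kept a float32-to-int64 cast:

import triton
import triton.language as tl


@triton.jit
def copy_as_int64_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)
    xmask = xindex < xnumel
    tmp0 = tl.load(in_ptr0 + xindex, xmask)
    # Float-to-int conversion in Triton truncates toward zero, like a C cast;
    # the output buffer would be allocated with torch.int64 instead of
    # torch.float32.
    tl.store(out_ptr0 + xindex, tmp0.to(tl.int64), xmask)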
NivekT/text
ToLongTensor
false
11,751
[ "BSD-3-Clause" ]
0
4908d3c88f92296a4c23be2f064ccde13cce50ce
https://github.com/NivekT/text/tree/4908d3c88f92296a4c23be2f064ccde13cce50ce